Python爬虫运行直接进程已结束

问题遇到的现象和发生背景

为什么我写完爬虫后，一运行程序就直接显示"进程已结束"，什么都没有执行？

问题相关代码,请勿粘贴截图
import urllib.parse
import urllib.request
def tieba_spider(url, begin_page, end_page):
    """Fetch Tieba result pages begin_page..end_page (inclusive) and save each one as an HTML file.

    Args:
        url: Base search URL (already carrying the urlencoded ``kw`` parameter).
        begin_page: First page number to fetch (1-based).
        end_page: Last page number to fetch, inclusive.
    """
    for page_no in range(begin_page, end_page + 1):
        # Tieba paginates via a 'pn' offset of 50 posts per page.
        offset = (page_no - 1) * 50
        target_url = "{}&pn={}".format(url, offset)
        out_name = "第" + str(page_no) + "页.html"
        page_bytes = load_papg(target_url, out_name)
        write_page(page_bytes, out_name)
def load_papg(url, filename):
    """Download *url* and return the raw response body as bytes.

    Args:
        url: Fully-built page URL to fetch.
        filename: Unused; kept only for backward compatibility with existing callers.

    Returns:
        bytes: The raw HTML body of the HTTP response.
    """
    # Present a browser-like User-Agent so the server does not reject the crawler.
    headers = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT6.1; Trident/5.0)"}
    request = urllib.request.Request(url, headers=headers)
    # BUG FIX: the original never closed the response, leaking the socket.
    # urlopen's return value is a context manager; 'with' closes it reliably.
    with urllib.request.urlopen(request) as response:
        return response.read()
def write_page(html, filename):
    """Decode the fetched bytes as UTF-8 and save them to *filename*.

    Args:
        html: Raw page bytes (expected to be UTF-8 encoded).
        filename: Path of the output HTML file.
    """
    print(f"正在保存:{filename}")
    text = html.decode('utf-8')
    with open(filename, 'w', encoding='utf-8') as fp:
        fp.write(text)
if __name__ == '__main__':
    # BUG FIX: the original compared against '__mian__' (typo for '__main__'),
    # so this guard was always false and the script exited without doing anything —
    # which is exactly the "进程已结束" symptom described above.
    kw = input("请输入您需要爬取的网页")
    begin_page = int(input("请输入你需要爬取的首页数"))
    end_page = int(input("请输入你需要爬取的尾页数"))
    url = 'https://tieba.baidu.com/f?'
    # urlencode percent-encodes the (possibly Chinese) keyword for the query string.
    key = urllib.parse.urlencode({"kw": kw})
    url = url + key
    tieba_spider(url, begin_page, end_page)



原因：入口判断把 `__main__` 写成了 `__mian__`（main 写成 mian 了），条件永远不成立，脚本什么都没执行就结束了。改为 `if __name__ == '__main__':` 即可。