为什么爬取豆瓣top250得到的是个空列表啊

# import requests
# from bs4 import BeautifulSoup

# #网页爬取
# def getHtml(url):
#     r=requests.get(url)
#     r.encoding='utf-8'
#     return r.text
# #parserHtml()函数为网页内容解析函数
#  #数据解析
# def parserHtml(html,name,attrs):
#     result=[]
#     soup=BeautifulSoup(html,"html.parser")
#     data=soup.find_all(name,attrs)
#     for i in data:
#         info=[]
#         file_name=i.find("span",{'class':"title"})
#         #在每一个name标签中找第一个span标签
#         score=i.find('span',{'class':'rating_num'})#找第一个class标签
#         #for j in tagA:
#         info.append(file_name.string)
#         info.append(score.string)
#         result.append(info)
#     return result 
#数据储存
# import csv
# def writeFile(data,filename):
#     save_path="./"+filename
#     with open(save_path,'a',errors='ignore',newline='')as fd:
#         f_csv=csv.writer(fd)
#         f_csv.writerow(data)
# #数据格式化输出
# def display(data):
#     print("{1:<10}{2:{0}^20}{3:{0}^16}".format(chr(12288),"序号","电影名","豆瓣评分"))
#     for i in range(len(data)):
#         print("{1:<10}{2:{0}^20}{3:{0}^30}".format(chr(12288),i+1,data[i][0],data[i][1]))
# #main函数定义及调用
# def main():
#     result=[]
#     
```python
# Download the Douban Top 250 page, extract (title, rating) pairs from the
# <div class="info"> entries, and pretty-print them.
page_url = "https://movie.douban.com/top250"

page_html = getHtml(page_url)

movies = parserHtml(page_html, "div", {'class': 'info'})

display(movies)

# entry-point call (left disabled)
#main()

```

被反爬了，加上 User-Agent

def getHtml(url, timeout=10):
    """Fetch *url* and return the page source decoded as UTF-8.

    A browser-like User-Agent header is sent because douban.com rejects the
    default python-requests UA (anti-scraping), which is why the original
    scrape returned an empty list.

    Parameters:
        url: the page URL to download.
        timeout: seconds to wait for the server before giving up
            (new optional parameter, default 10 — backward compatible).

    Returns:
        The response body as a str.
    """
    r = requests.get(
        url,
        headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36'},
        # requests has NO default timeout; without one a stalled connection
        # blocks the program forever.
        timeout=timeout,
    )
    # Douban serves UTF-8; set it explicitly so r.text decodes correctly.
    r.encoding = 'utf-8'
    return r.text

你就是被反爬了，就算按楼上说的加上 User-Agent，也很快会再被反爬。爬虫这块还需好好研究，你懂的。