import requests
import threading

# qian() and qian1() were cut off when the question was posted; judging by the
# str(1) tag, this surviving loop is the tail of qian1():
def qian1():
    # ... (data, url and header were presumably set up here, as in qian2())
    while True:
        mes = requests.get(url, headers=header, timeout=5)
        print(str(1) + str(mes.status_code) + ':' + str(mes.text) + str(data))

def qian2():
    data = 201820192001155
    url = ''  # the request URL was truncated in the original post
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36',
        'Referer': 'http://172.30.1.70/srun_portal_pc.php?ac_id=1',
        'Cookie': cookie,  # the cookie value was omitted in the original post
    }
    while True:
        mes = requests.get(url, headers=header, timeout=5)
        print(str(2) + str(mes.status_code) + ':' + str(mes.text))
def main():
    thread1 = threading.Thread(target=qian)
    thread2 = threading.Thread(target=qian1)
    thread3 = threading.Thread(target=qian2)
    thread4 = threading.Thread(target=qian)
    thread5 = threading.Thread(target=qian1)
    thread6 = threading.Thread(target=qian2)
    thread1.start()
    thread2.start()
    thread3.start()
    thread4.start()
    thread5.start()
    thread6.start()
main()
Every function here contains a while loop with no termination condition and no time.sleep(), so requests are fired far too frequently; that kind of traffic is easily flagged as a crawler and blocked. Try removing the while loops or adding time.sleep() inside them. Also call thread1.join(), thread2.join(), and so on in the main function, or put the threads in a list and loop over it to start() and then join() them (see the sketch below). Related articles cover this pattern in more detail.
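A minimal sketch of that list-based start/join pattern (the worker function, the one-second delay, and the ten-iteration cap are illustrative assumptions, not from the original code; the six-thread count matches the question):

import threading
import time

def worker(tag):
    # Bounded loop with a delay instead of a bare `while True`,
    # so requests are not fired back-to-back.
    for _ in range(10):                      # illustrative cap
        print(tag, 'would send one request here')
        time.sleep(1)                        # throttle between requests

def main():
    # Collect the threads in a list, then start and join them in loops.
    threads = [threading.Thread(target=worker, args=(i,)) for i in range(6)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()                             # wait for every worker to finish

main()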
I'd suggest running it single-threaded first. Also check whether the URL you are requesting has to be accessed in a logged-in state, and print out the response content to see what you are actually getting back.
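A quick single-threaded check along those lines (the URL below is borrowed from the question's Referer header and the cookie value is a placeholder, since both were omitted from the post):

import requests

url = 'http://172.30.1.70/srun_portal_pc.php?ac_id=1'  # placeholder taken from the Referer
header = {
    'User-Agent': 'Mozilla/5.0',
    'Cookie': 'PASTE-YOUR-SESSION-COOKIE-HERE',  # needed if the page requires a logged-in session
}

mes = requests.get(url, headers=header, timeout=5)
print(mes.status_code)   # 200 means the request itself went through
print(mes.text[:500])    # if this is a login page, the cookie is missing or stale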