在 Python 3.9 环境下,使用 threading 多线程爬取百度图片,线程启动后,实际下载到的图片数量比单线程时明显更少
import os
import re
import threading
import time
from queue import Empty, Queue

import requests
lock = threading.Lock()
header = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.81 Safari/537.36 Edg/104.0.1293.47"
}
def image():
time_start = time.time()
keyword = input()
save_dir = keyword
page_num = 4
# 请求的 url
url = 'https://image.baidu.com/search/acjson?'#这是个ajkx请求
#生存列表,存储图片链接
image_url_list = []
for pn in range(0, 30 * page_num, 30):
# 请求参数
param = {'tn': 'resultjson_com',
'logid': '7603311155072595725',
'ipn': 'rj',
'ct': 201326592,
'is': '',
'fp': 'result',
'queryWord': keyword,
'cl': 2,
'lm': -1,
'ie': 'utf-8',
'oe': 'utf-8',
'adpicid': '',
'st': -1,
'z': '',
'ic': '',
'hd': '',
'latest': '',
'copyright': '',
'word': keyword,
's': '',
'se': '',
'tab': '',
'width': '',
'height': '',
'face': 0,
'istype': 2,
'qc': '',
'nc': '1',
'fr': '',
'expermode': '',
'force': '',
'cg': '',
'pn': pn, # 显示:30-60-90
'rn': '30', # 每页显示 30 条
'gsm': '1e',
'1618827096642': ''
}
response = requests.get(url=url, headers=header, params=param)
#显示200表示请求成功,但没处理
if response.status_code == 200:
pass
html = response.text.encode('utf-8').decode('utf-8')
# 正则方式提取图片链接
image_url_= re.findall('"thumbURL":"(.*?)",', html, re.S)
#遍历获取到的图片链接,存储在一个列表中,用来构成队列
for Url in image_url_:
image_url_list.append(Url)
print(image_url_list)
#建立不同关键词存储的文件夹
if not os.path.exists(save_dir):
os.makedirs(save_dir)
# #实例化Queue,用来存储图片链接为队列
url_queue = Queue(len(image_url_list))
for a in range(len(image_url_list)): #遍历列表中存储的图片链接
#放入队列中
dict = {}
dict['url'] = image_url_list[a]
dict['num'] = a
url_queue.put(dict)
def get_images(threadNmae, url_queue):
while True:
if not url_queue.empty():
image_url = url_queue.get()['url']
image_data = requests.get(url=image_url, headers=header).content
else :
break
with open(os.path.join(save_dir, '{}.jpg'.format(url_queue.get()['num'])), 'wb') as fp:
fp.write(image_data)
class myThread(threading.Thread):
def __init__(self, name, delay):
threading.Thread.__init__(self)
self.name = name
self.delay = delay
def run(self):
print("Starting " + self.name)
get_images(self.name, self.delay)
print("Exiting " + self.name)
thread1 = myThread("Thread-" +'1',delay=url_queue)
thread1.start()
thread2 = myThread("Thread-" + '2', delay=url_queue)
thread2.start()
image()
尝试过让爬虫休眠后再运行,没有效果
期望爬取到的图片能够全部下载下来
是这样的,我认为,创建一个爬虫线程也需要时间,而两个线程并不能提高多大效率。或许题主可以试试线程池。
多线程爬取会导致爬取顺序错乱,但是量的话是不会少的,等多线程跑完检查一下是不是图片总数跟原来一样的
用multiprocessing提速吧