非并发:
import newspaper
from newspaper import Article
def get_headlines():
    """Sequentially fetch each news site and print the titles of its articles 1-5.

    Non-concurrent baseline: each site is crawled and each article is
    downloaded one after another, so total time is the sum of all
    network waits.
    """
    URLs = ['http://www.foxnews.com/',
            'http://www.cnn.com/',
            'http://www.derspiegel.de/',
            'http://www.bbc.co.uk/',
            'https://theguardian.com',]
    for url in URLs:
        # memoize_articles=False forces a fresh crawl on every call
        # (needed so repeated timing runs do the same work).
        result = newspaper.build(url, memoize_articles=False)
        print(result)
        print('\n''The headlines from %s are' % url, '\n')
        # NOTE(review): range(1, 6) takes articles[1]..articles[5] and
        # skips articles[0] — confirm that is intended.
        for i in range(1, 6):
            art = result.articles[i]
            art.download()
            art.parse()
            print(art.title)
if __name__ == '__main__':
    import timeit
    # Run the whole scrape twice and average, to smooth out network
    # variance in the timing.
    elapsed_time = timeit.timeit("get_headlines()", setup="from __main__ import get_headlines", number=2)/2
    print(elapsed_time)
用 with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor 改为并发,以提高抓取速度:
from concurrent.futures import ThreadPoolExecutor, as_completed
def search(url):
    """Do the scraping work for one url — fill in your own logic here.

    Returns the url so the main thread can see which urls have
    completed.
    """
    return url
# Fan out one task per url to a 5-thread pool. The with-block calls
# executor.shutdown(wait=True) on exit, so worker threads are always
# joined — the original left the pool running with no shutdown.
with ThreadPoolExecutor(max_workers=5) as pool:
    # urls is your list of URLs to scrape, defined elsewhere
    all_task = [pool.submit(search, url) for url in urls]
    # as_completed yields each future as soon as it finishes,
    # regardless of submission order.
    for future in as_completed(all_task):
        target_url = future.result()
        print('该url【{}】已完成抓取'.format(target_url))
代码还是要自己多写写,熟练熟练
这个,不太会有人帮助写吧……