## Spider code
```python
import scrapy


class MiddleSpider(scrapy.Spider):
    name = "middle"
    # allowed_domains = ["www.xxx.com"]
    start_urls = ["http://www.baidu.com/s?wd=ip"]

    def parse(self, response):
        # Persist the raw page so we can inspect what the proxy returned.
        page_text = response.text
        with open("ip.html", "w", encoding="utf-8") as fp:
            fp.write(page_text)
```
## Scrapy middleware
```python
from scrapy import signals

# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
import random


class MiddleproDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.
    user_agent_list = [
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
    ]
    PROXY_http = ["114.231.42.244", "183.236.232.160"]
    PROXY_https = ["120.83.49.90:9000", "95.189.112.214:35508"]

    # Intercept every outgoing request
    def process_request(self, request, spider):
        request.headers["User-Agent"] = random.choice(self.user_agent_list)
        # Check whether setting a proxy here actually takes effect
        request.meta["proxy"] = "http://182.139.110.18"
        return None

    # Intercept every response
    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    # Intercept requests that raised an exception
    def process_exception(self, request, exception, spider):
        if request.url.split(":")[0] == "http":
            # Switch to a random proxy from the matching pool
            request.meta["proxy"] = "http://" + random.choice(self.PROXY_http)
        else:
            request.meta["proxy"] = "https://" + random.choice(self.PROXY_https)
        # Reschedule the corrected request
        return request
```
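For reference, a middleware like this only runs if it is enabled in settings.py. A minimal sketch, assuming the project is named `middlepro` (inferred from the class name; the module path is an assumption):

```python
# settings.py -- "middlepro.middlewares" is an assumed module path,
# inferred from the class name MiddleproDownloaderMiddleware.
DOWNLOADER_MIDDLEWARES = {
    "middlepro.middlewares.MiddleproDownloaderMiddleware": 543,
}
```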
Running this produces the following error:
```
2023-05-30 18:45:14 [scrapy.downloadermiddlewares.retry] ERROR: Gave up retrying <GET http://www.baidu.com/s?wd=ip> (failed 3 times): TCP connection timed out: 10060: The connection attempt failed because the connected party did not properly respond after a period of time, or the connected host has failed to respond.
```
First test whether your proxies are actually usable. A dead or misconfigured proxy is exactly what produces this 10060 error: Scrapy tries to open a TCP connection to the proxy and nobody answers. Also note that the hard-coded proxy `http://182.139.110.18` has no port (so port 80 is assumed), and the entries in `PROXY_http` lack ports as well.
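A minimal way to check a proxy outside of Scrapy, sketched with the `requests` library (the proxy address is the one from `process_request` above):

```python
import requests

# Route one request through the proxy; a usable proxy answers within the timeout.
proxy = "http://182.139.110.18"  # address taken from process_request above
try:
    resp = requests.get(
        "http://www.baidu.com/s?wd=ip",
        proxies={"http": proxy, "https": proxy},
        timeout=10,
    )
    print("proxy OK, status:", resp.status_code)
except requests.RequestException as exc:
    print("proxy unreachable:", exc)
```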
Scrapy middleware commonly serves two purposes:

- Downloader middleware: loaded when a request is being processed; at this layer you can modify the request, for example swap the proxy IP or change the User-Agent.
- Spider middleware: loaded when a response is being processed; at this layer you can post-process results, for example filter out invalid data or retry special cases (see the sketch after this list).
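As an illustration of the second kind, here is a minimal spider-middleware sketch; the class name and the `name` field are hypothetical, not part of the project above:

```python
# Hypothetical spider middleware: filters the spider's output before it
# reaches the pipelines. Requests pass through untouched.
class FilterEmptySpiderMiddleware:
    def process_spider_output(self, response, result, spider):
        for obj in result:
            if isinstance(obj, dict) and not obj.get("name"):
                continue  # drop items whose "name" field is empty
            yield obj
```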
Writing a middleware comes down to defining the relevant hook methods on a class and then registering that class in settings.py. Below is an example downloader middleware that adds custom headers to every request:
```python
class CustomHeadersMiddleware:
    def process_request(self, request, spider):
        request.headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
        request.headers['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
        request.headers['Accept-Encoding'] = 'gzip, deflate, sdch'
        request.headers['Accept-Language'] = 'zh-CN,zh;q=0.8,en;q=0.6'
```
In this middleware we define the process_request() method, which Scrapy calls before each request is sent. Inside it we add several HTTP headers, such as the browser identification and the accepted media types. These headers help the crawler masquerade as a regular browser, making it less likely that the server identifies it as a bot and blocks it.
To enable this custom middleware, we also need to register it in settings.py:
```python
DOWNLOADER_MIDDLEWARES = {
    'myproject.middlewares.CustomHeadersMiddleware': 543,
    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
}
```
The middleware class is CustomHeadersMiddleware, registered with an order value of 543; this number is a position in the middleware chain, not a count of middlewares. We also disable Scrapy's built-in UserAgentMiddleware by mapping it to None; otherwise the default User-Agent handling could overwrite our custom header.
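To confirm that the custom headers are actually being sent, one option is a throwaway spider pointed at httpbin.org/headers, which echoes the request headers back; the spider below is a hypothetical sketch, not part of the project above:

```python
import scrapy


class HeaderCheckSpider(scrapy.Spider):
    # Hypothetical one-off spider: the response body from httpbin.org/headers
    # should show the User-Agent set by CustomHeadersMiddleware.
    name = "header_check"
    start_urls = ["https://httpbin.org/headers"]

    def parse(self, response):
        self.logger.info(response.text)
```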