def parse(self, response, *args):
    """Parse the article-list JSON response and schedule one detail request
    per qualifying article.

    Bug fix: previously a single ``TuiqiuspiderproItem`` was created once
    before the loop and mutated on every iteration.  Because every request's
    ``meta['item']`` referenced the same object, all items ultimately yielded
    by ``_article`` carried the data of the last article.  A fresh item is
    now instantiated inside the loop so each request owns its own data.
    (Also fixes the ``yied`` -> ``yield`` typo, which was a syntax error.)
    """
    resp = response.json()
    for article in resp['articles']:
        # Keep only recent, non-video articles outside the "mini_top" channel.
        if compTime(article['published_at']) and article['channel'] != "mini_top" and article['is_video'] is False:
            item = TuiqiuspiderproItem()  # fresh item per article — do NOT hoist out of the loop
            item['publish_time'] = article['published_at']
            item['tag'] = resp['label']
            item['source_href'] = article['share']
            item['source'] = "——"
            # meta carries the partially-filled item to the detail-page callback;
            # the redundant second assignment to request.meta was dropped.
            yield scrapy.Request(url=article['share'], callback=self._article,
                                 meta={'item': item})
def _article(self, response):
    """Fill in author and content from an article detail page and yield the item.

    Bug fix: the original unconditionally overwrote ``item['author']`` with an
    xpath result right after the BeautifulSoup try-block, making that whole
    block dead code (and possibly replacing a good value with ``None``).  The
    xpath lookup is now only a fallback when the soup-based extraction fails.
    """
    item = response.meta['item']
    soup = BeautifulSoup(response.text, 'lxml')
    # Strip HTML comment nodes so they do not pollute text extraction.
    for html_comment in soup.findAll(text=lambda text: isinstance(text, Comment)):
        html_comment.extract()
    author = None
    try:
        article = soup.find("div", {"class": "news-left"})
        tips = article.find('p', {"class": "tips"}).find_all('span')
        if tips:
            # The second <span> holds the author when present, else the first.
            author = tips[1].get_text() if len(tips) > 1 else tips[0].get_text()
    except AttributeError:
        # Layout differs (find() returned None); fall back to xpath below.
        pass
    if author is None:
        author = response.xpath('.//div[@class="news-left"]/p[@class="tips"]/span/text()').extract_first()
    item['author'] = author
    item['content'] = response.xpath('.//div[@class="con"]').extract_first()
    yield item
# NOTE(review): the item yielded for every iteration of parse() was identical
# because one TuiqiuspiderproItem was created before the loop and mutated in
# place — every request's meta referenced the same object. Fix: instantiate a
# new item inside the loop so each request passed to _article carries its own data.