python 的csv 写入不了

# -*- coding: utf-8 -*-
import requests
from lxml import etree
import json
import csv
# Output CSV, created in the current working directory.  newline='' is
# required by the csv module so it manages line endings itself, and
# utf-8 keeps the Chinese headers/content intact.
fp = open('./腾讯新闻.csv', 'w+', newline='', encoding='utf-8')
writer = csv.writer(fp)
# Header row — one column per field scraped from the news API.
writer.writerow(['标题', '更新时间', '出版时间', '网址', '评论数', '媒体', '类别', '主类别'])
def get_infos(url=None):
    """Fetch one page of the Tencent news list API and append every item
    to the module-level CSV ``writer``.

    Parameters
    ----------
    url : str, optional
        Endpoint to fetch.  The original signature named this parameter
        ``self`` and never used it, so the caller's argument was silently
        ignored and the same hard-coded page was fetched every time.
        Any non-string value (including the old caller's accidental
        list argument) falls back to the default world-news endpoint,
        keeping existing callers working.

    Side effects: one HTTP GET, one ``print`` per item, and one CSV row
    per item written through the module-level ``writer``.
    """
    default_url = 'https://i.news.qq.com/trpc.qqnews_web.kv_srv.kv_srv_http_proxy/list?sub_srv_id=world&srv_id=pc&offset=0&limit=20&strategy=1&ext={%22pool%22:[%22high%22,%22top%22],%22is_filter%22:10,%22check_type%22:true}'
    if not isinstance(url, str):
        url = default_url
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537..36 Edg/91.0.864.37'}
    res = requests.get(url, headers=headers)
    # Decode the raw body explicitly as UTF-8 instead of trusting the
    # response's declared encoding (the old `res.encoding` assignment was
    # dead code since `res.content` bytes were decoded manually anyway).
    data = json.loads(res.content.decode('utf-8'))
    # Single pass over the item list, replacing the original eight
    # parallel comprehensions that each re-scanned the same list.
    for item in data['data']['list']:
        info = {
            '标题': item['title'],
            '更新时间': item['update_time'],
            '出版时间': item['publish_time'],
            '网址': item['url'],
            '评论数': item['comment_num'],
            '媒体': item['media_name'],
            '类别': item['category_cn'],
            '主类别': item['sub_category_cn'],
        }
        print(info)
        # dicts preserve insertion order (3.7+), so this matches the
        # header column order written at module import time.
        writer.writerow(list(info.values()))
#urls_info = ['https://i.news.qq.com/trpc.qqnews_web.kv_srv.kv_srv_http_proxy/list?sub_srv_id=world&srv_id=pc&offset={}&limit=20&strategy=1&ext={%22pool%22:[%22top%22],%22is_filter%22:2,%22check_type%22:true}'.format(str(i)) for i in range(0, 200, 20)]

if __name__ == '__main__':
    # One URL per 20-item page: offsets 0, 20, ..., 180 (10 pages).  The
    # '{%22pool%22:...}' literal is concatenated separately so the
    # f-string's braces need no escaping.
    urls_info = [f'https://i.news.qq.com/trpc.qqnews_web.kv_srv.kv_srv_http_proxy/list?sub_srv_id=world&srv_id=pc&offset={i}&limit=20&strategy=1&ext=' + '{%22pool%22:[%22top%22],%22is_filter%22:2,%22check_type%22:true}' for i in range(0, 200, 20)]
    try:
        for page_url in urls_info:
            # Bug fix: the original called get_infos(urls_info) — passing
            # the whole list every iteration instead of the current page
            # URL, so pagination never actually advanced.
            get_infos(page_url)
    finally:
        # Always close (and thereby flush) the CSV, even if a request
        # raises — rows left in the write buffer are the usual reason
        # the output file appears empty.
        fp.close()

 

求大佬教一下,为什么从腾讯新闻网爬下来的数据 csv写不进,把代码发我一下,十分感谢!!!

在同级目录下创建"腾讯新闻.csv"

把fp = open('./腾讯新闻.csv','w+', newline='', encoding='utf-8')

改成fp = open('腾讯新闻.csv','w+', newline='', encoding='utf-8')

之后运行试试。

简而言之就是 open() 那一行的问题——不过在同级目录下 './腾讯新闻.csv' 和 '腾讯新闻.csv' 其实是等价的写法,更可能的原因是:数据还留在写缓冲区、脚本异常退出时没有执行到 fp.close(),或者循环里把整个 urls_info 列表(而不是单个 URL)传给了 get_infos。

您好,我是有问必答小助手,您的问题已经有小伙伴解答了,您看下是否解决,可以追评进行沟通哦~

如果有您比较满意的答案 / 帮您提供解决思路的答案,可以点击【采纳】按钮,给回答的小伙伴一些鼓励哦~~

ps:问答VIP仅需29元,即可享受5次/月 有问必答服务,了解详情>>>https://vip.csdn.net/askvip?utm_source=1146287632