Help with formatting the output of Fang.com (房天下) data scraped with Python

Dear all experts:

I'm a Python beginner. I downloaded a snippet from the Python Chinese community that scrapes new-home listing data from Fang.com (房天下).

However, the code just prints the parsed results linearly as plain text; it isn't wrapped into reusable output functions and cannot write the data field by field to Excel.

Could you help me modify the code so that, after parsing the text, it writes an .xlsx file with the Excel header fields as columns? Thanks; I'm not looking for a free ride and am happy to pay.

Here is the code (note: the following is from the 菜J学Python article by authors J哥 and 燕子; copyright belongs to them):

User agents and proxies:

import requests  # for fetching pages
from pyquery import PyQuery as pq  # pyquery and re are used to parse the data
import time
import re
import random

global user_agents
global proxy_list
user_agents = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) "
    "Chrome/45.0.2454.85 Safari/537.36 115Browser/6.0.3",
    "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
    "Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
    "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
    "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
    "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
    "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
    "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
    "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
    "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 LBBROWSER",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
    "Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; zh-cn) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre",
    "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0) Gecko/20100101 Firefox/16.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
    "Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36",] #虚拟代理服务器

proxy_list = ["218.91.13.2:46332",
              "121.31.176.85:8123",
              "218.71.161.56:80",
              "49.85.1.230:28643",
              "115.221.121.165:41674",
              "123.55.177.237:808"] #虚拟路劲IP地址

Fetching the listing IDs:

def get_id(city):
    url = 'https://' + city + '.newhouse.fang.com/house/s/b91'
    user_agent = random.choice(user_agents)
    header = {'User-Agent': user_agent}
    proxy = {'Proxies': random.choice(proxy_list)}
    r = requests.get(url, headers=header, proxies=proxy)
    time.sleep(2)
    r.encoding = 'GBK'
    pattern1 = re.compile('(?<=现有新楼盘)\d+')
    total = int(re.findall(pattern1, r.text)[0]) // 20 + 1
    idlist = []
    for i in range(1, total + 1):
        url = 'https://' + city + '.newhouse.fang.com/house/s/b9' + str(i)
        user_agent = random.choice(user_agents)
        header = {'User-Agent': user_agent}
        proxy = {'Proxies': random.choice(proxy_list)}
        r = requests.get(url, headers=header, proxies=proxy)
        time.sleep(2)
        r.encoding = 'gb2312'
        pattern = re.compile('(?<=loupan/)\d+')
        id = re.findall(pattern, r.text)
        for j in id:
            idlist.append(j)
    # print(idlist)
    return idlist

Fetching the main details:

def get_data(city, id):
    url = 'https://' + city + '.newhouse.fang.com/loupan/' + id + '/housedetail.htm'
    user_agent = random.choice(user_agents)
    header = {'User-Agent': user_agent}
    proxy = {'Proxies': random.choice(proxy_list)}
    r = requests.get(url, headers=header, proxies=proxy)
    time.sleep(1)
    r.encoding = 'utf8'
    doc = pq(r.text)
    # print(doc)
    data1 = doc('.ts_linear').items()
    for i in data1:
        print(i.text())
    data1 = doc('.list').items()
    for i in data1:
        print(i.text())

Main program call:

id = get_id('gz')
for i in range(len(id)):
    get_data('gz', id[i])

Please help me extract the data by field and load it into Excel. (I did consider simply copy-pasting the text into Excel and cleaning it up there, but that felt far too crude.) The sample Excel header fields are shown below:

[screenshot of the sample Excel header fields, from the same 菜J学Python article by J哥 and 燕子]

This isn't hard so much as time-consuming, so I can only offer an outline and some suggestions.

1. First, the ID-fetching code has a problem: the IDs it collects contain duplicates. You could write it like this:

def get_id(city):
    url = 'https://' + city + '.newhouse.fang.com/house/s/b91'
    user_agent = random.choice(user_agents)
    header = {'User-Agent': user_agent}
    # proxy disabled; note that requests expects scheme-keyed proxies,
    # e.g. {'http': 'http://ip:port'}, not {'Proxies': ...}
    # proxy = {'Proxies': random.choice(proxy_list)}
    r = requests.get(url, headers=header)
    time.sleep(2)
    r.encoding = 'GBK'
    pattern1 = re.compile(r'(?<=现有新楼盘)\d+')
    total = int(re.findall(pattern1, r.text)[0]) // 20 + 1  # 20 listings per page

    idlist = []
    for i in range(1, total + 1):
        url = 'https://' + city + '.newhouse.fang.com/house/s/b9' + str(i)
        user_agent = random.choice(user_agents)
        header = {'User-Agent': user_agent}
        # proxy = {'Proxies': random.choice(proxy_list)}
        r = requests.get(url, headers=header)
        time.sleep(2)
        r.encoding = 'gb2312'
        pattern = re.compile(r'<li id="lp_(.*?)"  ')
        id = re.findall(pattern, r.text)
        for j in id:
            idlist.append(j)
    # print(idlist)
    return idlist
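
If duplicates still slip through, an order-preserving dedupe before returning is cheap. A minimal sketch (my addition, not part of the original answer):

# dict.fromkeys keeps the first occurrence of each ID and preserves
# insertion order (guaranteed since Python 3.7)
idlist = list(dict.fromkeys(idlist))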

2. To fetch the detail page for each ID, define the following function:

def get_data(city, id):
    url = 'https://' + city + '.newhouse.fang.com/loupan/' + id + '/housedetail.htm'
    user_agent = random.choice(user_agents)
    header = {'User-Agent': user_agent}
    #proxy = {'Proxies': random.choice(proxy_list)}
    r = requests.get(url, headers=header)
    time.sleep(1)
    r.encoding = 'utf8'
    doc = pq(r.text)
    return doc
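
Long scraping runs will occasionally hit timeouts or bad responses, so you may want to wrap the request in a retry helper rather than letting one failure kill the whole loop. A sketch under that assumption (fetch_with_retry is my own name, not part of the original code):

import time
import requests

def fetch_with_retry(url, headers, retries=3):
    # try the request a few times with exponential backoff before giving up
    for attempt in range(retries):
        try:
            r = requests.get(url, headers=headers, timeout=10)
            r.raise_for_status()  # raise on HTTP 4xx/5xx
            return r
        except requests.RequestException:
            time.sleep(2 ** attempt)  # back off: 1s, 2s, 4s
    return None  # caller should check for None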

3. To extract each field from the fetched page, you can define a function like this:

import pandas as pd  # the snippets below assume pandas is imported

def get_value(doc, id):
    print(id)  # progress indicator
    pat1 = '物业类别:\n(.*?)\n项目特色:\n(.*?)\n建筑类别:\n(.*?)\n装修状况:\n(.*?)\n产权年限:\n(.*?)\n环线位置:\n.*?\n开发 商:\n(.*?)\n楼盘地址:\n(.*?)\n.*?交通(.*).*?综合商场(.*?)\n'
    pat2 = '销售状态:\n(.*?)\n开盘时间:\n(.*?)\n交房时间:\n(.*?)\n售楼地址:\n.*?\n咨询电话:\n(.*?)\n主力户型:\n(.*?)\n预售许可证:'
    pat3 = '占地面积:\n(.*?)\n建筑面积:\n(.*?)\n容积率:\n(.*?)\n绿化率:\n(.*?)\n停车位:\n(.*?)\n楼栋总数:\n(.*?)\n物业公司:\n(.*?)\n物业费:\n(.*?)\n物业费描述'
    pat_list = [pat1, pat2, pat3]

    fields = ()
    items = list(doc('.list').items())  # the .list blocks on the detail page (three are expected)

    for i in range(len(items)):
        rst = items[i].text()
        fields += re.findall(pat_list[i], rst, re.S)[0]
        # print(len(fields))

    data = [id] + list(fields)
    info_df = pd.DataFrame(data).T
    names = ['房天下ID', '物业类别', '项目特色', '建筑类别', '装修状况', '产权年限', '开发商', '楼盘地址', '交通', '配套商场',
             '销售状态', '开盘时间', '交房时间', '咨询电话', '主力户型', '占地面积', '建筑面积', '容积率', '绿化率',
             '停车位', '楼栋总数', '物业公司', '物业费']
    info_df.columns = names
    return info_df

4. Call everything from the main program:

id_list = get_id('tj')

infoAll = pd.DataFrame()

for i in range(len(id_list)):
    doc = get_data('tj', id_list[i])
    info_df = get_value(doc, id_list[i])
    infoAll = pd.concat([infoAll, info_df], axis=0)

# write the accumulated frame, not just the last page's info_df
infoAll.to_excel('房天下.xlsx', index=False)

(Writing .xlsx through to_excel requires the openpyxl or xlsxwriter package to be installed.)

This is only a rough outline; in particular, the code in step 3 still needs adjusting, because not every page contains all of the fields.
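
For example, you could guard each re.findall against a missing section so that an absent block yields empty strings instead of an IndexError. A sketch (safe_extract is my own helper; pat1/pat2/pat3 above have 9, 5, and 8 capture groups respectively):

import re

def safe_extract(pattern, text, n_groups):
    # return the matched groups, or empty strings when the section is absent
    m = re.findall(pattern, text, re.S)
    return m[0] if m else ('',) * n_groups

# inside get_value, the extraction line would then become:
#   fields += safe_extract(pat_list[i], rst, (9, 5, 8)[i])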


May I ask, do you still need an answer to this question?

You don't need anything this cumbersome to randomize the User-Agent; consider installing the fake_useragent package.

# example
from fake_useragent import UserAgent

# each evaluation returns a random User-Agent string
UserAgent().random
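
For instance, it can replace the hand-maintained pool entirely by plugging straight into the request headers (a one-line sketch):

header = {'User-Agent': UserAgent().random}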

 

In addition, the global keyword is clearly being misused here:

global user_agents

global proxy_list

The user_agents and proxy_list variables you create are defined at module level, so they are already global; there is no need to declare them with the global keyword.

user_agent = '1'

def demo():
    # inside a function's local scope you can read a variable from the global scope
    print(user_agent)

What the keyword actually does is declare a name assigned inside a function as a global variable:

def demo():
    global user_agent
    user_agent = '1'

# after demo() is called, user_agent is available as a global variable
demo()
print(user_agent)
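
Conversely, assigning to the name inside a function without global just creates a new local variable that shadows the global one:

user_agent = '1'

def demo():
    user_agent = '2'  # a new local variable; the global one is untouched

demo()
print(user_agent)  # still prints '1'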