Could someone help write a Python scraper? The details are in the netdisk share; I'll buy you a meal for it.
Send the finished script to me by PM. I'll close the thread as soon as one works; I'll pick the fastest working submission and post a screenshot here. I won't freeload.
I only want the cells with td class="feedbackObj"; there's a very similar class right next to it that should be skipped.
Link: https://pan.baidu.com/s/1uPfhHcttsc07gzF0OCyPJA?pwd=q5g8 extraction code: q5g8 (copy this and open it in Baidu Netdisk)
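For the class question above: since the class attribute is quoted, anchoring the pattern on the closing quote after feedbackObj keeps the look-alike class from matching. A minimal sketch; sample_html is a hypothetical fragment based on the markup quoted later in the thread:

    import re

    # Hypothetical sample: one wanted cell plus a look-alike class that must be skipped.
    sample_html = (
        '<td class="feedbackObjTip"><span>ignore me</span></td>'
        '<td class="feedbackObj" title="开盘时间: 赛前268时33分">'
        '<a href="javascript:void(0);" class="nolink ">'
        '<span attval="受平手/半球">受平手/半球</span></a></td>'
    )

    # The closing quote right after feedbackObj means class="feedbackObjTip" cannot match.
    wanted = re.findall(r'<td class="feedbackObj" title="(.*?)">.*?attval="(.*?)"',
                        sample_html)
    print(wanted)  # [('开盘时间: 赛前268时33分', '受平手/半球')]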
On it.
Where should the scraper be written?
I happen to have three on hand.
Scraper 1
#encoding=utf-8
import urllib2
import re

class neihanba():
    def spider(self):
        '''
        Main scheduler for the scraper.
        '''
        isflow=True  # whether to continue to the next page
        page=1
        while isflow:
            url="http://www.neihanpa.com/article/list_5_"+str(page)+".html"
            html=self.load(url)
            self.deal(html,page)
            panduan=raw_input("Continue? (y/n)")
            if panduan=="y":
                isflow=True
                page+=1
            else:
                isflow=False

    def load(self,url):
        '''
        Fetch the full page at the given URL.
        :param url: the URL to fetch
        :return: the fetched HTML
        '''
        header = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.79 Safari/537.36"
        }
        request = urllib2.Request(url, headers=header)
        response = urllib2.urlopen(request)
        html = response.read()
        return html

    def deal(self,html,page):
        '''
        Regex-match the fetched HTML to pull out titles and body text.
        :param html: the fetched HTML
        :param page: the page number being scraped
        '''
        parrten=re.compile('<li class="piclist\d+">(.*?)</li>',re.S)
        titleList=parrten.findall(html)
        for title in titleList:
            parrten1=re.compile('<a href="/article/\d+.html" rel="external nofollow" >(.*)</a>')
            ti1=parrten1.findall(title)
            parrten2=re.compile('<div class="f18 mb20">(.*?)</div>',re.S)
            til2=parrten2.findall(title)
            for t in ti1:
                tr=t.replace("<b>","").replace("</b>","")
                self.writeData(tr,page)
            for t in til2:
                tr=t.replace("<p>","").replace("</p>","").replace("<br>","").replace("<br />","").replace("&ldquo","\"").replace("&rdquo","\"")
                self.writeData(tr,page)

    def writeData(self,context,page):
        '''
        Write the matched content to a file.
        :param context: the matched content
        :param page: the page number being scraped
        '''
        fileName = "di" + str(page) + "yehtml.txt"
        with open(fileName, "a") as file:
            file.writelines(context + "\n")

if __name__ == '__main__':
    n=neihanba()
    n.spider()
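Note: all three scrapers in this reply are Python 2 (urllib2, raw_input). For Python 3, a minimal sketch of the shared load() helper using only the standard library; the UTF-8 decode is an assumption about the target pages:

    # Python 3 port of the load() helper above (urllib2 became urllib.request).
    import urllib.request

    def load(url):
        header = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 "
                          "(KHTML, like Gecko) Chrome/67.0.3396.79 Safari/537.36"
        }
        request = urllib.request.Request(url, headers=header)
        with urllib.request.urlopen(request) as response:
            # Decoding is an assumption; adjust to the site's actual charset.
            return response.read().decode("utf-8", errors="replace")

The raw_input calls map directly to input() in Python 3.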
Scraper 2
#encoding=utf-8
import urllib
import urllib2
import re

class zhiLian():
    def spider(self,position,workPlace):
        '''
        Main scheduler for the scraper.
        :param position: job title to search for
        :param workPlace: work location
        '''
        url="http://sou.zhaopin.com/jobs/searchresult.ashx?"
        url+=urllib.urlencode({"jl":workPlace})
        url+="&"
        url+=urllib.urlencode({"kw":position})
        isflow=True  # whether to crawl the next page
        page=1
        while isflow:
            # Build a fresh URL each loop; the original mutated url and appended the bare
            # page number with no key ("p" is assumed to be the page parameter here).
            pageUrl=url+"&p="+str(page)
            html=self.load(pageUrl)
            self.deal1(html,page)
            panduan = raw_input("Crawl the next page? (y/n)")
            if panduan == "y":
                isflow = True
                page += 1
            else:
                isflow = False

    def load(self,url):
        '''
        Fetch the full page at the given URL.
        :param url: the URL to fetch
        :return: the fetched HTML
        '''
        header = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.79 Safari/537.36"
        }
        request = urllib2.Request(url, headers=header)
        response = urllib2.urlopen(request)
        html = response.read()
        return html

    def deal1(self,html,page):
        '''
        Regex-match the fetched HTML for the link of each job posting.
        :param html: the fetched HTML
        :param page: the page number being scraped
        '''
        parrten=re.compile('<a\s+style="font-weight:\s+bold"\s+par="ssidkey=y&ss=\d+&ff=\d+&sg=\w+&so=\d+"\s+href="(.*?)" rel="external nofollow" target="_blank">.*?</a>',re.S)
        til=parrten.findall(html)  # the extracted links
        for t in til:
            self.deal2(t,page)

    def deal2(self,t,page):
        '''
        Second-stage crawl: on each job page, match the company, salary, and required experience.
        :param t: the job page URL
        :param page: the page number being scraped
        '''
        html=self.load(t)  # HTML of the second-stage page
        parrten1=re.compile('<a\s+onclick=".*?"\s+href=".*?" rel="external nofollow" \s+target="_blank">(.*?)\s+.*?<img\s+class=".*?"\s+src=".*?"\s+border="\d+"\s+vinfo=".*?"></a>',re.S)
        parrten2=re.compile('<li><span>职位月薪:</span><strong>(.*?) <a.*?>.*?</a></strong></li>',re.S)
        parrent3=re.compile('<li><span>工作经验:</span><strong>(.*?)</strong></li>',re.S)
        til1=parrten1.findall(html)
        til2=parrten2.findall(html)
        til3=parrent3.findall(html)
        record=""  # renamed from "str", which shadowed the builtin
        for t in til1:
            t=t.replace('<img title="专属页面" src="//img03.zhaopin.cn/2012/img/jobs/icon.png" border="0" />',"")
            record+=t
            record+="\t"
        for t in til2:
            record+=t
            record+="\t"
        for t in til3:
            record+=t
        self.writeData(record,page)

    def writeData(self,context,page):
        '''
        Write the matched content to a file.
        :param context: the matched content
        :param page: the page number being scraped
        '''
        fileName = "di" + str(page) + "yehtml.txt"
        with open(fileName, "a") as file:
            file.writelines(context + "\n")

if __name__ == '__main__':
    position=raw_input("Job title: ")
    workPlace=raw_input("Work location: ")
    z=zhiLian()
    z.spider(position,workPlace)
Scraper 3
#encoding=utf-8
import urllib
import urllib2
import re

class teiba():
    def spider(self,name,startPage,endPage):
        url="http://tieba.baidu.com/f?ie=utf-8&"
        url+=urllib.urlencode({"kw":name})
        for page in range(startPage,endPage+1):
            pn=50*(page-1)  # each tieba page holds 50 threads
            urlFull=url+"&"+urllib.urlencode({"pn":pn})
            html=self.loadPage(urlFull)  # was loadPage(url), which ignored the page offset
            self.dealPage(html,page)

    def loadPage(self,url):
        header={
            "User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.79 Safari/537.36"
        }
        request=urllib2.Request(url,headers=header)
        response=urllib2.urlopen(request)
        html=response.read()
        return html

    def dealPage(self,html,page):
        partten=re.compile(r'<a\s+rel="noreferrer"\s+href="/p/\d+" rel="external nofollow" \s+title=".*?"\s+target="_blank" class="j_th_tit\s+">(.*?)</a>',re.S)
        titleList=partten.findall(html)
        rstr=r'<span\s+class="topic-tag"\s+data-name=".*?">#(.*?)#</span>'
        for title in titleList:
            title=re.sub(rstr,"",title)  # strip embedded topic-tag spans from the title
            self.writePage(title,page)

    def writePage(self,context,page):
        fileName="di"+str(page)+"yehtml.txt"
        with open(fileName,"a") as file:
            file.writelines(context+"\n")

if __name__ == '__main__':
    name=raw_input("Tieba name: ")
    startPage=raw_input("Start page: ")
    endPage=raw_input("End page: ")
    t=teiba()
    t.spider(name,int(startPage),int(endPage))
Let me give it a try.
Scrape listid
# Snippet; the imports and the listid/listteam/listtime/listshui globals are in the integrated version below.
def spyder_listid():
    headers = {
        "User-Agent": "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; Tablet PC 2.0; wbx 1.0.0; wbxapp 1.0.0; Zoom 3.6.0)"
    }
    response = requests.get('https://www.okooo.com/livecenter/football/', headers=headers)
    text = response.text.encode(response.encoding).decode(response.apparent_encoding)
    global listid
    listid = re.findall('match_detail_([0-9]*)', text)
Scrape listteam
def spyder_listteam(id):
    headers = {
        "User-Agent": "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; Tablet PC 2.0; wbx 1.0.0; wbxapp 1.0.0; Zoom 3.6.0)"
    }
    response = requests.get('https://www.okooo.com/soccer/match/%s/ah/' % (id), headers=headers)
    text = response.text.encode(response.encoding).decode(response.apparent_encoding)
    listteam[id] = (re.findall(r'<div class="qpai_zi jsTeamName">(.*)</div>', text)[0],
                    re.findall(r'<div class="qpai_zi_1 jsTeamName">(.*)</div>', text)[0])
    print(listteam[id])
Scrape listtime and listshui by match id
def spyder_idpage(id):
    headers = {
        "User-Agent": "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; Tablet PC 2.0; wbx 1.0.0; wbxapp 1.0.0; Zoom 3.6.0)"
    }
    response = requests.get('https://www.okooo.com/soccer/match/%s/ah/ajax/?page=0&trnum=0&companytype=BaijiaBooks' % (id), headers=headers)
    # text = response.text.encode(response.encoding).decode(response.apparent_encoding)
    text = response.text
    resultlist = re.findall(r'<td class="feedbackObj" title="开盘时间: 赛前([0-9]*)时([0-9]*)分"><a href="javascript:void\(0\);" class="nolink "><span attval="(.*)">(.*)</span></a></td>', text)
    listtime[id] = []
    listshui[id] = []
    for result in resultlist:
        listtime[id].append(result[0] + '时' + result[1] + '分')
        listshui[id].append(result[2])
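A note on the encode/decode round-trip in these helpers: requests often guesses the wrong charset from the HTTP headers for these pages, and text.encode(response.encoding).decode(response.apparent_encoding) re-decodes with the detected one. Setting response.encoding before reading .text decodes the raw bytes directly and avoids the round-trip; a sketch (fetch_text is just an illustrative name):

    import requests

    def fetch_text(url, headers=None):
        # requests guesses the charset from HTTP headers (often Latin-1 by default);
        # overriding .encoding with the detected apparent_encoding makes .text decode
        # the original bytes correctly, with no re-encode step needed.
        response = requests.get(url, headers=headers)
        response.encoding = response.apparent_encoding
        return response.text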
Integrated version
import requests
import re
# import openpyxl
# import pandas as pd
# import os
# import pymysql
# import mysql.connector
# from bs4 import BeautifulSoup

# Plan:
# 1. From https://www.okooo.com/livecenter/football/, collect today's match IDs into listid
#    (elements look like match_detail_1176180; keep only the numeric ID) and the home-team
#    names into listteam.
# 2. Splice each ID into https://www.okooo.com/soccer/match/XXXXXX/ah/ and fetch that page's source.
# 3. From the source, take every cell like
#    <td class="feedbackObj" title="开盘时间: 赛前268时33分"><a href="javascript:void(0);" class="nolink "><span attval="受平手/半球">受平手/半球</span></a></td>
#    putting "268时33分" into listtime and "受平手/半球" into listshui.
# 4. That yields listid, listteam, listtime, listshui; also read the system time into mydate.
# 5. Insert listid, mydate, listteam, listtime, listshui into database 'zq', columns
#    id, date, name, time, shui. (The commented-out code below zips four undefined lists
#    into a five-column INSERT; a corrected sketch follows the code.)

# Connect to the database
#conn = mysql.connector.connect(
#    user='root',
#    password='qq438443820',
#    host='gz-cdb-k87y5qy1.sql.tencentcdb.com',
#    port='63938',
#    database='zq'
#)
# Create a cursor
#cursor = conn.cursor()
# Walk the lists and execute the SQL
#query = "INSERT INTO zq (id, date, name, time, shui) VALUES (%s, %s, %s, %s, %s)"
#for val in zip(list3, list4, list1, list2):
#    cursor.execute(query, val)
# Commit
#conn.commit()
# Close the cursor and the connection
#cursor.close()
#conn.close()

listid = []
listtime = {}
listshui = {}
listteam = {}

def spyder_listid():
    headers = {
        "User-Agent": "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; Tablet PC 2.0; wbx 1.0.0; wbxapp 1.0.0; Zoom 3.6.0)"
    }
    response = requests.get('https://www.okooo.com/livecenter/football/', headers=headers)
    text = response.text.encode(response.encoding).decode(response.apparent_encoding)
    global listid
    listid = re.findall('match_detail_([0-9]*)', text)

def spyder_listteam(id):
    headers = {
        "User-Agent": "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; Tablet PC 2.0; wbx 1.0.0; wbxapp 1.0.0; Zoom 3.6.0)"
    }
    response = requests.get('https://www.okooo.com/soccer/match/%s/ah/' % (id), headers=headers)
    text = response.text.encode(response.encoding).decode(response.apparent_encoding)
    try:
        listteam[id] = (re.findall(r'<div class="qpai_zi jsTeamName">(.*)</div>', text)[0],
                        re.findall(r'<div class="qpai_zi_1 jsTeamName">(.*)</div>', text)[0])
    except IndexError:  # team names missing from the page
        return False
    return True

def spyder_idpage(id):
    headers = {
        "User-Agent": "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; Tablet PC 2.0; wbx 1.0.0; wbxapp 1.0.0; Zoom 3.6.0)"
    }
    response = requests.get('https://www.okooo.com/soccer/match/%s/ah/ajax/?page=0&trnum=0&companytype=BaijiaBooks' % (id), headers=headers)
    text = response.text
    resultlist = re.findall(r'<td class="feedbackObj" title="开盘时间: 赛前([0-9]*)时([0-9]*)分"><a href="javascript:void\(0\);" class="nolink "><span attval="(.*)">(.*)</span></a></td>', text)
    listtime[id] = []
    listshui[id] = []
    for result in resultlist:
        listtime[id].append(result[0] + '时' + result[1] + '分')
        listshui[id].append(result[2])

if __name__ == "__main__":
    spyder_listid()
    for id in listid:
        spyder_listteam(id)
        spyder_idpage(id)
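The corrected database sketch promised above: the original commented-out step zipped four undefined lists (list1..list4) into a five-column INSERT, so it could not run. This version assumes the zq(id, date, name, time, shui) table from the comments; flattening to one row per odds entry, keyed by match, is my assumption about the intended layout, not the original author's code:

    # Hedged sketch: flatten the per-match dicts into rows and bulk-insert them.
    import time
    import mysql.connector  # pip install mysql-connector-python

    def save_to_db(conn):
        mydate = time.strftime('%Y-%m-%d')  # system date for the `date` column
        rows = []
        for mid in listid:
            home = listteam.get(mid, ('', ''))[0]  # home-team name scraped earlier
            # Pair each opening time with its handicap, one DB row per entry (assumed layout).
            for t, shui in zip(listtime.get(mid, []), listshui.get(mid, [])):
                rows.append((mid, mydate, home, t, shui))
        cursor = conn.cursor()
        cursor.executemany(
            "INSERT INTO zq (id, date, name, time, shui) VALUES (%s, %s, %s, %s, %s)",
            rows)
        conn.commit()
        cursor.close()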
Your page doesn't seem to load for me.
Does it work now? I'll write it right away.
import requests
import re
from random import choice
import time

url = r'https://www.okooo.com/livecenter/football/'
usa = ['Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36',
       'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36',
       'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36',
       'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36',
       'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36',
       'Mozilla/5.0 (Windows NT 6.4; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36',
       'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36',
       'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36',
       'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36',
       'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.124 Safari/537.36',
       'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36',
       'Mozilla/5.0 (Windows NT 4.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36',
       'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36',
       'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36',
       'Mozilla/5.0 (X11; OpenBSD i386) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36']
header = {'User-Agent': choice(usa),  # rotate a random desktop UA per run
          'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
          'Accept-Encoding': 'gzip, deflate, br',
          'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
          'Content-Type': 'application/x-javascript'}
response = requests.get(url, headers=header)
response.encoding = 'gbk'  # the live-center page is served as GBK
content = response.text
URL = 'https://www.okooo.com/soccer/match/{}/ah/'
result = re.findall(r'match_detail_(\d+).*?<a.*?"ctrl_homename jsJumpTo" reversion="0">(.*?)</a>.*?<a.*?"ctrl_awayname jsJumpTo" reversion="0">(.*?)</a>', content, re.DOTALL)
for ID, m, c in result:
    # Format into a copy; reassigning URL itself destroyed the {} placeholder after the first loop.
    matchURL = URL.format(ID)
    curURL = matchURL + r'ajax/?page=0&trnum=0&companytype=BaijiaBooks'
    res = requests.get(curURL, headers=header)
    res.encoding = 'utf-8'
    con1 = res.text
    res2 = re.findall(r'<td class="feedbackObj" title="(.*?)">.*?attval.*?>(.*?)</span></a></td>', con1, re.DOTALL)
    for j, k in res2:
        print(f"ID:{ID} mainteam:{m} time:{j} shui:{k} curTime:{time.strftime('%Y年%m月%d日', time.localtime())}")
Points to note when scraping data with Python.