Python web scraping programming exercise

Pick a university of your choice and, from the university's homepage, scrape the institution's URL, name, brief introduction, phone number, and email address. Then go to each subordinate institution's homepage and scrape that institution's URL, name, brief introduction, phone number, and email address. Save all of the scraped data in a single file.
(Preferably using beginner-friendly basics such as beautifulsoup, since I'm a beginner.)
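For the overall flow the assignment describes (fetch a page with requests, parse it with BeautifulSoup, follow the sub-institution links, write everything into one file), a minimal sketch could look like the following. The URL, the a.department selector, and the field extraction are placeholders to be replaced with whatever the chosen university's homepage actually uses:

import csv
import requests
from bs4 import BeautifulSoup

BASE_URL = "http://www.example-university.edu"  # placeholder homepage

rows = []
soup = BeautifulSoup(requests.get(BASE_URL).text, "html.parser")
# ...extract the university's own URL, name, intro, phone, email here...

# Hypothetical selector: follow every sub-institution link on the homepage
for link in soup.select("a.department"):
    dept_url = link.get("href")
    dept_soup = BeautifulSoup(requests.get(dept_url).text, "html.parser")
    # ...extract the sub-institution's URL, name, intro, phone, email here...
    rows.append([dept_url, link.text.strip(), "", "", ""])

# All scraped rows go into one file, as the assignment requires
with open("institutions.csv", "w", newline="", encoding="utf-8") as f:
    csv.writer(f).writerows(rows)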

This one follows the same template... This school doesn't have many institutions or departments, but it's still enough for getting familiar with bs.

from bs4 import BeautifulSoup
import requests
from urllib.parse import urljoin
from openpyxl import Workbook
header = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36"}
url = "http://www.bowenedu.cn/xxgk1/xxjj.htm"

data = []

# Fetch and parse the school-profile page of the university site
html = requests.get(url, headers=header).content.decode('utf-8')
soup = BeautifulSoup(html, "html.parser")

# The page title ends with the school name; the intro sits in div#vsb_content_4
name = soup.find('title').text.split('-')[-1]
intro = soup.find('div', {"id": "vsb_content_4"}).text

# Phone and email appear in <p> tags, prefixed with fixed labels
ps = soup.findAll('p')
phone = ""
email = ""
for p in ps:
    if '电 话:' in p.text:
        phone = p.text.replace('电 话:', '')
    if '电子信箱:' in p.text:
        email = p.text.replace('电子信箱:', '')
data.append([url, name, intro, phone, email])

# Departments and colleges: the 4th top-menu item holds the links to them
depts = soup.select('div.menu li.item')[3].select('.secNav a')
for dept in depts:
    url = dept.get('href')
    if ".asp" not in url:
        continue
    name = dept.text

    # Department pages are GB2312-encoded
    html = requests.get(url, headers=header).content.decode('gb2312')
    soup = BeautifulSoup(html, "html.parser")

    # Phone and email live in the page footer, separated by runs of spaces
    arr = soup.select('#foot')[0].text.strip().split('    ')
    phone = arr[0].replace('学院咨询电话:', '')
    email = arr[1].replace('管理员信箱:', '')

    # The 3rd link in the #guide nav points at the department introduction page;
    # resolve it against the department's own URL instead of a hard-coded host
    introurl = urljoin(url, soup.select('#guide a')[2].get('href'))

    html = requests.get(introurl, headers=header).content.decode('gb2312')
    isoup = BeautifulSoup(html, "html.parser")
    intro = isoup.select('div.down')[0].text

    data.append([url, name, intro, phone, email])



# Write everything into one xlsx workbook
wb = Workbook()
sheet = wb.worksheets[0]  # get the default worksheet
sheet.title = "兰州博文科技学院"  # rename it to the school name
sheet.append(['URL','名称','简要介绍','电话','邮箱'])
for item in data:
    sheet.append(item)
 
wb.save("兰州博文科技学院.xlsx")
print('Scraping finished')
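One fragile spot in the script above is the hard-coded .decode('utf-8') / .decode('gb2312') calls: if a page declares a different charset they raise UnicodeDecodeError. As a sketch (not tied to any particular page of this site), a small helper that lets requests guess the encoding is more forgiving:

def fetch_html(url):
    # Fall back to requests' detected encoding when the HTTP headers
    # don't declare one (requests then defaults to ISO-8859-1)
    resp = requests.get(url, headers=header, timeout=10)
    if not resp.encoding or resp.encoding.lower() == "iso-8859-1":
        resp.encoding = resp.apparent_encoding
    return resp.text

# e.g. html = fetch_html(url) in place of the manual decode calls above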

import requests
import csv
from bs4 import BeautifulSoup

# Send an HTTP request and fetch the HTML document
url = "http://www.example.com"
response = requests.get(url)
html = response.text

# Parse the HTML document with BeautifulSoup
soup = BeautifulSoup(html, "html.parser")

# Find all institution blocks
institutions = soup.find_all("div", class_="institution")

# Iterate over the institutions and extract each one's URL, name, brief intro, phone, and email
for institution in institutions:
    # Institution URL
    url = institution.find("a").get("href")
    # Institution name
    name = institution.find("h3").text
    # Brief introduction
    brief = institution.find("p").text
    # Phone number
    phone = institution.find("span", class_="phone").text
    # Email address
    email = institution.find("span", class_="email").text

    # Append the institution's details to a text file
    with open("institutions.txt", "a", encoding="utf-8") as f:
        f.write(f"URL: {url}\n")
        f.write(f"名称: {name}\n")
        f.write(f"简要介绍: {brief}\n")
        f.write(f"电话: {phone}\n")
        f.write(f"邮箱: {email}\n\n")

    # Print the institution's details
    print(f"URL: {url}\n")
    print(f"名称: {name}\n")
    print(f"简要介绍: {brief}\n")
    print(f"电话: {phone}\n")
    print(f"邮箱: {email}\n")

# Also save all institution details to a single CSV file
with open("institutions.csv", "w", newline="", encoding="utf-8") as f:
    writer = csv.writer(f)
    # Header row
    writer.writerow(["URL", "名称", "简要介绍", "电话", "邮箱"])
    for institution in institutions:
        url = institution.find("a").get("href")
        name = institution.find("h3").text
        brief = institution.find("p").text
        phone = institution.find("span", class_="phone").text
        email = institution.find("span", class_="email").text
        writer.writerow([url, name, brief, phone, email])
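The template above also assumes every find() call succeeds; on a real page a missing element makes .text raise AttributeError, because find() returns None. A small helper for optional fields (the tag names and classes are still the hypothetical ones from the template):

def text_or_blank(parent, name, **attrs):
    # find() returns None when the element is missing;
    # return an empty string instead of crashing on .text
    node = parent.find(name, **attrs)
    return node.text.strip() if node else ""

# Usage inside the loop above:
#   phone = text_or_blank(institution, "span", class_="phone")
#   email = text_or_blank(institution, "span", class_="email")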

Let me find a school and write one.

Almost done writing it.

