```
# Got all the links on the page, but cannot get the data inside each individual link
import requests
from time import sleep
from selenium import webdriver
from selenium.webdriver.common.by import By
driver = webdriver.Firefox()
driver.get('https://www.amazon.de/')
word = input('Enter the search keyword: ')
driver.find_element(by=By.NAME, value="field-keywords").send_keys(word)
sleep(2)
driver.find_element(By.XPATH, "//input[@type='submit']").click()
driver.find_element(By.ID, "nav-search-submit-button").click()
url = 'https://www.amazon.de/s?k={}'.format(word)
headers = {
    'User-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/112.0',
    'Referer': 'https://www.amazon.de/'
}
res = requests.get(url=url, headers=headers)
html_data = res.text
for links in driver.find_elements(By.XPATH,
                                  '//*[@class="a-link-normal s-underline-text s-underline-link-text s-link-style '
                                  'a-text-normal"]'):
    sleep(1)
    print(links.get_attribute('href'))
a = []
for links in driver.find_elements(By.XPATH,
                                  '//*[@class="a-link-normal s-underline-text s-underline-link-text s-link-style '
                                  'a-text-normal"]'):
    sleep(1)
    print(links.get_attribute('href'))
    a.append(links.get_attribute('href'))
    driver.find_element(By.XPATH, '//*[@class="a-link-normal s-underline-text s-underline-link-text s-link-style '
                                  'a-text-normal"]').click()
    driver.find_element(By.ID, "sellerProfileTriggerId").click()
    box = driver.find_element(By.XPATH, "/html/body/div[1]/div[2]/div/div/div/div/div[9]/div/div/div").text
    print(box)
    driver.back()
    driver.back()
for i in adriver.find_element(By.XPATH, '//*[@class="a-link-normal s-underline-text s-underline-link-text s-link-style '
                                        'a-text-normal"]').click():
    print(i)
element = WebDriverWait(driver, 10).until(EC.element_to_be_clickable(
    (By.XPATH, '//*[@class="a-link-normal s-underline-text s-underline-link-text s-link-style a-text-normal"]')))
element.click()
```
The error reported is:
```
Traceback (most recent call last):
File "C:/Users/Administrator/PycharmProjects/pythonProject/amzone/进阶.py", line 36, in <module>
print(links.get_attribute('href'))
File "F:\venv\lib\site-packages\selenium\webdriver\remote\webelement.py", line 179, in get_attribute
f"/* getAttribute */return ({getAttribute_js}).apply(null, arguments);", self, name
File "F:\venv\lib\site-packages\selenium\webdriver\remote\webdriver.py", line 500, in execute_script
return self.execute(command, {"script": script, "args": converted_args})["value"]
File "F:\venv\lib\site-packages\selenium\webdriver\remote\webdriver.py", line 440, in execute
self.error_handler.check_response(response)
File "F:\venv\lib\site-packages\selenium\webdriver\remote\errorhandler.py", line 245, in check_response
raise exception_class(message, screen, stacktrace)
selenium.common.exceptions.StaleElementReferenceException: Message: The element with the reference d629e098-386b-4e55-abac-2271d0ca6c39 is stale; either its node document is not the active document, or it is no longer connected to the DOM
Stacktrace:
RemoteError@chrome://remote/content/shared/RemoteError.sys.mjs:8:8
WebDriverError@chrome://remote/content/shared/webdriver/Errors.sys.mjs:182:5
StaleElementReferenceError@chrome://remote/content/shared/webdriver/Errors.sys.mjs:484:5
element.getKnownElement@chrome://remote/content/marionette/element.sys.mjs:488:11
deserializeJSON@chrome://remote/content/marionette/json.sys.mjs:233:33
cloneObject/result<@chrome://remote/content/marionette/json.sys.mjs:50:52
cloneObject@chrome://remote/content/marionette/json.sys.mjs:50:25
deserializeJSON@chrome://remote/content/marionette/json.sys.mjs:244:16
cloneObject@chrome://remote/content/marionette/json.sys.mjs:56:24
deserializeJSON@chrome://remote/content/marionette/json.sys.mjs:244:16
json.deserialize@chrome://remote/content/marionette/json.sys.mjs:248:10
receiveMessage@chrome://remote/content/marionette/actors/MarionetteCommandsChild.sys.mjs:85:30
```
Initial fix attempts with driver.refresh and time.sleep still produce the same error.
Quoting ChatGPT for this answer: the error is caused by a stale element. This usually happens because the page changed after the element was obtained, so the old element reference can no longer be used.
You need to re-locate the element you want to click instead of reusing the element obtained earlier.
Also, when extracting data after clicking a link, wait for the page to finish loading before locating elements, otherwise you may get element-not-found errors. You can use WebDriverWait to wait for a specific element to appear.
Here is a modified code example for reference:
```
import requests
from time import sleep
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Firefox()
driver.get('https://www.amazon.de/')
word = input('Enter the search keyword: ')
driver.find_element(by=By.NAME, value="field-keywords").send_keys(word)
sleep(2)
driver.find_element(By.CSS_SELECTOR, "input.nav-input[type='submit']").click()
# Wait for the search results to load
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, "div.s-search-results")))
# Collect the links from the search results
links = driver.find_elements(By.CSS_SELECTOR, 'a.a-link-normal.s-no-outline')
url_list = [link.get_attribute('href') for link in links]
# Visit each link and extract the data
for url in url_list:
    driver.get(url)
    # Wait for the product page to load
    WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, "productTitle")))
    # Get the data
    seller_link = driver.find_element(By.ID, "sellerProfileTriggerId").get_attribute('href')
    print(seller_link)
driver.quit()
```
In this code, By.CSS_SELECTOR is used to locate elements, and WebDriverWait is used to wait for specific elements to appear. Inside the loop, each link is opened and the data is only extracted after the page has finished loading.
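A caveat not covered in the answer above: some product pages have no sellerProfileTriggerId link at all, so the find_element call will raise NoSuchElementException. A minimal, hedged variant of the loop body (reusing driver and url_list from the code above) that simply skips such pages could look like this:
```
from selenium.common.exceptions import NoSuchElementException, TimeoutException

for url in url_list:
    driver.get(url)
    try:
        # Wait for the product page; skip it if it never finishes loading
        WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.ID, "productTitle")))
        # Not every listing exposes a third-party seller link
        seller_link = driver.find_element(By.ID, "sellerProfileTriggerId").get_attribute('href')
        print(seller_link)
    except (NoSuchElementException, TimeoutException):
        print('No seller link on this page, skipping:', url)
```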
The following answer was written jointly by the GPT-3.5 model and blogger 波罗歌:
The error is a StaleElementReferenceException: the element Selenium obtained is no longer in the current DOM. Try an explicit wait so the element is loaded before you use it.
Also, you are reusing the driver object while looping over the links; it is better to request each link with the requests library and parse the page with BeautifulSoup to extract the data you need.
The modified code is as follows (for reference only):
```
import requests
from time import sleep
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
driver = webdriver.Firefox()
driver.get('https://www.amazon.de/')
word = input('Enter the search keyword: ')
driver.find_element(by=By.NAME, value="field-keywords").send_keys(word)
sleep(2)
driver.find_element(By.XPATH, "//input[@type='submit']").click()
driver.find_element(By.ID, "nav-search-submit-button").click()
url = 'https://www.amazon.de/s?k={}'.format(word)
headers = {
    'User-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/112.0',
    'Referer': 'https://www.amazon.de/'
}
res = requests.get(url=url, headers=headers)
html_data = res.text
soup = BeautifulSoup(html_data, "html.parser")
links_list = []
for links in soup.find_all('a', {'class': 'a-link-normal s-underline-text s-underline-link-text s-link-style a-text-normal'}):
    link = links.get('href')
    if link.startswith('/'):
        link = 'https://www.amazon.de' + link
    links_list.append(link)
for link in links_list:
    res = requests.get(url=link, headers=headers)
    html_data = res.text
    soup = BeautifulSoup(html_data, "html.parser")
    try:
        box = soup.find("div", {"id": "sel
```
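As a rough illustration only (my own sketch, not the original answer's code), the extraction that the snippet above breaks off in the middle of could continue along these lines, reusing the sellerProfileTriggerId id that the question's code clicks on; whether Amazon includes that element in the plain HTML served to requests is not guaranteed:
```
# Hypothetical continuation (not from the original answer): look for the
# seller link on each product page by the id used in the question's code.
for link in links_list:
    res = requests.get(url=link, headers=headers)
    soup = BeautifulSoup(res.text, "html.parser")
    seller = soup.find(id="sellerProfileTriggerId")
    if seller is not None:
        print(seller.get_text(strip=True), seller.get('href'))
    else:
        print('No seller link found for', link)
```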
Line 36 of your code comes right after a click(), so at that point you need to wait for the click() action to finish, together with the page navigation or page update it triggers.
You can use WebDriverWait for that wait:
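A minimal sketch of such a wait, assuming the result-link class from the question and the WebDriverWait / expected_conditions / By imports shown in the earlier answer; after driver.back() the old element references are stale, so the links have to be located again:
```
# Wait until the result links are attached to the new DOM before touching
# them again, and re-find them instead of reusing the stale references.
result_link_css = ('a.a-link-normal.s-underline-text.s-underline-link-text.'
                   's-link-style.a-text-normal')
WebDriverWait(driver, 10).until(
    EC.presence_of_all_elements_located((By.CSS_SELECTOR, result_link_css)))
fresh_links = driver.find_elements(By.CSS_SELECTOR, result_link_css)
print(fresh_links[0].get_attribute('href'))
```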
You can use Python's requests and BeautifulSoup libraries to scrape data from a web page. The concrete steps are as follows:
Example: fetch the page's HTML source with requests
```
import requests

url = "https://www.example.com/news/123"  # assume this URL is an article on a news site
response = requests.get(url)
html = response.text  # the HTML source
```
Example: use BeautifulSoup to extract the title, publication time, author, and body of the news page
```
from bs4 import BeautifulSoup

soup = BeautifulSoup(html, 'html.parser')
title = soup.find('h1', class_='title').text       # news title
time = soup.find('span', class_='time').text       # publication time
author = soup.find('div', class_='author').text    # author info
content = soup.find('div', class_='content').text  # article body
```
Example: use pandas to save the news data to a CSV file
```
import pandas as pd

df = pd.DataFrame({'title': [title], 'time': [time], 'author': [author], 'content': [content]})
df.to_csv('news.csv', index=False, encoding='utf-8')  # save to a CSV file
```
Summary:
These are the general steps for pulling specific data out of a page link. requests and BeautifulSoup are commonly used Python libraries that make this kind of scraping straightforward; the exact implementation will vary with each page's HTML structure and the type of data involved.
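Putting the three steps together, a minimal end-to-end sketch; the URL and the class names 'title', 'time', 'author' and 'content' are placeholders carried over from the examples above, not selectors for any real site:
```
import requests
import pandas as pd
from bs4 import BeautifulSoup

url = "https://www.example.com/news/123"  # placeholder article URL
response = requests.get(url, timeout=10)
response.raise_for_status()

soup = BeautifulSoup(response.text, 'html.parser')

def text_or_empty(tag):
    """Return the tag's text, or '' if the selector matched nothing."""
    return tag.text.strip() if tag is not None else ''

row = {
    'title': text_or_empty(soup.find('h1', class_='title')),
    'time': text_or_empty(soup.find('span', class_='time')),
    'author': text_or_empty(soup.find('div', class_='author')),
    'content': text_or_empty(soup.find('div', class_='content')),
}

pd.DataFrame([row]).to_csv('news.csv', index=False, encoding='utf-8')
```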