Web scraper data extraction fails

My scraper's data extraction is failing. Here is the code:
import requests
from bs4 import BeautifulSoup
import time  # import the required libraries
headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
        ' AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'
}   # request headers

def get_info(url):
    '''
    :param url: the URL to request
    :return: None
    '''
    wb_data = requests.get(url, headers=headers)  # send the HTTP request
    soup = BeautifulSoup(wb_data.text, 'lxml')   # parse the page; wb_data.text is the HTML source
    ranks = soup.select('span.pc_temp_num')
    titles = soup.select('div.pc_temp_songlist > u1 > li >a')
    times = soup.select('span.pc_temp_tips_r > span')
    for rank, title, time in zip(ranks, titles, times):
       data = {
           'rank': rank.get_text().strip(),
           'singer': title.get_text().split('-')[0],
           'song': title.get_text().split('-')[1],
           'time': time.get_text().strip()
       }
       print(data)

if __name__ == '__main__':
    urls = ['https://www.kugou.com/yy/rank/home/{}-8888.html'.format(str(i)) for i in range(1,24)] # Kugou Music chart pages
    for url in urls:
        get_info(url)
    time.sleep(1)


The output looks like this:

[screenshot of the run output]

Could the problem be with the request headers?
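One rough way to rule the headers in or out (a sketch, not a definitive test) is to request a single chart page and check the status code and whether the ranking markup comes back; the URL here is just the first page from the loop above.

import requests

headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
        ' AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'
}
resp = requests.get('https://www.kugou.com/yy/rank/home/1-8888.html', headers=headers)
print(resp.status_code)                   # 200 means the server accepted the request and headers
print('pc_temp_songlist' in resp.text)    # True means the song-list markup is in the response

The version below has the titles selector commented out: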

import requests
from bs4 import BeautifulSoup
import time  # import the required libraries
headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
        ' AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'
}   # request headers
 
def get_info(url):
    '''
    :param url: the URL to request
    :return: None
    '''
    wb_data = requests.get(url, headers=headers)  # send the HTTP request
    soup = BeautifulSoup(wb_data.text, 'lxml')   # parse the page; wb_data.text is the HTML source
    ranks = soup.select('span.pc_temp_num')
    # titles = soup.select('div.pc_temp_songlist > u1 > li >a')
    times = soup.select('span.pc_temp_tips_r > span')
    for rank, time in zip(ranks,  times):
       data = {
           'rank': rank.get_text().strip(),
           # 'singer': title.get_text().split('-')[0],
           # 'song': title.get_text().split('-')[1],
           'time': time.get_text().strip()
       }
       print(data)
 
if __name__ == '__main__':
    urls = ['https://www.kugou.com/yy/rank/home/{}-8888.html'.format(str(i)) for i in range(1,24)] # Kugou Music chart pages
    for url in urls:
        get_info(url)
        time.sleep(1)
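If the request itself succeeds, the empty output from the full script is consistent with one selector returning an empty list: zip() stops at its shortest input, so an empty titles list means the loop body never runs and nothing is printed. Below is a sketch for narrowing down which selector matches nothing; the 'ul' variant is an assumption on my part, since the 'u1' (letter u, digit one) in the original selector would match nothing if the page actually uses a <ul> tag.

import requests
from bs4 import BeautifulSoup

headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
        ' AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'
}
wb_data = requests.get('https://www.kugou.com/yy/rank/home/1-8888.html', headers=headers)
soup = BeautifulSoup(wb_data.text, 'lxml')

selectors = [
    'span.pc_temp_num',                      # rank
    'div.pc_temp_songlist > u1 > li > a',    # titles, as written in the question ("u1")
    'div.pc_temp_songlist > ul > li > a',    # assumed alternative using a <ul> tag
    'span.pc_temp_tips_r > span',            # time
]
for sel in selectors:
    print(len(soup.select(sel)), sel)        # a count of 0 points at the failing selector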