关于异步爬取:爬取网站出现RuntimeError: Event loop is closed


import asyncio
import time
import aiohttp
import requests
from lxml import etree

urls = 'xxxxx' #链接地址
url = requests.get(urls)
url.encoding = 'gb2312'

async def down_lode(src_01,session):#此处出现了问题,求助大神解决
    name = src_01.split('/',1)[0]
    async with session.get(src_01) as reps:
        async with open(f'2021.09.30&/{name}','wb',) as f:
            await f.writh(await reps.content.read())


async def page_lode(src_):
    rep = requests.get(src_)
    rep.encoding = 'utf-8'
    tree = etree.HTML(rep.text)
    src_ = tree.xpath('//div[@class ="content"]/img/@src')

    tasks = []
    async with aiohttp.ClientSession() as session:
        for href in range(len(src_)):
               src_01 = src_[href]
               c = asyncio.create_task(down_lode(src_01, session))
               tasks.append(c)
        await asyncio.wait(tasks)


async def main(url):

    tree = etree.HTML(url.text)
    src = tree.xpath('//div[@class ="ind2"]//a/@href')
    name = tree.xpath('//div[@class ="ind2"]//a/img/@alt')
    tasks = []
    for src_name_ in range(len(src)):
         src_ = src[src_name_]
         d = asyncio.create_task(page_lode(src_))
         tasks.append(d)
    await asyncio.wait(tasks)


if __name__ == '__main__':

    asyncio.run(main(url))
    asyncio.run(page_lode(src_))

```

把异步改为同步实现就没有问题了。