The code runs, but I can't find the output CSV file

Problem description and background
import pandas as pd
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from time import sleep
import csv
# The last few columns of the scraped results need manual cleanup (a few extra columns may appear)


def get_infos(ID):
    # Look up one registration ID on the LIS report page and collect every report row for it
    driver.get(r"http://192.168.3.252/xhlisweb-inspection_id/XHlisWebReport.aspx")
    sleep(1)
    driver.find_element_by_name("txtoutpatient_id").send_keys(ID)
    driver.find_element_by_name("btnConfirm").click()
    cols = driver.find_elements_by_xpath('''//tr[contains(@onclick, "return btnClick")]''')
    times = len(cols)

    # Approach: click each result row (patient ID) in turn, then read the detail table below it

    infos = []
    for i in range(times):
        # Reload the search page on every iteration: clicking a row changes the page,
        # so the elements found earlier would go stale.
        driver.get(r"http://192.168.3.252/xhlisweb-inspection_id/XHlisWebReport.aspx")
        sleep(2)
        driver.find_element_by_name("txtoutpatient_id").send_keys(ID)
        driver.find_element_by_name("btnConfirm").click()
        cols = driver.find_elements_by_xpath('''//tr[contains(@onclick, "return btnClick")]''')
        col = cols[i]
        col_info = col.text.split(' ')[:14]
        col.click()
        items = driver.find_elements_by_xpath("//div[@id='report-content']//tbody//tr")[1:]
        for item in items:
            a = item.text.split(' ')
            try:
                a.remove('')  # drop the first empty field, if any
            except ValueError:
                pass
            # Normalise each result row to exactly 7 fields: pad with '' or truncate the extras
            if len(a) < 7:
                a += [''] * (7 - len(a))
            else:
                a = a[:7]
            infos.append([ID] + col_info + a)

    return infos


# start is at least 0, end is at most 641
start = 200
end = 641

data = pd.read_excel(r"C:\Users\cc\Desktop\资料\数据录入\ALL_raw.xlsx")
IDs = data['登记号'].tolist()[start:end]
IDs = ["0005248871", "0010610644"]  # debug override: remove this line to use the full slice above


options = Options()
options.binary_location = r"C:\Program Files (x86)\Google\Chrome\Application\chrome.exe"
s = Service(r"C:\Program Files (x86)\Google\Chrome\Application\chromedriver.exe")
# options must be passed to the driver, otherwise binary_location is ignored
driver = webdriver.Chrome(service=s, options=options)
driver.maximize_window()

ALL = []
for ID in IDs:

    try:
        infos = get_infos(ID)
        ALL += infos
    except Exception:
        # A failing ID is skipped silently; if every lookup fails, the CSV will only contain the header row
        pass

headers = ['ID', '序号', '检验单', '病员号', '类型', '送检', '目的', '姓名', '性别', '年龄', '科别', '病区', '工作组', '审核人员', '审核日期', '审核时间', 'NO', '英文名称', '检验项目', '结果', '单位', '状态', '参考值']
# Relative filename: the CSV is created in the current working directory
with open(r"result_检验_" + str(start) + "_" + str(end) + ".csv", 'w', newline='') as f:
    f_csv = csv.writer(f)
    f_csv.writerow(headers)
    for i in ALL:
        f_csv.writerow(i)


sleep(3)
driver.quit()


What I want to achieve

I can't see where the scraped file ended up. How should the code be written so that I actually get the scraped CSV file?

with open(r"result_检验_" + str(start) + "_" + str(end) + ".csv", 'w', newline='') as f:

The filename passed to open() here is a relative path, so Python creates the CSV in the current working directory. When you run the script from its own folder (or hit "Run" in the IDE), that is normally the same directory as the .py file, so look for result_检验_200_641.csv next to the script. If the script is started from a different directory, the file is written there instead.
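
If you want to confirm exactly where the file lands, or pin it next to the script regardless of the working directory, a minimal sketch along these lines should work (standard library only; it reuses the start, end, headers and ALL variables from the code above):

import csv
import os
from pathlib import Path

out_name = "result_检验_" + str(start) + "_" + str(end) + ".csv"

# Option 1: keep the relative name, but print where it actually resolves to
print("CSV will be written to:", os.path.abspath(out_name))

# Option 2: build an absolute path next to this .py file, independent of the working directory
out_path = Path(__file__).resolve().parent / out_name
with open(out_path, 'w', newline='') as f:
    f_csv = csv.writer(f)
    f_csv.writerow(headers)
    f_csv.writerows(ALL)
print("CSV written to:", out_path)

You could also just pass an absolute path of your choice (for example something like r"C:\Users\cc\Desktop\result.csv", used here purely as an illustration) to open(); the key point is that a bare filename always goes to whatever directory the script was launched from.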