I am running torch on a local machine without a GPU and want to remove the parts of the code that rely on CUDA.
""" Example for biomarker identification
"""
import os
import copy
from feat_importance import cal_feat_imp, summarize_imp_feat
from utils import load_model_dict
if __name__ == "__main__":
data_folder = 'BRCA'
model_folder = os.path.join(data_folder, 'models')
view_list = [1,2,3]
if data_folder == 'ROSMAP':
num_class = 2
if data_folder == 'BRCA':
num_class = 5
featimp_list_list = []
for rep in range(5):
featimp_list = cal_feat_imp(data_folder, os.path.join(model_folder, str(rep+1)),
view_list, num_class)
featimp_list_list.append(copy.deepcopy(featimp_list))
summarize_imp_feat(featimp_list_list)
AssertionError Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_11144/1698963967.py in <module>
18 for rep in range(5):
19 featimp_list = cal_feat_imp(data_folder, os.path.join(model_folder, str(rep+1)),
---> 20 view_list, num_class)
21 featimp_list_list.append(copy.deepcopy(featimp_list))
22 summarize_imp_feat(featimp_list_list)
D:\python\Book\MOGONET-main\feat_importance.py in cal_feat_imp(data_folder, model_folder, view_list, num_class)
33 if cuda:
34 model_dict[m].cuda()
---> 35 model_dict = load_model_dict(model_folder, model_dict)
36 te_prob = test_epoch(data_trte_list, adj_te_list, trte_idx["te"], model_dict)
37 if num_class == 2:
D:\python\Book\MOGONET-main\utils.py in load_model_dict(folder, model_dict)
127 if os.path.exists(os.path.join(folder, module+".pth")):
128 # print("Module {:} loaded!".format(module))
--> 129 model_dict[module].load_state_dict(torch.load(os.path.join(folder, module+".pth"), map_location=torch.device('cpu')))
130 else:
131 print("WARNING: Module {:} from model_dict is not loaded!".format(module))
D:\Ancondada\envs\torch\lib\site-packages\torch\cuda\__init__.py in current_device()
477 def current_device() -> int:
478 r"""Returns the index of a currently selected device."""
--> 479 _lazy_init()
480 return torch._C._cuda_getDevice()
481
D:\Ancondada\envs\torch\lib\site-packages\torch\cuda\__init__.py in _lazy_init()
206 "multiprocessing, you must use the 'spawn' start method")
207 if not hasattr(torch._C, '_cuda_getDeviceCount'):
--> 208 raise AssertionError("Torch not compiled with CUDA enabled")
209 if _cudart is None:
210 raise AssertionError(
AssertionError: Torch not compiled with CUDA enabled
I changed map_location in the load_model_dict() function in utils.py to load onto the CPU (the version shown in the traceback above already has that edit), but it still fails.
What I want to achieve: the program runs successfully.
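For reference, the edit described above amounts to something like the following; the body of load_model_dict() is reconstructed from the frames shown in the traceback (lines 127-131 of utils.py), so treat it as a sketch rather than the exact file contents:

import os
import torch

def load_model_dict(folder, model_dict):
    for module in model_dict:
        if os.path.exists(os.path.join(folder, module+".pth")):
            # Force every saved module onto the CPU, regardless of the device it was saved from
            model_dict[module].load_state_dict(
                torch.load(os.path.join(folder, module+".pth"),
                           map_location=torch.device('cpu')))
        else:
            print("WARNING: Module {:} from model_dict is not loaded!".format(module))
    return model_dict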
First, make sure the torch you have installed is the CPU build.
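The AssertionError at the bottom of the traceback is raised by torch.cuda._lazy_init(), which fires as soon as anything calls into torch.cuda on a build without CUDA support, so every code path that touches torch.cuda has to be guarded or removed as well. Below is a minimal sketch of how to verify the installed build and guard the .cuda() calls; the cuda flag mirrors the one visible at line 33 of feat_importance.py in the traceback, while the to_device() helper is hypothetical and only illustrates the pattern:

import torch

# 1) Confirm which build is installed: a CPU-only build reports no CUDA support.
print(torch.__version__)          # pip CPU wheels usually carry a "+cpu" suffix
print(torch.version.cuda)         # None on a CPU-only build
print(torch.cuda.is_available())  # False on this machine; this call does not raise

# 2) Guard every .cuda() call so the same code also runs on CPU-only machines.
cuda = torch.cuda.is_available()

def to_device(model_dict, use_cuda):
    # Hypothetical helper: move modules to the GPU only when CUDA is actually usable.
    for m in model_dict:
        if use_cuda:
            model_dict[m].cuda()
    return model_dict

With map_location=torch.device('cpu') in load_model_dict() and the .cuda() calls guarded this way, the script should no longer touch torch.cuda on a CPU-only install; if the error persists after editing utils.py, restart the Jupyter kernel so the edited module is actually re-imported.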