This is part of the code in train.py:
# Define Loss function.
loss_func = BFLoss()
system = BeamTasNetSystem(pretrain=conf['main_args']['pretrain'],
                          model=model, loss_func=loss_func, optimizer=optimizer,
                          train_loader=train_loader, val_loader=val_loader,
                          scheduler=scheduler, config=conf)

# Define callbacks
checkpoint_dir = os.path.join(exp_dir, 'checkpoints/')
checkpoint = ModelCheckpoint(checkpoint_dir, monitor='val_loss',
                             mode='min', save_top_k=5, verbose=1)
early_stopping = False
if conf['training']['early_stop']:
    early_stopping = EarlyStopping(monitor='val_loss', patience=10,
                                   verbose=1)
这是 "D:\ZhangXu\Code\Beam-Guided-TasNet-main\Beam-Guided-TasNet-main\system.py"的错误行代码
class BeamTasNetSystem(System):
    def __init__(self, pretrain, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.pretrain = pretrain
This is the code at the failing line in "C:\Users\SASPL-1\anaconda3\envs\zx_38\lib\site-packages\asteroid\engine\system.py":
default_monitor: str = "val_loss"

def __init__(
    self,
    model,
    optimizer,
    loss_func,
    train_loader,
    val_loader=None,
    scheduler=None,
    config=None,
):
    super().__init__()
    self.model = model
    self.optimizer = optimizer
    self.loss_func = loss_func
    self.train_loader = train_loader
    self.val_loader = val_loader
    self.scheduler = scheduler
    self.config = {} if config is None else config
    # hparams will be logged to Tensorboard as text variables.
    # summary writer doesn't support None for now, convert to strings.
    # See https://github.com/pytorch/pytorch/issues/33140
    self.hparams = Namespace(**self.config_to_hparams(self.config))

def forward(self, *args, **kwargs):
    """Applies forward pass of the model.

    Returns:
        :class:`torch.Tensor`
    """
    return self.model(*args, **kwargs)
这是"C:\Users\SASPL-1\anaconda3\envs\zx_38\lib\site-packages\torch\nn\modules\module.py"的错误行代码
def __setattr__(self, name: str, value: Union[Tensor, 'Module']) -> None:
    def remove_from(*dicts_or_sets):
        for d in dicts_or_sets:
            if name in d:
                if isinstance(d, dict):
                    del d[name]
                else:
                    d.discard(name)

    params = self.__dict__.get('_parameters')
    if isinstance(value, Parameter):
        if params is None:
            raise AttributeError(
                "cannot assign parameters before Module.__init__() call")
        remove_from(self.__dict__, self._buffers, self._modules, self._non_persistent_buffers_set)
        self.register_parameter(name, value)
    elif params is not None and name in params:
        if value is not None:
            raise TypeError("cannot assign '{}' as parameter '{}' "
                            "(torch.nn.Parameter or None expected)"
                            .format(torch.typename(value), name))
        self.register_parameter(name, value)
    else:
        modules = self.__dict__.get('_modules')
        if isinstance(value, Module):
            if modules is None:
                raise AttributeError(
                    "cannot assign module before Module.__init__() call")
            remove_from(self.__dict__, self._parameters, self._buffers, self._non_persistent_buffers_set)
            modules[name] = value
        elif modules is not None and name in modules:
            if value is not None:
                raise TypeError("cannot assign '{}' as child module '{}' "
                                "(torch.nn.Module or None expected)"
                                .format(torch.typename(value), name))
            modules[name] = value
        else:
            buffers = self.__dict__.get('_buffers')
            if buffers is not None and name in buffers:
                if value is not None and not isinstance(value, torch.Tensor):
                    raise TypeError("cannot assign '{}' as buffer '{}' "
                                    "(torch.Tensor or None expected)"
                                    .format(torch.typename(value), name))
                buffers[name] = value
            else:
                object.__setattr__(self, name, value)
./run.sh --id 0,1,2,3 --stage 3
Results from the following experiment will be stored in exp/train_convtasnet_reverb2reverb_8kmin_e2ff7662
Stage 3: Training
Global seed set to 0
{'data': {'bf_dir': None,
          'mode': 'min',
          'n_src': 2,
          'sample_rate': 8000,
          'task': 'reverb2reverb',
          'train_dir': 'data/2speakers/wav8k/min/tr',
          'valid_dir': 'data/2speakers/wav8k/min/cv'},
 'filterbank': {'kernel_size': 16,
                'n_channels': 4,
                'n_filters': 256,
                'stride': 8},
 'main_args': {'exp_dir': 'exp/train_convtasnet_reverb2reverb_8kmin_e2ff7662/',
               'help': None,
               'pretrain': True},
 'masknet': {'audio_chan': 4,
             'bn_chan': 128,
             'causal': False,
             'conv_kernel_size': 3,
             'hid_chan': 256,
             'in_chan': 256,
             'mask_act': 'relu',
             'n_blocks': 8,
             'n_repeats': 3,
             'norm_type': 'gLN',
             'skip_chan': 128},
 'optim': {'lr': 0.001, 'optimizer': 'adam', 'weight_decay': 0.0},
 'positional arguments': {},
 'training': {'batch_size': 12,
              'early_stop': False,
              'epochs': 75,
              'half_lr': True,
              'num_workers': 8}}
Torch MVDR causality: False
Using stft {'n_filters': 4096, 'kernel_size': 4096, 'stride': 1024}
load data from data/2speakers/wav8k/min/tr\mix_reverb.json and ['data/2speakers/wav8k/min/tr\\s1_reverb.json', 'data/2speakers/wav8k/min/tr\\s2_reverb.json']
Drop 2925 utts(0.26 h) from 20000 (shorter than 32000 samples)
load data from data/2speakers/wav8k/min/cv\mix_reverb.json and ['data/2speakers/wav8k/min/cv\\s1_reverb.json', 'data/2speakers/wav8k/min/cv\\s2_reverb.json']
Drop 701 utts(0.06 h) from 5000 (shorter than 32000 samples)
Traceback (most recent call last):
  File "train.py", line 137, in <module>
    main(arg_dic)
  File "train.py", line 67, in main
    system = BeamTasNetSystem(pretrain=conf['main_args']['pretrain'],
  File "D:\ZhangXu\Code\Beam-Guided-TasNet-main\Beam-Guided-TasNet-main\system.py", line 15, in __init__
    super().__init__(*args, **kwargs)
  File "C:\Users\SASPL-1\anaconda3\envs\zx_38\lib\site-packages\asteroid\engine\system.py", line 61, in __init__
    self.hparams = Namespace(**self.config_to_hparams(self.config))
  File "C:\Users\SASPL-1\anaconda3\envs\zx_38\lib\site-packages\torch\nn\modules\module.py", line 1225, in __setattr__
    object.__setattr__(self, name, value)
AttributeError: can't set attribute
Stage 4: Training
mv: cannot stat 'exp/train_convtasnet_reverb2reverb_8kmin_e2ff7662/checkpoints': No such file or directory
I got this code from GitHub, and the original author tells me he has never run into this problem. He runs the code on Linux, while I am running it on Windows; could that be related to the error? Or could it be an issue with the versions of torch and pytorch-lightning I installed? I installed the latest version of each. I'm just getting started, so any help would be appreciated. Thanks!
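For reference, the exact versions in the environment can be printed with a short diagnostic snippet like this (not part of the repo, just a quick check):

# Quick environment check: print the three packages whose interaction matters here.
import torch
import pytorch_lightning as pl
import asteroid

print("torch:", torch.__version__)
print("pytorch-lightning:", pl.__version__)
print("asteroid:", asteroid.__version__)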
This looks a lot like the following issue, leaving it here for reference: https://github.com/allenai/comet-atomic-2020/issues/17
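Assuming the cause is the same as in that issue: newer pytorch-lightning releases turned `hparams` into a read-only property, so the direct assignment `self.hparams = Namespace(...)` in asteroid's `System.__init__` falls through to `object.__setattr__` and fails with "can't set attribute". A minimal sketch of a workaround is below; either edit the installed asteroid/engine/system.py as shown, or pin pytorch-lightning and asteroid to the versions the repo was developed against. `save_hyperparameters` is the supported hyperparameter API in recent pytorch-lightning, but double-check it against the versions actually installed:

# Sketch, assuming pytorch-lightning >= 1.x semantics: in asteroid/engine/system.py,
# replace the direct assignment
#     self.hparams = Namespace(**self.config_to_hparams(self.config))
# with the hyperparameter API that newer pytorch-lightning expects:
self.save_hyperparameters(self.config_to_hparams(self.config))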
mv: cannot stat 'exp/train_convtasnet_reverb2reverb_8kmin_e2ff7662/checkpoints': No such file or directory
exp/train_convtasnet_reverb2reverb_8kmin_e2ff7662/checkpoints
Nothing exists at this path; check whether the path or the file name is wrong.
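The mv failure is only a downstream symptom here: training crashed before the ModelCheckpoint callback ever saved anything, so the checkpoints folder was never created. If you want the folder to exist regardless of how training ends, it can be created up front in train.py (a small defensive sketch; it does not fix the AttributeError above):

import os

checkpoint_dir = os.path.join(exp_dir, 'checkpoints/')
os.makedirs(checkpoint_dir, exist_ok=True)  # create it before ModelCheckpoint and the mv in run.sh
checkpoint = ModelCheckpoint(checkpoint_dir, monitor='val_loss',
                             mode='min', save_top_k=5, verbose=1)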
Filterbanks has been replaced by asteroid_filterbanks and will be removed completely in a future release. Please use asteroid_filterbanks instead.
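That message is only a deprecation warning and is unrelated to the crash. Switching to the standalone package is just an import change; a minimal sketch using the filterbank settings from the config printed above (it assumes the make_enc_dec helper from asteroid_filterbanks, so verify against the installed version):

# Minimal sketch: build an encoder/decoder pair from the standalone package
# instead of the deprecated asteroid.filterbanks module.
from asteroid_filterbanks import make_enc_dec

# Values taken from the 'filterbank' section of the config printed above.
encoder, decoder = make_enc_dec("free", n_filters=256, kernel_size=16, stride=8)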
Interesting one, OP!