import torch
from torch import nn
import torch.nn.functional as F

class RNN(nn.Module):
    def __init__(self, voc_size):
        super(RNN, self).__init__()
        self.voc_size = voc_size
        # Single-layer RNN with a 5000-unit hidden state, decoded back to vocabulary size
        self.rnn_layer = nn.RNN(voc_size, 5000)
        self.linear = nn.Linear(5000, voc_size)

    def forward(self, inputs, state_a):
        # One token index -> one-hot tensor of shape (seq_len=1, batch=1, voc_size)
        one_hot = F.one_hot(torch.Tensor([inputs]).long(), self.voc_size).float().reshape(1, 1, self.voc_size)
        Y, state = self.rnn_layer(one_hot, state_a)
        Y = Y.reshape(-1, Y.shape[-1])
        out_put = self.linear(Y)
        return out_put, state

    def begin_state(self):
        # Initial hidden state: (num_layers, batch, hidden_size)
        return torch.zeros((1, 1, 5000))
model = RNN(14243)
state = model.begin_state()
optimizer = torch.optim.Adam(model.parameters(), lr=0.02)
loss_func = torch.nn.CrossEntropyLoss()

for i in range(len(train) - 1):
    optimizer.zero_grad()
    # state still carries the autograd graph from the previous iteration
    out_put, state = model(train[i], state)
    # target is the next token in the sequence
    flag = torch.Tensor([train[i + 1]]).long()
    loss = loss_func(out_put, flag)
    loss.backward(retain_graph=True)
    optimizer.step()
Running this loop fails on the second pass with:

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-...> in <module>
     10
     11     loss=loss_func(out_put,flag)
---> 12     loss.backward(retain_graph=True)
     13
     14     optimizer.step()

~/anaconda3/lib/python3.8/site-packages/torch/tensor.py in backward(self, gradient, retain_graph, create_graph)
    219                 retain_graph=retain_graph,
    220                 create_graph=create_graph)
--> 221         torch.autograd.backward(self, gradient, retain_graph, create_graph)
    222
    223     def register_hook(self, hook):

~/anaconda3/lib/python3.8/site-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables)
    128         retain_graph = create_graph
    129
--> 130     Variable._execution_engine.run_backward(
    131         tensors, grad_tensors, retain_graph, create_graph,
    132         allow_unreachable=True)  # allow_unreachable flag

RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.FloatTensor [5000, 5000]], which is output 0 of TBackward, is at version 2; expected version 1 instead. Hint: the backtrace further above shows the operation that failed to compute its gradient. The variable in question was changed in there or anywhere later. Good luck!
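
The in-place modification the error points at is the optimizer update itself. optimizer.step() rewrites the RNN's 5000x5000 hidden-to-hidden weight matrix (the [torch.FloatTensor [5000, 5000]] in the message) in place, but state still references the autograd graph built with the old weight values, and retain_graph=True keeps that stale graph alive into the next iteration, where backward() fails the version check. A minimal sketch of the usual fix, assuming train is the same sequence of token indices as above: detach the hidden state at the top of each iteration so every backward() call only spans the current step.

for i in range(len(train) - 1):
    optimizer.zero_grad()
    # Keep the hidden state's values but drop its old graph, so backward()
    # never touches tensors that optimizer.step() has since modified in place
    state = state.detach()
    out_put, state = model(train[i], state)
    flag = torch.Tensor([train[i + 1]]).long()
    loss = loss_func(out_put, flag)
    loss.backward()  # retain_graph is no longer needed
    optimizer.step()

Note that detaching at every step amounts to truncated backpropagation through time with a truncation length of 1; to let gradients flow across several tokens, run a window of tokens through the RNN in a single forward pass before calling backward().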