A question about PyTorch's backward()

Symptoms and background

While training a GAN I wrote myself, PyTorch keeps raising "Trying to backward through the graph a second time", even though my code looks basically the same as the examples I found online. Why does this error occur?
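
For reference, the error message itself is easy to reproduce in isolation; calling backward() twice on the same graph is enough (a minimal example, not taken from my project):

import torch

x = torch.randn(3, requires_grad=True)
y = (x * x).sum()
y.backward()  # the first backward() frees the graph's saved tensors
y.backward()  # raises the same "Trying to backward through the graph a second time"

But in my training loop I only call backward() once on each loss, so I don't see where the "second time" comes from.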

Relevant code
import torch
from datasets_load import Anime_data
from torch.utils.data import DataLoader
from model_net import DCGAN_generator, DCGAN_discriminator
# import visdom

# vis = visdom.Visdom(env='generation images')

epoch = 10
learn_rate = 0.0001
batch_size = 64

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu")
datasets_train = Anime_data('./datasets', 64, mode='train')
datasets_test = Anime_data('./datasets', 64, mode='test')
train_dataloader = DataLoader(datasets_train, batch_size=batch_size, shuffle=True)
test_dataloader = DataLoader(datasets_test, batch_size=batch_size, shuffle=True)
train_size = len(datasets_train)
test_size = len(datasets_test)
print("----------device:{}----------".format(device))
print("----------train size:{}----------".format(train_size))
print("----------test size:{}----------".format(test_size))

generator = DCGAN_generator()
discriminator = DCGAN_discriminator()
generator = generator.to(device)
discriminator = discriminator.to(device)

# loss function
loss_fn = torch.nn.BCELoss()
loss_fn = loss_fn.to(device)

# Optimizers, one per network
gen_optim = torch.optim.Adam(generator.parameters(), lr=learn_rate)
dis_optim = torch.optim.Adam(discriminator.parameters(), lr=learn_rate)

for i in range(epoch):
    total_loss_gen = 0
    total_loss_dis = 0
    print("----------epoch:{}----------".format(i))
    for data in train_dataloader:
        imgs, targets = data
        size = imgs.shape[0]
        imgs = imgs.to(device)
        targets = targets.to(device)
        random_noise = torch.randn(size, 100, device=device)
        # Discriminator step: real images should be classified as real (1)
        discriminator.train()
        dis_optim.zero_grad()
        real_output = discriminator(imgs)
        dis_real_loss = loss_fn(real_output, torch.ones_like(real_output))
        dis_real_loss.backward()
        # Generate fake images; the discriminator should classify them as fake (0)
        gen_imgs = generator(random_noise)
        fake_output = discriminator(gen_imgs)
        dis_fake_loss = loss_fn(fake_output, torch.zeros_like(fake_output))
        dis_fake_loss.backward()
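        # (the backward() above runs through both the discriminator and the
        # generator, since gen_imgs is never detached, and it frees the saved
        # tensors along that entire path)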
        dis_loss = dis_fake_loss + dis_real_loss
        print("discriminator loss: {}".format(dis_loss))
        # dis_loss.backward()
        dis_optim.step()

        generator.train()
        gen_loss = loss_fn(fake_output, torch.ones_like(fake_output))
        gen_optim.zero_grad()
        print("generator loss: {}".format(gen_loss))
        gen_loss.backward()
        gen_optim.step()
        print("----------train step----------")

Output and error message
Traceback (most recent call last):
  File "E:/代码/deep_learning/GAN_anime/train.py", line 68, in 
    gen_loss.backward()
  File "D:\anaconda\lib\site-packages\torch\_tensor.py", line 396, in backward
    torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)
  File "D:\anaconda\lib\site-packages\torch\autograd\__init__.py", line 173, in backward
    Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass
RuntimeError: Trying to backward through the graph a second time (or directly access saved tensors after they have already been freed). Saved intermediate values of the graph are freed when you call .backward() or autograd.grad(). Specify retain_graph=True if you need to backward through the graph a second time or if you need to access saved tensors after calling backward.
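
For comparison, the pattern I see in most DCGAN tutorials detaches gen_imgs for the discriminator update and then runs a fresh forward pass for the generator update. This is a sketch of the inner loop body using my variable names, not my original code:

# Discriminator step: .detach() cuts the graph, so this backward pass
# never touches (or frees) the generator's part of the graph
dis_optim.zero_grad()
real_output = discriminator(imgs)
dis_real_loss = loss_fn(real_output, torch.ones_like(real_output))
dis_real_loss.backward()
gen_imgs = generator(random_noise)
fake_output = discriminator(gen_imgs.detach())
dis_fake_loss = loss_fn(fake_output, torch.zeros_like(fake_output))
dis_fake_loss.backward()
dis_optim.step()

# Generator step: a fresh forward pass through the discriminator gives
# gen_loss its own, not-yet-freed graph to backpropagate through
gen_optim.zero_grad()
fake_output = discriminator(gen_imgs)
gen_loss = loss_fn(fake_output, torch.ones_like(fake_output))
gen_loss.backward()
gen_optim.step()

Passing retain_graph=True to the first backward() call would also silence the error, but as far as I understand that just keeps the whole graph alive rather than fixing the structure of the updates. Is the missing .detach() (plus the missing second forward pass) what causes my error?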