Rented a cloud GPU for computing: GPU memory is occupied, but GPU utilization is 0

I rented a cloud GPU instance, uploaded my program, and ran it. It occupies GPU memory, but GPU utilization stays at 0.



With the test program provided by the site, GPU utilization is above 80%.
The torch I am using is 1.10.1 without GPU support, with Python 3.9.
The configured image is torch 1.9, Python 3.8, CUDA 11.1.
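Since the installed wheel appears to be CPU-only, a quick sanity check (a minimal sketch, assuming it is run inside the rented instance) shows whether this torch build can see the GPU at all; if it cannot, the device = 'cuda' if torch.cuda.is_available() else 'cpu' line in the script below silently falls back to the CPU:

import torch

print(torch.__version__)          # e.g. "1.10.1+cpu" vs "1.9.0+cu111"
print(torch.version.cuda)         # None on a CPU-only build
print(torch.cuda.is_available())  # must be True for training to use the GPU
if torch.cuda.is_available():
    print(torch.cuda.get_device_name(0))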
Here is my program.
Is there anything wrong with it?

import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils
import torchvision.utils as vutils
import numpy as np
import torch.optim as optim
import matplotlib.pyplot as plt
from torch.utils.tensorboard import SummaryWriter
from ganresnet4 import netD,netG

manualSeed = 64
print("Random Seed: ", manualSeed)
random.seed(manualSeed)
torch.manual_seed(manualSeed)

dataroot = "仿真图片/problemimagetrain"
workers = 0          # number of DataLoader worker processes
batch_size = 64
image_size = 128
nz = 100             # size of the latent vector
num_epochs = 10
lr = 0.0002
beta1 = 0.5          # beta1 for the Adam optimizers

dataset = dset.ImageFolder(root=dataroot,
                           transform=transforms.Compose([
                               transforms.Resize(image_size),
                               transforms.CenterCrop(image_size),
                               transforms.ToTensor(),
                               transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                           ]))

dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                         shuffle=True, num_workers=workers)
device = 'cuda' if torch.cuda.is_available() else 'cpu'

real_batch = next(iter(dataloader))
plt.figure(figsize=(8,8))
plt.axis("off")
plt.title("Training Images")
plt.imshow(np.transpose(vutils.make_grid(real_batch[0].to(device)[:24], padding=2, normalize=True).cpu(),(1,2,0)))
plt.show()
def weights_init(model):
    for m in model.modules():
        if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
            nn.init.normal_(m.weight.data, 0.0, 0.02)
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.normal_(m.weight.data, 1.0, 0.02)
            nn.init.constant_(m.bias.data, 0)
netD = netD().to(device)
netG = netG().to(device)
netG.apply(weights_init)
netD.apply(weights_init)
criterion = nn.BCELoss()
real_label = 1.
fake_label = 0.


optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.999))
fixed_noise = torch.randn(24, nz, 1, 1, device=device)
img_list = []
G_losses = []
D_losses = []
iters = 0
writer_real=SummaryWriter("real4")
writer_fake=SummaryWriter("fake4")
print("Starting Training Loop...")

for epoch in range(num_epochs):
    for i, data in enumerate(dataloader, 0):

        ############################
        # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
        ###########################
        ## Train with all-real batch
        netD.zero_grad()

        real_cpu = data[0].to(device)
        b_size = real_cpu.size(0)
        label = torch.full((b_size*4,), real_label, dtype=torch.float, device=device)

        output = netD(real_cpu).view(-1)

        errD_real = criterion(output, label)

        errD_real.backward()
        D_x = output.mean().item()

        ## Train with all-fake batch
        # Generate batch of latent vectors
        noise = torch.randn(b_size, nz, 1, 1, device=device)
        # Generate fake image batch with G
        fake = netG(noise)
        label.fill_(fake_label)
        # Classify all fake batch with D
        output = netD(fake.detach()).view(-1)
        # Calculate D's loss on the all-fake batch
        errD_fake = criterion(output, label)
        # Calculate the gradients for this batch, accumulated (summed) with previous gradients
        errD_fake.backward()
        D_G_z1 = output.mean().item()
        # Compute error of D as sum over the fake and the real batches
        errD = errD_real + errD_fake
        # Update D
        optimizerD.step()

        ############################
        # (2) Update G network: maximize log(D(G(z)))
        ###########################
        netG.zero_grad()
        label.fill_(real_label)

        output = netD(fake).view(-1)
        errG = criterion(output, label)
        errG.backward()
        D_G_z2 = output.mean().item()
        # Update G
        optimizerG.step()

        if i % 4 == 0:
            print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
                  % (epoch, num_epochs, i, len(dataloader),
                     errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))

        G_losses.append(errG.item())
        D_losses.append(errD.item())

        if (iters % 5 == 0) or ((epoch == num_epochs - 1) and (i == len(dataloader) - 1)):
            with torch.no_grad():
                fake = netG(fixed_noise).detach().cpu()
                data1 = real_cpu
                img_list_fake = torchvision.utils.make_grid(fake, normalize=True)
                img_list_real = torchvision.utils.make_grid(data1, normalize=True)
                writer_fake.add_image(
                    "Fake image", img_list_fake, global_step=iters
                )
                writer_real.add_image(
                    "Real image", img_list_real, global_step=iters
                )
            img_list.append(vutils.make_grid(fake, padding=2, normalize=True))
            torch.save(netG.state_dict(), 'DCGANlossmodel_G_save.path')
            torch.save(netD.state_dict(), 'DCGANlossmodel_D_save.path')
        iters += 1
writer_real.close()
writer_fake.close()
plt.figure(figsize=(10, 5))
plt.title("Generator and Discriminator Loss During Training")
plt.plot(G_losses, label="G")
plt.plot(D_losses, label="D")
plt.xlabel("iterations")
plt.ylabel("Loss")
plt.legend()
plt.show()
plt.figure(figsize=(15,15))
plt.subplot(1,2,1)
plt.axis("off")
plt.title("Real Images")
plt.imshow(np.transpose(vutils.make_grid(real_batch[0].to(device)[:24], padding=5, normalize=True).cpu(),(1,2,0)))

plt.subplot(1,2,2)
plt.axis("off")
plt.title("Fake Images")
plt.imshow(np.transpose(img_list[-1],(1,2,0)))
plt.show()

  • On this question, I found a very good blog post; take a look and see whether it helps. Link: 在GPU刨过的坑
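    One thing worth noting alongside the blog: with num_workers=0 in the script above, a single process both decodes the images and runs the training step, so the GPU can sit idle while waiting for data. A minimal sketch of the usual DataLoader settings (the worker count and pin_memory flag are illustrative, reusing dataset, batch_size and device from the script above):

    import torch

    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=4,    # several loader processes prepare batches in parallel
        pin_memory=True,  # page-locked host memory speeds up host-to-device copies
    )

    for data, _ in dataloader:
        # non_blocking=True lets the copy overlap with GPU compute when pin_memory is set
        real = data.to(device, non_blocking=True)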