import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import pickle
import os
from tensorboardX import SummaryWriter
import time
class ModelNetTrainer(object):
def __init__(self, model, train_loader, val_loader, optimizer, loss_fn, \
model_name, log_dir, num_views=12):
self.optimizer = optimizer
self.model = model
self.train_loader = train_loader
self.val_loader = val_loader
self.loss_fn = loss_fn
self.model_name = model_name
self.log_dir = log_dir
self.num_views = num_views
self.model.cuda()
if self.log_dir is not None:
self.writer = SummaryWriter(log_dir)
def train(self, n_epochs):
best_acc = 0
i_acc = 0
i = 0
self.model.train()
for epoch in range(n_epochs):
# permute data for mvcnn
rand_idx = np.random.permutation(int(len(self.train_loader.dataset.filepaths)/self.num_views))
filepaths_new = []
for i in range(len(rand_idx)):
filepaths_new.extend(self.train_loader.dataset.filepaths[rand_idx[i]*self.num_views:(rand_idx[i]+1)*self.num_views])
self.train_loader.dataset.filepaths = filepaths_new
# plot learning rate
lr = self.optimizer.state_dict()['param_groups'][0]['lr']
self.writer.add_scalar('params/lr', lr, epoch)
# train one epoch
out_data = None
in_data = None
for i, data in enumerate(self.train_loader):
if self.model_name == 'mvcnn':
N,V,C,H,W = data[1].size()
in_data = Variable(data[1]).view(-1,C,H,W).cuda()
else:
in_data = Variable(data[1].cuda())
target = Variable(data[0]).cuda().long()
self.optimizer.zero_grad()
out_data = self.model(in_data)
loss = self.loss_fn(out_data, target)
self.writer.add_scalar('train/train_loss', loss, i_acc+i+1)
pred = torch.max(out_data, 1)[1]
results = pred == target
correct_points = torch.sum(results.long())
acc = correct_points.float()/results.size()[0]
self.writer.add_scalar('train/train_overall_acc', acc, i_acc+i+1)
loss.backward()
self.optimizer.step()
log_str = 'epoch %d, step %d: train_loss %.3f; train_acc %.3f' % (epoch+1, i+1, loss, acc)
if (i+1)%1==0:
print(log_str)
i_acc += i
# evaluation
if (epoch+1)%1==0:
with torch.no_grad():
loss, val_overall_acc, val_mean_class_acc = self.update_validation_accuracy(epoch)
self.writer.add_scalar('val/val_mean_class_acc', val_mean_class_acc, epoch+1)
self.writer.add_scalar('val/val_overall_acc', val_overall_acc, epoch+1)
self.writer.add_scalar('val/val_loss', loss, epoch+1)
# save best model
if val_overall_acc > best_acc:
best_acc = val_overall_acc
self.model.save(self.log_dir, epoch)
# adjust learning rate manually
if epoch > 0 and (epoch+1) % 10 == 0:
for param_group in self.optimizer.param_groups:
param_group['lr'] = param_group['lr']*0.5
# export scalar data to JSON for external processing
self.writer.export_scalars_to_json(self.log_dir+"/all_scalars.json")
self.writer.close()
def update_validation_accuracy(self, epoch):
all_correct_points = 0
all_points = 0
# in_data = None
# out_data = None
# target = None
wrong_class = np.zeros(40)
samples_class = np.zeros(40)
all_loss = 0
self.model.eval()
avgpool = nn.AvgPool1d(1, 1)
total_time = 0.0
total_print_time = 0.0
all_target = []
all_pred = []
for _, data in enumerate(self.val_loader, 0):
if self.model_name == 'mvcnn':
N,V,C,H,W = data[1].size()
in_data = Variable(data[1]).view(-1,C,H,W).cuda()
else:#'svcnn'
in_data = Variable(data[1]).cuda()
target = Variable(data[0]).cuda()
out_data = self.model(in_data)
pred = torch.max(out_data, 1)[1]
all_loss += self.loss_fn(out_data, target).cpu().data.numpy()
results = pred == target
for i in range(results.size()[0]):
if not bool(results[i].cpu().data.numpy()):
wrong_class[target.cpu().data.numpy().astype('int')[i]] += 1
samples_class[target.cpu().data.numpy().astype('int')[i]] += 1
correct_points = torch.sum(results.long())
all_correct_points += correct_points
all_points += results.size()[0]
print ('Total # of test models: ', all_points)
val_mean_class_acc = np.mean((samples_class-wrong_class)/samples_class)
acc = all_correct_points.float() / all_points
val_overall_acc = acc.cpu().data.numpy()
loss = all_loss / len(self.val_loader)
print ('val mean class acc. : ', val_mean_class_acc)
print ('val overall acc. : ', val_overall_acc)
print ('val loss : ', loss)
self.model.train()
return loss, val_overall_acc, val_mean_class_acc
import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
import os,shutil,json
import argparse
from tools.Trainer import ModelNetTrainer
from tools.ImgDataset import MultiviewImgDataset, SingleImgDataset
from models.MVCNN import MVCNN, SVCNN
parser = argparse.ArgumentParser()
parser.add_argument("-name", "--name", type=str, help="Name of the experiment", default="MVCNN")
parser.add_argument("-bs", "--batchSize", type=int, help="Batch size for the second stage", default=8)# it will be *12 images in each batch for mvcnn
parser.add_argument("-num_models", type=int, help="number of models per class", default=1000)
parser.add_argument("-lr", type=float, help="learning rate", default=5e-5)
parser.add_argument("-weight_decay", type=float, help="weight decay", default=0.0)
parser.add_argument("-no_pretraining", dest='no_pretraining', action='store_true')
parser.add_argument("-cnn_name", "--cnn_name", type=str, help="cnn model name", default="vgg11")
parser.add_argument("-num_views", type=int, help="number of views", default=12)
parser.add_argument("-train_path", type=str, default=r"D:\PycharmProjects\data\ModelNet40\ModelNet40\*\train")
parser.add_argument("-val_path", type=str, default=r"D:\PycharmProjects\data\ModelNet40\ModelNet40\*\test")
parser.set_defaults(train=False)
def create_folder(log_dir):
# make summary folder
if not os.path.exists(log_dir):
os.mkdir(log_dir)
else:
print('WARNING: summary folder already exists!! It will be overwritten!!')
shutil.rmtree(log_dir)
os.mkdir(log_dir)
if __name__ == '__main__':
args = parser.parse_args()
pretraining = not args.no_pretraining
log_dir = args.name
create_folder(args.name)
config_f = open(os.path.join(log_dir, 'config.json'), 'w')
json.dump(vars(args), config_f)
config_f.close()
# STAGE 1
log_dir = args.name+'_stage_1'
create_folder(log_dir)
cnet = SVCNN(args.name, nclasses=40, pretraining=pretraining, cnn_name=args.cnn_name)
optimizer = optim.Adam(cnet.parameters(), lr=args.lr, weight_decay=args.weight_decay)
n_models_train = args.num_models*args.num_views
train_dataset = SingleImgDataset(args.train_path, scale_aug=False, rot_aug=False, num_models=n_models_train, num_views=args.num_views)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True, num_workers=0)
val_dataset = SingleImgDataset(args.val_path, scale_aug=False, rot_aug=False, test_mode=True)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=64, shuffle=False, num_workers=0)
print('num_train_files: '+str(len(train_dataset.filepaths)))
print('num_val_files: '+str(len(val_dataset.filepaths)))
trainer = ModelNetTrainer(cnet, train_loader, val_loader, optimizer, nn.CrossEntropyLoss(), 'svcnn', log_dir, num_views=1)
trainer.train(30)
# STAGE 2
log_dir = args.name+'_stage_2'
create_folder(log_dir)
cnet_2 = MVCNN(args.name, cnet, nclasses=40, cnn_name=args.cnn_name, num_views=args.num_views)
del cnet
optimizer = optim.Adam(cnet_2.parameters(), lr=args.lr, weight_decay=args.weight_decay, betas=(0.9, 0.999))
train_dataset = MultiviewImgDataset(args.train_path, scale_aug=False, rot_aug=False, num_models=n_models_train, num_views=args.num_views)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batchSize, shuffle=False, num_workers=0)# shuffle needs to be false! it's done within the trainer
val_dataset = MultiviewImgDataset(args.val_path, scale_aug=False, rot_aug=False, num_views=args.num_views)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batchSize, shuffle=False, num_workers=0)
print('num_train_files: '+str(len(train_dataset.filepaths)))
print('num_val_files: '+str(len(val_dataset.filepaths)))
trainer = ModelNetTrainer(cnet_2, train_loader, val_loader, optimizer, nn.CrossEntropyLoss(), 'mvcnn', log_dir, num_views=args.num_views)
trainer.train(30)
(pytorch04) C:\Users\MT\Desktop\mvcnn_pytorch-master>python train_mvcnn.py -name
"mvcnn" -num_models 1000 -weight_decay 0.001 -num_views 12 -cnn_name vgg11
WARNING: summary folder already exists!! It will be overwritten!!
WARNING: summary folder already exists!! It will be overwritten!!
num_train_files: 0
num_val_files: 0
Total # of test models: 0
C:\Users\MT\Desktop\mvcnn_pytorch-master\tools\Trainer.py:149: RuntimeWarning: i
nvalid value encountered in true_divide
val_mean_class_acc = np.mean((samples_class-wrong_class)/samples_class)
Traceback (most recent call last):
File "train_mvcnn.py", line 62, in <module>
trainer.train(30)
File "C:\Users\MT\Desktop\mvcnn_pytorch-master\tools\Trainer.py", line 85, in
train
loss, val_overall_acc, val_mean_class_acc = self.update_validation_accuracy(
epoch)
File "C:\Users\MT\Desktop\mvcnn_pytorch-master\tools\Trainer.py", line 150, in
update_validation_accuracy
acc = all_correct_points.float() / all_points
AttributeError: 'int' object has no attribute 'float'
all_correct_points.float()是要干啥
你是想
float(all_correct_points)
吗