[Deep Learning] TypeError: must be real number, not NoneType

Problem description

The error message is: TypeError: must be real number, not NoneType
From what I have looked up, this means a real number was expected but I passed None somewhere.
I have been searching for a long time without finding where exactly the problem is, so I do not know how to fix it and am asking here.

If you can point out the exact cause, many thanks!
Teaching me a good way to track it down so I can fix it myself would also be very welcome.

Environment

Windows 10, Python 3.8, Jupyter Notebook, MXNet

Error message


---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-13-a568a2edcd5d> in <module>
      1 num_epochs, lr = 5, 0.5
----> 2 train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, params, lr)

<ipython-input-11-308a7f4acf4d> in train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, params, lr, trainer)
     18         test_acc = evaluate_accuracy(test_iter, net)
     19         train_acc_2 = evaluate_accuracy(train_iter, net)
---> 20         print('epoch %d, loss %.4f, train acc %.3f, train acc 2 %.3f, test acc %.3f' % (epoch + 1, train_l_sum / n, train_acc_sum / n, train_acc_2, test_acc))

TypeError: must be real number, not NoneType
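For context, this message comes from %-style string formatting: a %f or %.3f placeholder requires a real number, and here one of the interpolated values is None. A minimal snippet, independent of MXNet, reproduces the same error:

value = None                 # stands in for a function call that returned nothing
print('acc %.3f' % value)    # TypeError: must be real number, not NoneType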

Code

%matplotlib inline
import d2lzh as d2l
from mxnet import nd, autograd, init, gluon
from mxnet.gluon import loss as gloss, data as gdata, nn

batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)

num_inputs, num_outputs = 784, 10
num_hiddens, num_hiddens2 = 256, 30

W1 = nd.random.normal(scale=0.01, shape=(num_inputs, num_hiddens))
b1 = nd.zeros(num_hiddens)
W2 = nd.random.normal(scale=0.01, shape=(num_hiddens, num_hiddens2))
b2 = nd.zeros(num_hiddens2)
W3 = nd.random.normal(scale=0.01, shape=(num_hiddens2, num_outputs))
b3 = nd.zeros(num_outputs)
params = [W1, b1, W2, b2, W3, b3]

for param in params:
    param.attach_grad()

def relu(X):
    return nd.maximum(X, 0)

def net(X):
    X = X.reshape((-1, num_inputs))
    H = relu(nd.dot(X, W1) + b1)
    H2 = relu(nd.dot(H, W2) + b2)
    return nd.dot(H2, W3) + b3

def sgd(params, lr, batch_size):
    for param in params:
        param[:] = param - lr * param.grad / batch_size

def evaluate_accuracy(data_iter, net):
    acc_sum, n = 0.0, 0
    for X, y in data_iter:
        y = y.astype('float32')
        acc_sum += (net(X).argmax(axis=1) == y).sum().asscalar()

def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
              params=None, lr=None, trainer=None):
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            with autograd.record():
                y_hat = net(X)
                l = loss(y_hat, y).sum()
            l.backward()
            if trainer is None:
                sgd(params, lr, batch_size)
            else:
                trainer.step(batch_size)
            y = y.astype('float32')
            train_l_sum += l.asscalar()
            train_acc_sum += (y_hat.argmax(axis=1) == y).sum().asscalar()
            n += y.size
        test_acc = evaluate_accuracy(test_iter, net)
        train_acc_2 = evaluate_accuracy(train_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, train acc 2 %.3f, test acc %.3f' % (epoch + 1, train_l_sum / n, train_acc_sum / n, train_acc_2, test_acc))

loss = gloss.SoftmaxCrossEntropyLoss()

num_epochs, lr = 5, 0.5
train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, params, lr)

Answer

Your evaluate_accuracy method has no return statement, so it returns None, and that None is what the %.3f placeholder in the print then rejects. Note that n is never incremented inside the loop either, so adding the return alone would divide by zero. The corrected function:

def evaluate_accuracy(data_iter, net):
    acc_sum, n = 0.0, 0
    for X, y in data_iter:
        y = y.astype('float32')
        acc_sum += (net(X).argmax(axis=1) == y).sum().asscalar()
        n += y.size
    return acc_sum / n
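As a general debugging habit (a hypothetical sketch, not part of the original answer): when a long format string fails like this, check each interpolated value right where it is produced, for example just before the print in train_ch3:

# Hypothetical sanity checks before the print; names as in the question.
test_acc = evaluate_accuracy(test_iter, net)
train_acc_2 = evaluate_accuracy(train_iter, net)
assert test_acc is not None, 'evaluate_accuracy returned None (missing return?)'
assert train_acc_2 is not None, 'evaluate_accuracy returned None (missing return?)'

The failing assert then points straight at the function that forgot to return a value.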