深度学习准确率很低的原因

代码如下:
# Train a convolutional neural network on MNIST using Keras.
# --- MNIST preprocessing ---
import numpy as np
np.random.seed(1337)  # fixed seed for reproducible shuffling / initialization
from keras.datasets import mnist
(X_train, y_train), (X_test, y_test) = mnist.load_data()
img_rows, img_cols = 28, 28
# Append the trailing channel axis expected by Conv2D: (samples, rows, cols, 1).
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
# Scale pixels to 32-bit floats in [0, 1] instead of unsigned ints in [0, 255].
X_train = X_train.astype('float32') / 255.0
X_test = X_test.astype('float32') / 255.0
# One-hot encode the integer class labels.
from keras.utils import np_utils
n_classes = 10
y_train = np_utils.to_categorical(y_train, n_classes)
y_test = np_utils.to_categorical(y_test, n_classes)
# --- Build the convolutional network ---
from keras.models import Sequential
model = Sequential()
from keras.layers import Convolution2D
n_filters = 32
kernel_size = (3, 3)
# BUG FIX: kernel_size must be passed as ONE tuple argument. Passing the two
# ints separately (Convolution2D(32, 3, 3, ...)) makes Keras 2 bind the second
# 3 to `strides`, so each conv layer downsampled by 3x and accuracy collapsed.
model.add(Convolution2D(n_filters, kernel_size, padding='valid',
                        input_shape=input_shape))
# Rectified linear unit as the activation function.
from keras.layers import Activation
model.add(Activation('relu'))
# Second convolutional layer (same tuple fix applies here).
model.add(Convolution2D(n_filters, kernel_size))
model.add(Activation('relu'))
# Pool the activations, then regularize with dropout.
from keras.layers import MaxPooling2D, Dropout
pool_size = (2, 2)
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.25))
# Flatten into a dense classifier head.
from keras.layers import Flatten, Dense
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(n_classes))
model.add(Activation('softmax'))
# Compile with categorical cross-entropy; track accuracy.
model.compile(loss='categorical_crossentropy', optimizer='adadelta',
              metrics=['accuracy'])
# Fit the model, validating on the test split each epoch.
model.fit(X_train, y_train, batch_size=128, epochs=12, verbose=1,
          validation_data=(X_test, y_test))
print(model.evaluate(X_test, y_test, verbose=0))

结果:

有两个地方要改一下：

两处 Convolution2D 调用中的 kernel_size[0],kernel_size[1] -> kernel_size，要以元组形式作为一个参数传入。原因是在 Keras 2 中，Conv2D 的第二个位置参数是 kernel_size、第三个是 strides；把两个 3 分开传会使第二个 3 被当作步长 strides，特征图被过度下采样，准确率因此很低。

修改后运行的片段，准确率在 98% 左右：

Epoch 3/12
60000/60000 [==============================] - 316s 5ms/step - loss: 0.0807 - accuracy: 0.9753 - val_loss: 0.0363 - val_accuracy: 0.9884

 

>>> keras.__version__
'2.3.1'

>>> tensorflow.__version__
'2.2.0'