The CNN architecture is shown in the figure above, and the model summary is as follows:
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
conv2d (Conv2D)              (None, 223, 223, 64)      4864
_________________________________________________________________
batch_normalization_1 (Batch (None, 223, 223, 64)      256
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 111, 111, 64)      0
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 107, 107, 64)      102464
_________________________________________________________________
batch_normalization_2 (Batch (None, 107, 107, 64)      256
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 53, 53, 64)        0
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 49, 49, 128)       204928
_________________________________________________________________
batch_normalization_3 (Batch (None, 49, 49, 128)       512
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 24, 24, 128)       0
_________________________________________________________________
conv2d_3 (Conv2D)            (None, 22, 22, 256)       295168
_________________________________________________________________
batch_normalization_4 (Batch (None, 22, 22, 256)       1024
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 11, 11, 256)       0
_________________________________________________________________
conv2d_4 (Conv2D)            (None, 9, 9, 512)         1180160
_________________________________________________________________
batch_normalization_5 (Batch (None, 9, 9, 512)         2048
_________________________________________________________________
max_pooling2d_4 (MaxPooling2 (None, 4, 4, 512)         0
_________________________________________________________________
conv2d_5 (Conv2D)            (None, 4, 4, 1024)        525312
_________________________________________________________________
batch_normalization_6 (Batch (None, 4, 4, 1024)        4096
_________________________________________________________________
max_pooling2d_5 (MaxPooling2 (None, 2, 2, 1024)        0
_________________________________________________________________
conv2d_6 (Conv2D)            (None, 2, 2, 1024)        1049600
_________________________________________________________________
batch_normalization_7 (Batch (None, 2, 2, 1024)        4096
_________________________________________________________________
max_pooling2d_7 (MaxPooling2 (None, 1, 1, 1024)        0
_________________________________________________________________
flatten (Flatten)            (None, 1024)              0
_________________________________________________________________
dropout (Dropout)            (None, 1024)              0
_________________________________________________________________
dense (Dense)                (None, 1024)              1049600
_________________________________________________________________
dense_1 (Dense)              (None, 32)                32800
=================================================================
Total params: 4,457,184
Trainable params: 4,451,040
Non-trainable params: 6,144
_________________________________________________________________
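As a quick consistency check on the summary, the first Conv2D's parameter count can be reproduced from 64 filters applied over 3 input channels plus one bias per filter (the 5x5 kernel size is inferred from the 227 -> 223 output shape, so treat it as an assumption):

# 64 filters, 5x5 kernel over 3 channels, plus one bias per filter
# (kernel size inferred from the 227 -> 223 output shape, not stated explicitly above)
print(64 * (5 * 5 * 3 + 1))  # 4864, matching the summary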
Next, I would like to use an SVM for classification. How should I go about this?
For reference only. The approach below trains the CNN first, then takes the output of the 32-unit layer named "my_intermediate_layer" as the feature vector for each image and fits an sklearn SVM on those features. The key steps are shown here; the full model definition and data loading are in the complete code further down.
# Feature extractor: reuse the trained CNN up to the 32-unit intermediate layer
model_feat = Model(inputs=model.input, outputs=model.get_layer('my_intermediate_layer').output)
feat_train = model_feat.predict(train_data)
print(feat_train.shape)
feat_train = feat_train.reshape(-1, 32)  # (N, 1, 32) -> (N, 32)

# Fit an RBF-kernel SVM on the extracted features
from sklearn.svm import SVC
svm = SVC(kernel='rbf')
svm.fit(feat_train, train_labels.ravel())    # sklearn expects 1-D labels
svm.score(feat_train, train_labels.ravel())  # training accuracy only
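Note that svm.score on the training features only reports training accuracy. Below is a minimal sketch of evaluating the SVM on held-out features instead, with standardization before the RBF kernel; the split ratio and random_state are arbitrary illustrative choices, not part of the original code.

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.svm import SVC

y = train_labels.ravel()  # 1-D labels for sklearn
X_tr, X_val, y_tr, y_val = train_test_split(
    feat_train, y, test_size=0.2, random_state=7, stratify=y)

# Standardizing the CNN features usually helps an RBF-kernel SVM
clf = make_pipeline(StandardScaler(), SVC(kernel='rbf'))
clf.fit(X_tr, y_tr)
print('held-out accuracy:', clf.score(X_val, y_val))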
Complete code:
import os
import numpy as np
import pandas as pd
import cv2
import warnings
warnings.filterwarnings('ignore')
from sklearn.utils import shuffle
# Use tf.keras consistently to avoid mixing standalone keras and tf.keras classes
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, regularizers
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.optimizers import Adam
# Load data
train_dir =
test_dir =
train_cate = pd.read_csv()
test_cate = pd.read_csv()
# Sanity check: look up the retinopathy grade of one image by name
np.array(train_cate[train_cate['Image_name']=='IDRiD_001']['Retinopathy grade'])
# Get data and labels
train_data = []
train_labels = []
def get_images(image_dir, labels_dir):
    labels = pd.read_csv(labels_dir)  # read the label table once instead of once per image
    for image_file in os.listdir(image_dir):
        image = cv2.imread(image_dir + r'/' + image_file)
        image = cv2.resize(image, (227, 227))
        train_data.append(image)
        # match the image file name against the 'Image_name' column to get its grade
        label = list(labels[labels['Image_name'] + '.jpg' == image_file]['Retinopathy grade'])
        train_labels.append(label)
    return shuffle(train_data, train_labels, random_state=7)
train_data, train_labels = get_images( , )
train_data = np.array(train_data)
train_labels = np.array(train_labels)
train_labels = train_labels.reshape(-1, 1)  # (N, 1) labels; this matches the model's (None, 1, 5) output below
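# Quick sanity check (assuming, as in the post, 413 training images with grades 0-4):
# expect train_data of shape (413, 227, 227, 3) and train_labels of shape (413, 1)
print(train_data.shape, train_labels.shape)
# class distribution of the 5 retinopathy grades
print(dict(zip(*np.unique(train_labels, return_counts=True))))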
# Build cnn model
model = keras.Sequential([
    tf.keras.Input(shape=(227, 227, 3)),
    # Block 1
    layers.Conv2D(64, (5, 5), strides=(1, 1), padding='same',
                  kernel_regularizer=regularizers.l2(0.001), bias_regularizer=regularizers.l2(0.001)),
    layers.ReLU(negative_slope=1),
    layers.BatchNormalization(),
    layers.MaxPooling2D(pool_size=(2, 2), padding="same"),
    # Block 2
    layers.Conv2D(64, (5, 5), strides=(1, 1), padding='same',
                  kernel_regularizer=regularizers.l2(0.001), bias_regularizer=regularizers.l2(0.001)),
    layers.ReLU(negative_slope=1),
    layers.BatchNormalization(),
    layers.MaxPooling2D(pool_size=(2, 2)),
    # Block 3
    layers.Conv2D(128, (5, 5), strides=(1, 1), padding='same',
                  kernel_regularizer=regularizers.l2(0.001), bias_regularizer=regularizers.l2(0.001)),
    layers.ReLU(negative_slope=1),
    layers.BatchNormalization(),
    layers.MaxPooling2D(pool_size=(2, 2)),
    # Block 4
    layers.Conv2D(256, (3, 3), strides=(1, 1), padding='same',
                  kernel_regularizer=regularizers.l2(0.001), bias_regularizer=regularizers.l2(0.001)),
    layers.ReLU(negative_slope=1),
    layers.BatchNormalization(),
    layers.MaxPooling2D(pool_size=(2, 2), padding="same"),
    # Block 5
    layers.Conv2D(512, (3, 3), strides=(1, 1), padding='same',
                  kernel_regularizer=regularizers.l2(0.001), bias_regularizer=regularizers.l2(0.001)),
    layers.ReLU(negative_slope=1),  # negative slope 1 is good
    layers.BatchNormalization(),
    layers.MaxPooling2D(pool_size=(2, 2), padding="same"),
    # Block 6
    layers.Conv2D(1024, (1, 1), strides=(1, 1), padding='same',
                  kernel_regularizer=regularizers.l2(0.001), bias_regularizer=regularizers.l2(0.001)),
    layers.ReLU(negative_slope=1),
    layers.BatchNormalization(),
    layers.MaxPooling2D(pool_size=(2, 2), padding="same"),
    layers.MaxPooling2D(pool_size=(3, 3), padding="same"),
    # Classifier head: the 2x2x1024 feature map is reshaped to (1, 4096)
    layers.Reshape((1, 4096)),
    layers.Dense(1024, kernel_regularizer=regularizers.l2(0.001), bias_regularizer=regularizers.l2(0.001)),
    layers.ReLU(negative_slope=1),
    layers.Dropout(0.5),
    # 32-unit layer whose output is later used as the feature vector for the SVM
    layers.Dense(32, name="my_intermediate_layer",
                 kernel_regularizer=regularizers.l2(0.001), bias_regularizer=regularizers.l2(0.001)),
    layers.ReLU(negative_slope=1),
    layers.Dense(5, activation='softmax', name="my_last_layer"),
])
model.summary()
model.compile(optimizer=Adam(learning_rate=0.00005), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# Train
trained = model.fit(train_data,train_labels,epochs=30,validation_split=0.10)
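# Optional: inspect the training curves from the History object returned by fit()
# (matplotlib is an extra dependency, not used elsewhere in this post)
import matplotlib.pyplot as plt
plt.plot(trained.history['accuracy'], label='train acc')
plt.plot(trained.history['val_accuracy'], label='val acc')
plt.xlabel('epoch')
plt.legend()
plt.show()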
# SVM
# Feature extractor: reuse the trained CNN up to the 32-unit intermediate layer
model_feat = Model(inputs=model.input, outputs=model.get_layer('my_intermediate_layer').output)
feat_train = model_feat.predict(train_data)
print(feat_train.shape)
feat_train = feat_train.reshape(-1, 32)  # (N, 1, 32) -> (N, 32)
# Fit an RBF-kernel SVM on the extracted features
from sklearn.svm import SVC
svm = SVC(kernel='rbf')
svm.fit(feat_train, train_labels.ravel())    # sklearn expects 1-D labels
svm.score(feat_train, train_labels.ravel())  # training accuracy only
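If the training accuracy above looks much better than held-out accuracy, it may be worth tuning the SVM. A minimal sketch using scikit-learn's GridSearchCV over C and gamma on the extracted features (the parameter grid and cv=5 are arbitrary illustrative choices, not from the original code):

from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

# Small grid over the RBF-SVM's regularization strength and kernel width
param_grid = {'C': [0.1, 1, 10, 100], 'gamma': ['scale', 0.01, 0.001]}
search = GridSearchCV(SVC(kernel='rbf'), param_grid, cv=5)
search.fit(feat_train, train_labels.ravel())
print(search.best_params_, search.best_score_)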
Hello, I ran into the same problem. Have you managed to solve it?