import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
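Since matplotlib is already imported, a quick sanity check on the raw data is cheap. The sketch below (assuming the arrays loaded above, which are still 28x28 uint8 images at this point) displays the first few training digits with their labels.
# Quick sanity check: show the first few raw training images and their labels
fig, axes = plt.subplots(1, 5, figsize=(10, 2))
for i, ax in enumerate(axes):
    ax.imshow(x_train[i], cmap='gray')   # raw images are still 28x28 uint8 here
    ax.set_title(int(y_train[i]))
    ax.axis('off')
plt.show()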
img_rows, img_cols = 28, 28
if tf.keras.backend.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
# With the default 'channels_last' backend this gives input_shape = (28, 28, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train = x_train / 255
x_test = x_test / 255
x_train.shape
(60000, 28, 28, 1)
# One-hot encode the labels
y_train_onehot = tf.keras.utils.to_categorical(y_train)
y_test_onehot = tf.keras.utils.to_categorical(y_test)
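To see what to_categorical produces, the short check below (assuming the arrays defined above) prints one integer label next to its one-hot vector: a digit d becomes a length-10 vector with a 1 at index d.
print(y_train[0])            # e.g. 5
print(y_train_onehot[0])     # e.g. [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]
print(y_train_onehot.shape)  # (60000, 10)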
# Build the convolutional neural network
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(10, activation='softmax'))
model.summary()
Model: "sequential_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_2 (Conv2D) (None, 26, 26, 32) 320 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 13, 13, 32) 0 _________________________________________________________________ conv2d_3 (Conv2D) (None, 11, 11, 64) 18496 _________________________________________________________________ max_pooling2d_3 (MaxPooling2 (None, 5, 5, 64) 0 _________________________________________________________________ flatten_1 (Flatten) (None, 1600) 0 _________________________________________________________________ dense_2 (Dense) (None, 128) 204928 _________________________________________________________________ dropout_1 (Dropout) (None, 128) 0 _________________________________________________________________ dense_3 (Dense) (None, 10) 1290 ================================================================= Total params: 225,034 Trainable params: 225,034 Non-trainable params: 0 _________________________________________________________________
# Configure the optimizer and loss function
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
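categorical_crossentropy expects the one-hot targets created above. If you prefer to skip the one-hot step, Keras also provides sparse_categorical_crossentropy, which works directly on the integer labels. A sketch of that alternative is shown below as comments only, since the run that follows uses the one-hot labels:
# Alternative (not used below): train on the integer labels directly
# model.compile(optimizer='adam',
#               loss='sparse_categorical_crossentropy',
#               metrics=['accuracy'])
# model.fit(x_train, y_train, batch_size=256, epochs=10,
#           validation_data=(x_test, y_test))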
# Train the model
history = model.fit(x_train, y_train_onehot, batch_size=256, epochs=10, verbose=1,
                    validation_data=(x_test, y_test_onehot))
Epoch 1/10
235/235 [==============================] - 20s 85ms/step - loss: 0.3861 - accuracy: 0.8831 - val_loss: 0.0713 - val_accuracy: 0.9781
Epoch 2/10
235/235 [==============================] - 19s 82ms/step - loss: 0.1077 - accuracy: 0.9687 - val_loss: 0.0496 - val_accuracy: 0.9840
Epoch 3/10
235/235 [==============================] - 21s 89ms/step - loss: 0.0787 - accuracy: 0.9769 - val_loss: 0.0384 - val_accuracy: 0.9874
Epoch 4/10
235/235 [==============================] - 20s 86ms/step - loss: 0.0668 - accuracy: 0.9799 - val_loss: 0.0342 - val_accuracy: 0.9881
Epoch 5/10
235/235 [==============================] - 21s 89ms/step - loss: 0.0547 - accuracy: 0.9832 - val_loss: 0.0331 - val_accuracy: 0.9885
Epoch 6/10
235/235 [==============================] - 19s 81ms/step - loss: 0.0499 - accuracy: 0.9843 - val_loss: 0.0276 - val_accuracy: 0.9903
Epoch 7/10
235/235 [==============================] - 20s 83ms/step - loss: 0.0436 - accuracy: 0.9869 - val_loss: 0.0280 - val_accuracy: 0.9908
Epoch 8/10
235/235 [==============================] - 20s 83ms/step - loss: 0.0404 - accuracy: 0.9879 - val_loss: 0.0265 - val_accuracy: 0.9905
Epoch 9/10
235/235 [==============================] - 20s 83ms/step - loss: 0.0362 - accuracy: 0.9888 - val_loss: 0.0272 - val_accuracy: 0.9912
Epoch 10/10
235/235 [==============================] - 19s 82ms/step - loss: 0.0336 - accuracy: 0.9895 - val_loss: 0.0273 - val_accuracy: 0.9906
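The history object returned by fit records the per-epoch metrics, so the learning curves can be plotted directly. A small sketch, assuming the metric keys produced by the run above ('loss', 'accuracy', 'val_loss', 'val_accuracy'):
# Plot training vs. validation curves from the history recorded above
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
ax1.plot(history.history['loss'], label='train loss')
ax1.plot(history.history['val_loss'], label='val loss')
ax1.set_xlabel('epoch'); ax1.legend()
ax2.plot(history.history['accuracy'], label='train accuracy')
ax2.plot(history.history['val_accuracy'], label='val accuracy')
ax2.set_xlabel('epoch'); ax2.legend()
plt.show()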
score = model.evaluate(x_test, y_test_onehot, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
Test loss: 0.027276480570435524
Test accuracy: 0.9905999898910522
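To see the model in action, its predictions on the test set can be compared against the true labels. The sketch below (assuming the objects defined above) takes the argmax of the predicted probabilities and displays a few misclassified digits with their true and predicted labels.
# Predict class probabilities, take the argmax, and inspect a few mistakes
pred = np.argmax(model.predict(x_test), axis=1)
wrong = np.where(pred != y_test)[0]
print('misclassified:', len(wrong), 'of', len(y_test))
fig, axes = plt.subplots(1, 5, figsize=(10, 2))
for ax, idx in zip(axes, wrong[:5]):
    ax.imshow(x_test[idx].reshape(28, 28), cmap='gray')
    ax.set_title(f'{y_test[idx]} -> {pred[idx]}')   # true label -> predicted label
    ax.axis('off')
plt.show()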