import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
import os
# Report the TensorFlow version this transcript was produced with.
print(tf.__version__)

# Load MNIST; Keras downloads the dataset on first use and caches it locally.
mnist = keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Number of training images.
print(len(x_train))
2.6.0
60000
# Display the first training image. The original transcript repeated the
# identical imshow/show pair and the label print (cell re-runs); the
# duplicates are collapsed here — the recorded output shows "5" once.
plt.imshow(x_train[0], cmap=plt.cm.binary)
plt.show()

# Second training image.
plt.imshow(x_train[1], cmap=plt.cm.binary)
plt.show()

# Label of the first training image.
print(y_train[0])
5
# Inspect one specific sample by (1-based) position: image 10051.
img_idx = 10050
plt.imshow(x_train[img_idx], cmap=plt.cm.binary)
plt.show()
print('문제:', (img_idx + 1), '번째 이미지 ', ' 답 : 숫자', y_train[img_idx])
๋ฌธ์ : 10051 ๋ฒ์งธ ์ด๋ฏธ์ง ๋ต : ์ซ์ 7
# Ask the user for a 1-based sample number and display that image with its label.
x = int(input('1~ 60000 사이의 숫자를 넣어라 ->'))
# Guard against out-of-range input: without this, values > 60000 raise an
# opaque IndexError and values < 1 silently index from the end of the array.
if not 1 <= x <= len(x_train):
    raise ValueError('input must be between 1 and {}'.format(len(x_train)))
index = x - 1
plt.imshow(x_train[index], cmap=plt.cm.binary)
plt.show()
print('문제:', (index+1), '번째 이미지 ',' 답 : 숫자', y_train[index])
1~ 60000 ์ฌ์ด์ ์ซ์๋ฅผ ๋ฃ์ด๋ผ ->2
๋ฌธ์ : 2 ๋ฒ์งธ ์ด๋ฏธ์ง ๋ต : ์ซ์ 0
print(x_train.shape)  # (num_images, height, width) -> expected (60000, 28, 28)
(60000, 28, 28)
# 60,000์ฅ๊ณผ 28x28 ํฝ์
print(x_test.shape)  # (num_images, height, width) -> expected (10000, 28, 28)
(10000, 28, 28)
# 10,000์ฅ 28*28ํฝ์
.
print('์ต์๊ฐ:',np.min(x_train), ' ์ต๋๊ฐ:',np.max(x_train))
์ต์๊ฐ: 0 ์ต๋๊ฐ: 255
# Scale pixel values from [0, 255] into [0, 1] before feeding the network.
x_train_norm = x_train / 255.0
x_test_norm = x_test / 255.0
# Sanity check: the normalized range should now be [0.0, 1.0].
print('최솟값:',np.min(x_train_norm), ' 최댓값:',np.max(x_train_norm))
์ต์๊ฐ: 0.0 ์ต๋๊ฐ: 1.0
# Small CNN: two conv/pool stages, then a flattened dense classifier head
# with 10 softmax outputs (one per digit class).
model = keras.models.Sequential([
    keras.layers.Conv2D(16, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    keras.layers.MaxPooling2D((2, 2)),
    keras.layers.Conv2D(32, (3, 3), activation='relu'),
    keras.layers.MaxPooling2D((2, 2)),
    keras.layers.Flatten(),
    keras.layers.Dense(32, activation='relu'),
    keras.layers.Dense(10, activation='softmax'),
])
print('Model에 추가된 Layer 개수: ', len(model.layers))
Model์ ์ถ๊ฐ๋ Layer ๊ฐ์: 7
model.summary()  # print per-layer output shapes and parameter counts
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 26, 26, 16) 160
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 13, 13, 16) 0
_________________________________________________________________
conv2d_1 (Conv2D) (None, 11, 11, 32) 4640
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 5, 5, 32) 0
_________________________________________________________________
flatten (Flatten) (None, 800) 0
_________________________________________________________________
dense (Dense) (None, 32) 25632
_________________________________________________________________
dense_1 (Dense) (None, 10) 330
=================================================================
Total params: 30,762
Trainable params: 30,762
Non-trainable params: 0
_________________________________________________________________
์ด๊ฒ ๋ฅ๋ฌ๋ ๋คํธ์ํฌ ๋ชจ๋ธ์ด๋ค
์ ๋ ฅ์ ํํ๋ (๋ฐ์ดํฐ ๊ฐฏ์, ์ด๋ฏธ์ง ํฌ๊ธฐ x, ์ด๋ฏธ์ง ํฌ๊ธฐ y, ์ฑ๋์)์ ํํ์ด๋ค.
์ด๊ฑธ input_shape=(28,28,1)๋ก ์ง์ ํ ๊ฑฐ๋ค
๊ทผ๋ฐ print(x_train.shape)๋ฅผ ํด๋ณด๋ฉด (60000, 28, 28)๋ก ์ฑ๋์๊ฐ ์๋ค๊ทธ๋ฌ๋๊น ์ฑ๋๋ ๋ฃ์ด์ฃผ์ด์ผ ํ๋ค -> ์ฑ๋์ ์๊ตฌ์ฑ์ ๋งํ๋ค ํ๋ฐฑ:1 ์นผ๋ผRGB : 3
print("Before Reshape - x_train_norm shape: {}".format(x_train_norm.shape))
print("Before Reshape - x_test_norm shape: {}".format(x_test_norm.shape))

# Conv2D expects an explicit channel axis: (N, 28, 28) -> (N, 28, 28, 1).
# Grayscale images have a single channel.
x_train_reshaped = x_train_norm[..., np.newaxis]
x_test_reshaped = x_test_norm[..., np.newaxis]

print("After Reshape - x_train_reshaped shape: {}".format(x_train_reshaped.shape))
print("After Reshape - x_test_reshaped shape: {}".format(x_test_reshaped.shape))
Before Reshape - x_train_norm shape: (60000, 28, 28)
Before Reshape - x_test_norm shape: (10000, 28, 28)
After Reshape - x_train_reshaped shape: (60000, 28, 28, 1)
After Reshape - x_test_reshaped shape: (10000, 28, 28, 1)
# Adam optimizer; sparse categorical cross-entropy matches the integer
# (non-one-hot) labels in y_train.
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'],
)
# Train for 10 full passes over the training set.
model.fit(x_train_reshaped, y_train, epochs=10)
Epoch 1/10
1875/1875 [==============================] - 10s 3ms/step - loss: 0.2158 - accuracy: 0.9334
Epoch 2/10
1875/1875 [==============================] - 6s 3ms/step - loss: 0.0682 - accuracy: 0.9793
Epoch 3/10
1875/1875 [==============================] - 6s 3ms/step - loss: 0.0496 - accuracy: 0.9851
Epoch 4/10
1875/1875 [==============================] - 6s 3ms/step - loss: 0.0388 - accuracy: 0.9878
Epoch 5/10
1875/1875 [==============================] - 6s 3ms/step - loss: 0.0326 - accuracy: 0.9899
Epoch 6/10
1875/1875 [==============================] - 6s 3ms/step - loss: 0.0270 - accuracy: 0.9913
Epoch 7/10
1875/1875 [==============================] - 6s 3ms/step - loss: 0.0223 - accuracy: 0.9930
Epoch 8/10
1875/1875 [==============================] - 6s 3ms/step - loss: 0.0201 - accuracy: 0.9934
Epoch 9/10
1875/1875 [==============================] - 6s 3ms/step - loss: 0.0159 - accuracy: 0.9950
Epoch 10/10
1875/1875 [==============================] - 5s 3ms/step - loss: 0.0138 - accuracy: 0.9958
<keras.callbacks.History at 0x7fa094fbfb50>
# Measure generalization on the held-out test set (verbose=2: one summary line).
test_loss, test_accuracy = model.evaluate(x_test_reshaped,y_test, verbose=2)
print("test_loss: {} ".format(test_loss))
print("test_accuracy: {}".format(test_accuracy))
313/313 - 1s - loss: 0.0394 - accuracy: 0.9906
test_loss: 0.03935988247394562
test_accuracy: 0.9905999898910522
- 100%๋ ์๋์จ๋ค.
- ๋ฐ์ดํฐ ๋ณด๋ฉด ์๊ธ์จ ์ฃผ์ธ์ด ๋ค๋ฅธ ๊ฒ๋ ์๋ค.
- ์ฒ์๋ณด๋ ํ์ฒด๋ ์๋ค.
# Per-class probability distribution for every test image.
predicted_result = model.predict(x_test_reshaped)
# Most probable class per image.
predicted_labels = np.argmax(predicted_result, axis=1)

# Inspect the first test image (index 0).
sample = 0
print('model.predict() 결과 : ', predicted_result[sample])
print('model이 추론한 가장 가능성이 높은 결과 : ', predicted_labels[sample])
print('실제 데이터의 라벨 : ', y_test[sample])
model.predict() ๊ฒฐ๊ณผ : [5.0936549e-10 4.2054354e-10 1.4162788e-07 3.0713025e-07 1.0975039e-09
4.7302101e-10 4.8337666e-17 9.9998915e-01 1.5172457e-09 1.0410251e-05]
model์ด ์ถ๋ก ํ ๊ฐ์ฅ ๊ฐ๋ฅ์ฑ์ด ๋์ ๊ฒฐ๊ณผ : 7
์ค์ ๋ฐ์ดํฐ์ ๋ผ๋ฒจ : 7
[0์ผ๋ก ์ถ๋ก ํ ํ๋ฅ , 1๋ก์ถ๋ก , ...., 7๋ก ์ถ๋ก :, 8๋ก, 9๋ก]
# predicted_result / predicted_labels were already computed above and the
# model has not changed since, so re-running model.predict() here would only
# repeat identical work — reuse the existing arrays.
idx = 1  # second test image
print('model.predict() 결과 : ', predicted_result[idx])
print('model이 추론한 가장 가능성이 높은 결과 : ', predicted_labels[idx])
print('실제 데이터의 라벨 : ', y_test[idx])
model.predict() ๊ฒฐ๊ณผ : [3.8869772e-11 5.8056837e-10 1.0000000e+00 2.7700270e-13 6.0729700e-16
9.5813839e-24 2.9645359e-13 1.0713646e-12 1.4265510e-10 7.0767866e-18]
model์ด ์ถ๋ก ํ ๊ฐ์ฅ ๊ฐ๋ฅ์ฑ์ด ๋์ ๊ฒฐ๊ณผ : 2
์ค์ ๋ฐ์ดํฐ์ ๋ผ๋ฒจ : 2
# Show the image inspected above.
plt.imshow(x_test[idx], cmap=plt.cm.binary)
plt.show()

import random

# Indices where the model's prediction disagrees with the ground-truth label.
# (In the original transcript the comments here were wrapped mid-line, which
# is a syntax error when the code is pasted back into Python — fixed.)
wrong_predict_list = [i for i in range(len(predicted_labels))
                      if predicted_labels[i] != y_test[i]]

# Draw 5 misclassified samples at random. NOTE(review): random.choices samples
# WITH replacement, so the same index can appear more than once; use
# random.sample if distinct picks are required.
samples = random.choices(population=wrong_predict_list, k=5)

for n in samples:
    print("예측확률분포: " + str(predicted_result[n]))
    print("라벨: " + str(y_test[n]) + ", 예측결과: " + str(predicted_labels[n]))
    plt.imshow(x_test[n], cmap=plt.cm.binary)
    plt.show()
์์ธกํ๋ฅ ๋ถํฌ: [9.9993050e-01 2.2502122e-12 4.7690723e-06 3.0059752e-08 1.7519780e-13
8.0025169e-09 5.0630263e-08 6.8369168e-09 5.4815446e-05 9.7632701e-06]
๋ผ๋ฒจ: 8, ์์ธก๊ฒฐ๊ณผ: 0
์์ธกํ๋ฅ ๋ถํฌ: [4.3070769e-11 9.9670094e-01 2.3406226e-04 6.9325289e-08 9.2083937e-06
7.6245671e-10 3.0557083e-03 1.7472924e-13 8.3793372e-09 3.5816929e-12]
๋ผ๋ฒจ: 6, ์์ธก๊ฒฐ๊ณผ: 1
์์ธกํ๋ฅ ๋ถํฌ: [2.3253349e-11 1.5969941e-05 2.2154758e-03 1.5413132e-13 9.9764663e-01
7.1346483e-12 3.1916945e-12 1.1906381e-04 3.9855234e-08 2.7896983e-06]
๋ผ๋ฒจ: 2, ์์ธก๊ฒฐ๊ณผ: 4
์์ธกํ๋ฅ ๋ถํฌ: [5.2042344e-07 8.2894566e-08 4.1278052e-09 1.4097756e-06 5.7676568e-04
5.9966849e-05 7.5762877e-08 5.5446890e-07 5.0400358e-01 4.9535707e-01]
๋ผ๋ฒจ: 9, ์์ธก๊ฒฐ๊ณผ: 8
์์ธกํ๋ฅ ๋ถํฌ: [6.0286757e-14 7.1217093e-10 2.9044655e-14 5.7257526e-02 3.6976814e-11
9.4270760e-01 1.5280622e-12 1.6837074e-09 3.4841847e-05 1.6904140e-08]
๋ผ๋ฒจ: 3, ์์ธก๊ฒฐ๊ณผ: 5
# ๋ฐ๊ฟ ๋ณผ ์ ์๋ ํ์ดํผํ๋ผ๋ฏธํฐ๋ค
n_channel_1=16
n_channel_2=32
n_dense=32
n_train_epoch=10
model=keras.models.Sequential()
model.add(keras.layers.Conv2D(n_channel_1, (3,3), activation='relu', input_shape=(28,28,1)))
model.add(keras.layers.MaxPool2D(2,2))
model.add(keras.layers.Conv2D(n_channel_2, (3,3), activation='relu'))
model.add(keras.layers.MaxPooling2D((2,2)))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(n_dense, activation='relu'))
model.add(keras.layers.Dense(10, activation='softmax'))
model.summary()
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# ๋ชจ๋ธ ํ๋ จ
model.fit(x_train_reshaped, y_train, epochs=n_train_epoch)
# ๋ชจ๋ธ ์ํ
test_loss, test_accuracy = model.evaluate(x_test_reshaped, y_test, verbose=2)
print("test_loss: {} ".format(test_loss))
print("test_accuracy: {}".format(test_accuracy))
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_2 (Conv2D) (None, 26, 26, 16) 160
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 13, 13, 16) 0
_________________________________________________________________
conv2d_3 (Conv2D) (None, 11, 11, 32) 4640
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 5, 5, 32) 0
_________________________________________________________________
flatten_1 (Flatten) (None, 800) 0
_________________________________________________________________
dense_2 (Dense) (None, 32) 25632
_________________________________________________________________
dense_3 (Dense) (None, 10) 330
=================================================================
Total params: 30,762
Trainable params: 30,762
Non-trainable params: 0
_________________________________________________________________
Epoch 1/10
1875/1875 [==============================] - 5s 3ms/step - loss: 0.1991 - accuracy: 0.9392
Epoch 2/10
1875/1875 [==============================] - 5s 3ms/step - loss: 0.0726 - accuracy: 0.9779
Epoch 3/10
1875/1875 [==============================] - 5s 3ms/step - loss: 0.0535 - accuracy: 0.9834
Epoch 4/10
1875/1875 [==============================] - 5s 3ms/step - loss: 0.0434 - accuracy: 0.9864
Epoch 5/10
1875/1875 [==============================] - 5s 3ms/step - loss: 0.0366 - accuracy: 0.9884
Epoch 6/10
1875/1875 [==============================] - 5s 3ms/step - loss: 0.0311 - accuracy: 0.9903
Epoch 7/10
1875/1875 [==============================] - 5s 3ms/step - loss: 0.0256 - accuracy: 0.9916
Epoch 8/10
1875/1875 [==============================] - 5s 3ms/step - loss: 0.0215 - accuracy: 0.9930
Epoch 9/10
1875/1875 [==============================] - 5s 3ms/step - loss: 0.0183 - accuracy: 0.9940
Epoch 10/10
1875/1875 [==============================] - 5s 3ms/step - loss: 0.0162 - accuracy: 0.9945
313/313 - 1s - loss: 0.0400 - accuracy: 0.9881
test_loss: 0.04003562033176422
test_accuracy: 0.988099992275238