Applying filters to an image (Convolution layer)
The layers used in this section:
tf.keras.layers.Conv2D
tf.keras.layers.Activation
tf.keras.layers.MaxPool2D
tf.keras.layers.Flatten
tf.keras.layers.Dense
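As a quick illustration of what a convolution filter does to an image tensor (my own sketch, not part of the lecture code), the snippet below pushes a dummy 28x28 single-channel image through a Conv2D and a MaxPool2D layer and prints the resulting shapes:

import tensorflow as tf

x = tf.random.normal((1, 28, 28, 1))  # (batch, height, width, channels)

# 32 filters of size 3x3; 'same' padding keeps the spatial size at 28x28
conv = tf.keras.layers.Conv2D(32, kernel_size=3, padding='same', activation='relu')
pool = tf.keras.layers.MaxPool2D()    # default 2x2 pooling halves height and width

print(conv(x).shape)        # (1, 28, 28, 32)
print(pool(conv(x)).shape)  # (1, 14, 14, 32)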
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
np.random.seed(7777)
tf.random.set_seed(7777)
class DataLoader():
    def __init__(self):
        # Load the MNIST dataset
        (self.train_x, self.train_y), \
            (self.test_x, self.test_y) = tf.keras.datasets.mnist.load_data()

    def scale(self, x):
        return (x / 255.0).astype(np.float32)

    def preprocess_dataset(self, dataset):
        (feature, target) = dataset

        # Scale pixel values to [0, 1]
        scaled_x = np.array([self.scale(x) for x in feature])

        # Add a channel axis (dummy dimension): (N, 28, 28) -> (N, 28, 28, 1)
        expanded_x = scaled_x[:, :, :, np.newaxis]

        # One-hot encode the labels
        ohe_y = np.array([tf.keras.utils.to_categorical(
            y, num_classes=10) for y in target])

        return expanded_x, ohe_y

    def get_train_dataset(self):
        return self.preprocess_dataset((self.train_x, self.train_y))

    def get_test_dataset(self):
        return self.preprocess_dataset((self.test_x, self.test_y))
# Check the shape and dtype of the preprocessed data
mnist_loader = DataLoader()
train_x, train_y = mnist_loader.get_train_dataset()
test_x, test_y = mnist_loader.get_test_dataset()
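A quick check of the preprocessed arrays (my addition; the expected shapes and dtypes follow from the scaling, channel expansion, and one-hot encoding above):

print(train_x.shape, train_x.dtype)  # (60000, 28, 28, 1) float32
print(train_y.shape, train_y.dtype)  # (60000, 10) float32
print(test_x.shape, test_x.dtype)    # (10000, 28, 28, 1) float32
print(test_y.shape, test_y.dtype)    # (10000, 10) float32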
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
model = tf.keras.Sequential()
# The first layer must specify the input shape via input_shape (the batch axis is omitted).
model.add(Conv2D(32, kernel_size=3, padding='same', activation='relu', input_shape=(28, 28, 1)))
model.add(Conv2D(32, kernel_size=3, padding='same', activation='relu'))
model.add(MaxPooling2D())
model.add(Conv2D(64, kernel_size=3, padding='same', activation='relu'))
model.add(Conv2D(64, kernel_size=3, padding='same', activation='relu'))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.summary()
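In the summary printed above, each Conv2D layer's parameter count follows kernel_height x kernel_width x input_channels x filters + filters (the bias term): the first layer has 3 x 3 x 1 x 32 + 32 = 320 parameters and the second 3 x 3 x 32 x 32 + 32 = 9,248. After the two pooling layers the feature map is 7 x 7 x 64, so Flatten produces 3,136 features, and each Dense layer contributes inputs x units + units parameters.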
lr = 0.03
opt = tf.keras.optimizers.Adam(lr)
loss = tf.keras.losses.categorical_crossentropy
model.compile(optimizer=opt, loss=loss, metrics=['accuracy'])
hist = model.fit(train_x, train_y, epochs=2, batch_size=128, validation_data=(test_x, test_y))
hist.history
plt.figure(figsize=(10, 5))
plt.subplot(221)
plt.plot(hist.history['loss'])
plt.title("loss")
plt.subplot(222)
plt.plot(hist.history['accuracy'], 'b-')
plt.title("acc")
plt.subplot(223)
plt.plot(hist.history['val_loss'])
plt.title("val_loss")
plt.subplot(224)
plt.plot(hist.history['val_accuracy'], 'b-')
plt.title("val_accuracy")
plt.tight_layout()
plt.show()
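Once training finishes, the held-out test set can also be scored directly; a minimal sketch reusing test_x and test_y from the DataLoader above:

test_loss, test_acc = model.evaluate(test_x, test_y, batch_size=128)
print(test_loss, test_acc)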
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
np.random.seed(7777)
tf.random.set_seed(7777)
from tensorflow.keras.layers import Input, Conv2D, MaxPool2D, Flatten, Dense
input_shape = (28, 28, 1)
inputs = Input(input_shape)
net = Conv2D(32, kernel_size=3, padding='same', activation='relu')(inputs)
net = Conv2D(32, kernel_size=3, padding='same', activation='relu')(net)
net = MaxPool2D()(net)
net = Conv2D(64, kernel_size=3, padding='same', activation='relu')(net)
net = Conv2D(64, kernel_size=3, padding='same', activation='relu')(net)
net = MaxPool2D()(net)
net = Flatten()(net)
net = Dense(128, activation="relu")(net)
net = Dense(64, activation="relu")(net)
net = Dense(10, activation="softmax")(net)
model = tf.keras.Model(inputs=inputs, outputs=net, name='VGG')  # 'name' sets the model's name
model.summary()
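The functional model is compiled and trained exactly like the Sequential one; a minimal sketch, assuming train_x/train_y and test_x/test_y from the DataLoader defined earlier:

model.compile(optimizer=tf.keras.optimizers.Adam(0.03),
              loss=tf.keras.losses.categorical_crossentropy,
              metrics=['accuracy'])
hist = model.fit(train_x, train_y, epochs=2, batch_size=128,
                 validation_data=(test_x, test_y))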
Reference
1) Zero-base Data School (제로베이스 데이터스쿨) lecture materials