Sesac 49일차

SungMin·2022년 12월 13일
0

Sesac-ML_DL

목록 보기
10/16

교재 : 백견불여일타 딥러닝 입문 with 텐서플로우 2.x, 로드북

Fashion-mnist 살펴보기

  • 각 레이블에 해당하는 의류 품목 살펴보기
from keras.datasets.fashion_mnist import load_data
# Download Fashion-MNIST (60,000 train / 10,000 test grayscale 28x28 images).
(x_train, y_train), (x_test, y_test) = load_data()
print(x_train.shape,x_test.shape)
(60000, 28, 28) (10000, 28, 28)
# Inspect random sample items from Fashion-MNIST with their class names.
import matplotlib.pyplot as plt
import numpy as np
np.random.seed(777)
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

# Pick 9 random training indices and draw them in a 3x3 grid.
sample_size = 9
random_idx = np.random.randint(60000, size=sample_size)

plt.figure(figsize = (5, 5))
for i, idx in enumerate(random_idx):
  plt.subplot(3, 3, i+1) # subplot positions are 1-based, so add 1 to the 0-based index
  plt.xticks([]) # hide x-axis ticks by passing an empty list
  plt.yticks([]) # hide y-axis ticks
  plt.imshow(x_train[idx], cmap='gray') # show the sampled image in grayscale
  plt.xlabel(class_names[y_train[idx]]) # label each subplot with its class name
plt.show()

x_train.min(), x_train.max()
(0, 255)
# Scale pixel values from [0, 255] down to [0, 1].
x_train = x_train/255
x_test = x_test/255
x_train.min(), x_train.max()
(0.0, 1.0)
y_train.min(),y_train.max()
(0, 9)
from keras.utils import to_categorical
# One-hot encode the integer labels (0-9) into 10-dimensional vectors.
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
y_train.min(),y_train.max()
(0.0, 1.0)
y_train[0]
array([0., 0., 0., 0., 0., 0., 0., 0., 0., 1.], dtype=float32)
# Carve a validation set out of the training data.
from sklearn.model_selection import train_test_split
# Hold out 30% of the training set for validation (70/30 split).
x_train, x_val,y_train,y_val = train_test_split(x_train,y_train,
                                                        test_size=0.3,random_state=777)
from keras.models import Sequential
from keras.layers import Dense, Flatten
  • Flatten : 배치 크기를 제외하고 데이터를 1차원 배열의 형태로 변환해줌
    ex) (128, 6, 2, 2) 입력 -> (128, 24)
# Baseline fully-connected classifier: 784 -> 64 -> 32 -> 10.
first_model = Sequential([
    Flatten(input_shape=(28, 28)),      # flatten each 28x28 image to a 784-vector
    Dense(64, activation='relu'),       # first hidden layer, 64 units
    Dense(32, activation='relu'),       # second hidden layer, 32 units
    Dense(10, activation='softmax'),    # probability distribution over the 10 classes
])
first_model.compile(
    optimizer='adam',                   # Adam optimizer
    loss='categorical_crossentropy',    # matches the one-hot encoded labels
    metrics=['acc'],                    # track accuracy during training
)
first_model.summary()
Model: "sequential"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 flatten (Flatten)           (None, 784)               0         
                                                                 
 dense (Dense)               (None, 64)                50240     
                                                                 
 dense_1 (Dense)             (None, 32)                2080      
                                                                 
 dense_2 (Dense)             (None, 10)                330       
                                                                 
=================================================================
Total params: 52,650
Trainable params: 52,650
Non-trainable params: 0
_________________________________________________________________
# Train for 30 epochs in mini-batches of 128, monitoring the validation set.
first_history = first_model.fit(x_train, y_train, 
                                    epochs=30, batch_size=128, 
                                    validation_data=(x_val, y_val))
Epoch 1/30
329/329 [==============================] - 5s 12ms/step - loss: 0.6675 - acc: 0.7718 - val_loss: 0.4710 - val_acc: 0.8411
Epoch 2/30
329/329 [==============================] - 3s 10ms/step - loss: 0.4476 - acc: 0.8412 - val_loss: 0.4334 - val_acc: 0.8452
Epoch 3/30
329/329 [==============================] - 4s 11ms/step - loss: 0.4094 - acc: 0.8545 - val_loss: 0.4176 - val_acc: 0.8523

Epoch 28/30
329/329 [==============================] - 1s 4ms/step - loss: 0.2094 - acc: 0.9223 - val_loss: 0.3362 - val_acc: 0.8827
Epoch 29/30
329/329 [==============================] - 1s 4ms/step - loss: 0.2061 - acc: 0.9232 - val_loss: 0.3461 - val_acc: 0.8814
Epoch 30/30
329/329 [==============================] - 1s 4ms/step - loss: 0.1978 - acc: 0.9268 - val_loss: 0.3532 - val_acc: 0.8868
# A deeper variant of the baseline: adds one 128-unit Dense layer.
second_model = Sequential()

second_model.add(Flatten(input_shape=(28,28))) 
second_model.add(Dense(128, activation = 'relu')) # extra Dense layer (only difference from first_model)
second_model.add(Dense(64, activation = 'relu')) 
second_model.add(Dense(32, activation = 'relu')) 
second_model.add(Dense(10, activation = 'softmax')) 

second_model.compile(optimizer='adam', 
                          loss='categorical_crossentropy', 
                          metrics=['acc']) 

second_history = second_model.fit(x_train, y_train, 
                                    epochs=30, batch_size=128, 
                                    validation_data=(x_val, y_val))
Epoch 1/30
329/329 [==============================] - 3s 7ms/step - loss: 0.6283 - acc: 0.7854 - val_loss: 0.4367 - val_acc: 0.8508
Epoch 2/30
329/329 [==============================] - 2s 6ms/step - loss: 0.4201 - acc: 0.8505 - val_loss: 0.4171 - val_acc: 0.8543
Epoch 3/30
329/329 [==============================] - 2s 7ms/step - loss: 0.3761 - acc: 0.8644 - val_loss: 0.3660 - val_acc: 0.8706

Epoch 28/30
329/329 [==============================] - 2s 7ms/step - loss: 0.1587 - acc: 0.9407 - val_loss: 0.3638 - val_acc: 0.8881
Epoch 29/30
329/329 [==============================] - 3s 9ms/step - loss: 0.1529 - acc: 0.9418 - val_loss: 0.3760 - val_acc: 0.8862
Epoch 30/30
329/329 [==============================] - 3s 9ms/step - loss: 0.1498 - acc: 0.9428 - val_loss: 0.3840 - val_acc: 0.8853
second_model.summary()
Model: "sequential_1"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 flatten_1 (Flatten)         (None, 784)               0         
                                                                 
 dense_3 (Dense)             (None, 128)               100480    
                                                                 
 dense_4 (Dense)             (None, 64)                8256      
                                                                 
 dense_5 (Dense)             (None, 32)                2080      
                                                                 
 dense_6 (Dense)             (None, 10)                330       
                                                                 
=================================================================
Total params: 111,146
Trainable params: 111,146
Non-trainable params: 0
_________________________________________________________________
# Plot the training curves of two models in a 2x2 grid
# (top row: model 1, bottom row: model 2; columns: loss / accuracy).
def draw_loss_acc(history1, history2, epochs):
  """Draw train/validation loss and accuracy for two Keras History objects.

  history1, history2: Keras History objects (their .history dicts are read).
  epochs: total epoch count; epochs 1..epochs-1 are plotted (epoch 0 dropped).
  """
  his_dict_1 = history1.history
  his_dict_2 = history2.history
  # Assumes key order is loss, acc, val_loss, val_acc — TODO confirm (depends
  # on the metrics passed to compile()).
  keys = list(his_dict_1.keys())

  epochs = range(1, epochs)  # x-axis values; the first epoch is skipped below
  fig = plt.figure(figsize=(10,10))
  ax = fig.add_subplot(1,1,1)
  # Invisible outer axes: used only to carry the shared x/y labels.
  ax.spines['top'].set_color('none')
  ax.spines['bottom'].set_color('none')
  ax.spines['left'].set_color('none')
  ax.spines['right'].set_color('none')
  ax.tick_params(labelcolor='w',top=False,bottom=False,left=False,right=False)

  for i in range(len(his_dict_1)):
    temp_ax = fig.add_subplot(2, 2, i+1)
    temp = keys[i%2] # i = 0,1,2,3 -> train keys: loss, acc, loss, acc
    val_temp = keys[(i+2)%2 + 2] # -> validation keys: val_loss, val_acc, val_loss, val_acc
    temp_history = his_dict_1 if i < 2 else his_dict_2 # first two panels: model 1; last two: model 2
    temp_ax.plot(epochs,temp_history[temp][1:],color='blue',label='train_'+temp)
    temp_ax.plot(epochs,temp_history[val_temp][1:],color='orange',label=val_temp)
    if(i==1 or i==3):  # accuracy panels (odd i): use a finer 0.01 y-tick step
      start,end = temp_ax.get_ylim()
      temp_ax.yaxis.set_ticks(np.arange(np.round(start,2),end,0.01))
    temp_ax.legend()
  ax.set_ylabel('loss',size=20,labelpad=20)
  ax.set_xlabel('Epochs',size=20,labelpad=20)
  plt.tight_layout()
  plt.show()

draw_loss_acc(first_history, second_history, 30)

# Mount Google Drive so images stored there can be read (Colab only).
from google.colab import drive
drive.mount('/content/drive')
Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).
from PIL import Image
import numpy as np
# Load a hand-made test image from Drive and preprocess it like Fashion-MNIST.
img = Image.open('/content/drive/MyDrive/Colab Notebooks/sesac_deeplearning/04_fashion_mnist_img/img02.jpg')
img = img.convert('L')  # convert to grayscale
img = img.resize((28,28))  # resize to the model's 28x28 input
img = np.array(img)
img = (255-img)/255  # invert (light item on dark background, as in the dataset) and scale to [0, 1]
plt.imshow(img, cmap='gray')
plt.show()

img.shape,x_train.shape
((28, 28), (42000, 28, 28))
result = first_model.predict(img.reshape(-1,28,28))
1/1 [==============================] - 0s 154ms/step
result.shape
(1, 10)
np.argmax(np.round(result,2))
8
class_names[np.argmax(np.round(result,2))]
'Bag'
# Second hand-made image: same preprocessing, then classify with first_model.
img = Image.open('/content/drive/MyDrive/Colab Notebooks/sesac_deeplearning/04_fashion_mnist_img/img03.jpg')
img = img.convert('L')  # convert to grayscale
img = img.resize((28,28))  # resize to the model's 28x28 input
img = np.array(img)
img = (255-img)/255  # invert and scale to [0, 1]
plt.imshow(img, cmap='gray')
plt.show()

result = first_model.predict(img.reshape(-1,28,28))  # reshape adds a batch dimension
np.argmax(np.round(result,2))
class_names[np.argmax(np.round(result,2))]  # most probable class name
1/1 [==============================] - 0s 17ms/step





'Sandal'
# Third hand-made image: same preprocessing pipeline.
img = Image.open('/content/drive/MyDrive/Colab Notebooks/sesac_deeplearning/04_fashion_mnist_img/img04.jpg')
img = img.convert('L')  # convert to grayscale
img = img.resize((28,28))  # resize to the model's 28x28 input
img = np.array(img)
img = (255-img)/255  # invert and scale to [0, 1]
plt.imshow(img, cmap='gray')
plt.show()

result = first_model.predict(img.reshape(-1,28,28))  # reshape adds a batch dimension
np.argmax(np.round(result,2))
class_names[np.argmax(np.round(result,2))]  # most probable class name
1/1 [==============================] - 0s 45ms/step





'Shirt'

보스턴 주택 가격 예측

from keras.datasets.boston_housing import load_data
# Load the Boston housing regression dataset (404 train / 102 test rows, 13 features).
(x_train, y_train), (x_test, y_test) = load_data()
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/boston_housing.npz
57026/57026 [==============================] - 0s 0us/step
x_train.shape,x_test.shape
((404, 13), (102, 13))
y_train.shape
(404,)
y_train[0]
15.2
import numpy as np

# Standardize each of the 13 features independently (z-score), using
# statistics computed on the training set only to avoid test-set leakage.
# FIX: the original computed a single scalar mean/std over the whole array,
# which mixes features of very different scales; per-feature normalization
# (axis=0) is the standard approach for tabular data.
mean = np.mean(x_train, axis=0)
std = np.std(x_train, axis=0)

x_train = (x_train-mean)/std
x_test = (x_test-mean)/std
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense

# Hold out 33% of the training data for validation.
x_train, x_val, y_train, y_val = train_test_split(
    x_train, y_train, test_size=0.33, random_state=777)

# Regression network: 13 features -> 64 -> 32 -> 1.
model = Sequential([
    Dense(64, activation='relu', input_shape=(13,)),
    Dense(32, activation='relu'),
    Dense(1),  # linear output: raw real-valued price prediction
])
model.compile(optimizer='adam', loss='mse', metrics=['mae'])

history = model.fit(x_train, y_train, epochs=300, validation_data=(x_val, y_val))
Epoch 1/300
9/9 [==============================] - 1s 25ms/step - loss: 566.8552 - mae: 21.7598 - val_loss: 558.5321 - val_mae: 22.0390
Epoch 2/300
9/9 [==============================] - 0s 9ms/step - loss: 532.5699 - mae: 20.9582 - val_loss: 525.8225 - val_mae: 21.2576
Epoch 3/300
9/9 [==============================] - 0s 6ms/step - loss: 499.9990 - mae: 
Epoch 298/300
13/13 [==============================] - 0s 6ms/step - loss: 32.3781 - mae: 3.8562 - val_loss: 26.3398 - val_mae: 3.7715
Epoch 299/300
13/13 [==============================] - 0s 7ms/step - loss: 32.3474 - mae: 3.9625 - val_loss: 24.9370 - val_mae: 3.5173
Epoch 300/300
13/13 [==============================] - 0s 6ms/step - loss: 32.5553 - mae: 3.9934 - val_loss: 25.2812 - val_mae: 3.4160
4/4 [==============================] - 0s 3ms/step - loss: 37.1221 - mae: 4.3381
print(mae_list)
print(np.mean(mae_list))
[4.550167083740234, 4.593438148498535, 4.338122367858887]
4.493909200032552

옷 종류와 색 맞추기

from google.colab import drive
drive.mount('/content/drive')
Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).
  • clothes_dataset.zip 압축해제
!pwd
/content
!mkdir clothes_dataset
mkdir: cannot create directory ‘clothes_dataset’: File exists
!unzip '/content/drive/MyDrive/Colab Notebooks/sesac_deeplearning/clothes_dataset.zip' -d ./clothes_dataset/
스트리밍 출력 내용이 길어서 마지막 5000줄이 삭제되었습니다.
  
  • 데이터 불러오기
import numpy as np
import pandas as pd
import tensorflow as tf
import glob as glob
import cv2

# Collect every clothes image path; the parent folder name encodes "<color>_<clothes>".
all_data = np.array(glob.glob('/content/clothes_dataset/*/*.jpg', recursive=True))

# Build the 11-dim multi-hot label (6 colors + 5 clothes types) for one image,
# and also return the color's integer index.
def check_cc(color, clothes):
    """Return (labels, color_index) for a (color, clothes) pair.

    labels: np.ndarray of shape (11,). Positions 0-5 one-hot the color
        (black, blue, brown, green, red, white); positions 6-10 one-hot the
        clothes type (dress, shirt, pants, shorts, shoes).
    color_index: integer index of the color (0-5).

    Raises KeyError for an unknown color (the original if/elif chain left
    color_index unbound and raised UnboundLocalError at the return). An
    unknown clothes value leaves positions 6-10 all zero, matching the
    original behavior.
    """
    # Dict lookups replace the two long if/elif chains.
    color_to_index = {'black': 0, 'blue': 1, 'brown': 2,
                      'green': 3, 'red': 4, 'white': 5}
    clothes_to_index = {'dress': 6, 'shirt': 7, 'pants': 8,
                        'shorts': 9, 'shoes': 10}

    labels = np.zeros(11,)
    color_index = color_to_index[color]
    labels[color_index] = 1

    if clothes in clothes_to_index:
        labels[clothes_to_index[clothes]] = 1

    return labels, color_index

# Allocate arrays for the multi-hot labels and the color indices.
all_labels = np.empty((all_data.shape[0], 11))
all_color_labels = np.empty((all_data.shape[0], 1))

for i, data in enumerate(all_data):
    # The parent folder name encodes the label, e.g. ".../red_shoes/xxx.jpg".
    # FIX: use the loop variable instead of re-indexing all_data[i].
    color, clothes = data.split('/')[-2].split('_')

    labels, color_index = check_cc(color, clothes)
    all_labels[i] = labels
    all_color_labels[i] = color_index

# Append the color index as a 12th column (used later as the 'color' column).
all_labels = np.concatenate((all_labels, all_color_labels), axis = -1)
all_data
array(['/content/clothes_dataset/red_shoes/57007b1e36f9b86f2832005bf20de8d3fe12b518.jpg',
       '/content/clothes_dataset/red_shoes/5e006b1eab73efeaa91fb76aa7c2d6e24706e60f.jpg',
       '/content/clothes_dataset/red_shoes/c23f9fcb3caebad169fd4b671cf71fd196fed7e3.jpg',
       ...,
       '/content/clothes_dataset/blue_shirt/83c86d0baf7782dc40aced68d451ad835bce930c.jpg',
       '/content/clothes_dataset/blue_shirt/7b0dae0a9bd09af24390c50089e14ed5874c060c.jpg',
       '/content/clothes_dataset/blue_shirt/c93ff1693d6d827ff4262c7bcf24c0d44ce397be.jpg'],
      dtype='<U82')
all_labels.shape
(11385, 12)
from sklearn.model_selection import train_test_split

# Split into train / validation / test sets (49% / 21% / 30% of the data).
train_x, test_x, train_y, test_y = train_test_split(all_data, all_labels, shuffle = True, test_size = 0.3,
                                                   random_state = 99)
train_x, val_x, train_y, val_y = train_test_split(train_x, train_y, shuffle = True, test_size = 0.3,
                                                 random_state = 99)
# Column names for the 12 label columns, in the exact order produced by
# check_cc (6 colors, 5 clothes types) plus the trailing color index.
_label_cols = ['black', 'blue', 'brown', 'green', 'red', 'white',
               'dress', 'shirt', 'pants', 'shorts', 'shoes', 'color']

def _make_label_df(paths, y):
    # Build a DataFrame with the image path plus one column per label.
    # Replaces three copies of the same 5-line dict literal (DRY).
    frame = {'image': paths}
    frame.update({col: y[:, i] for i, col in enumerate(_label_cols)})
    return pd.DataFrame(frame)

train_df = _make_label_df(train_x, train_y)
val_df = _make_label_df(val_x, val_y)
test_df = _make_label_df(test_x, test_y)
train_df.head()
image black blue brown green red white dress shirt pants shorts shoes color
0 /content/clothes_dataset/green_shorts/e74d11d3... 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 3.0
1 /content/clothes_dataset/black_dress/f1be32393... 1.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0
2 /content/clothes_dataset/black_shoes/04f78f68a... 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0
3 /content/clothes_dataset/brown_pants/0671d132b... 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 2.0
4 /content/clothes_dataset/white_shoes/59803fb01... 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 1.0 5.0

<svg xmlns="http://www.w3.org/2000/svg" height="24px"viewBox="0 0 24 24"
width="24px">



  <script>
    const buttonEl =
      document.querySelector('#df-27f4c4cc-2edf-4f54-8e6d-1f2ac6a62a64 button.colab-df-convert');
    buttonEl.style.display =
      google.colab.kernel.accessAllowed ? 'block' : 'none';

    async function convertToInteractive(key) {
      const element = document.querySelector('#df-27f4c4cc-2edf-4f54-8e6d-1f2ac6a62a64');
      const dataTable =
        await google.colab.kernel.invokeFunction('convertToInteractive',
                                                 [key], {});
      if (!dataTable) return;

      const docLinkHtml = 'Like what you see? Visit the ' +
        '<a target="_blank" href=https://colab.research.google.com/notebooks/data_table.ipynb>data table notebook</a>'
        + ' to learn more about interactive tables.';
      element.innerHTML = '';
      dataTable['output_type'] = 'display_data';
      await google.colab.output.renderOutput(dataTable, element);
      const docLink = document.createElement('div');
      docLink.innerHTML = docLinkHtml;
      element.appendChild(docLink);
    }
  </script>
</div>
# 저장할 경로
!mkdir csv_data
mkdir: cannot create directory ‘csv_data’: File exists
# Persist the splits so they can be reloaded without re-scanning the folders.
train_df.to_csv('/content/csv_data/train.csv', index=False)
val_df.to_csv('/content/csv_data/val.csv', index=False)
test_df.to_csv('/content/csv_data/test.csv', index=False)
# Image generators: only rescale pixels to [0, 1] (no augmentation).
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1./255)
val_datagen = ImageDataGenerator(rescale=1./255)

def get_steps(num_sampels, batch_size):
  """Return the number of batches needed to cover num_sampels samples.

  Equivalent to ceil(num_sampels / batch_size) for positive batch_size.
  NOTE: 'num_sampels' is a typo for 'num_samples'; the name is kept so any
  keyword-argument callers keep working.
  """
  # Ceiling division: one extra step when the last batch is partial.
  return (num_sampels + batch_size - 1) // batch_size
# Multi-label classifier over 11 labels (6 colors + 5 clothes types).
from keras.models import Sequential
from keras.layers import Dense, Flatten

model = Sequential([
    Flatten(input_shape=(112, 112, 3)),  # 112x112 RGB input, flattened to 37632
    Dense(128, activation='relu'),
    Dense(64, activation='relu'),
    # Sigmoid (not softmax): each of the 11 labels is an independent yes/no,
    # since an image has both a color label and a clothes-type label.
    Dense(11, activation='sigmoid'),
])
model.compile(optimizer='adam', loss='binary_crossentropy',
              metrics=['binary_accuracy'])
model.summary()
Model: "sequential_1"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 flatten_1 (Flatten)         (None, 37632)             0         
                                                                 
 dense_3 (Dense)             (None, 128)               4817024   
                                                                 
 dense_4 (Dense)             (None, 64)                8256      
                                                                 
 dense_5 (Dense)             (None, 11)                715       
                                                                 
=================================================================
Total params: 4,825,995
Trainable params: 4,825,995
Non-trainable params: 0
_________________________________________________________________
train_df.columns
Index(['image', 'black', 'blue', 'brown', 'green', 'red', 'white', 'dress',
       'shirt', 'pants', 'shorts', 'shoes', 'color'],
      dtype='object')
# Data generators that stream images from disk, driven by the DataFrames.
batch_size = 32
class_col =['black', 'blue', 'brown', 'green', 'red', 'white', 'dress','shirt', 'pants', 'shorts', 'shoes']

# class_mode='raw' passes the 11 label columns through unchanged (multi-label).
train_generator = train_datagen.flow_from_dataframe(dataframe=train_df,
                                                    x_col='image',
                                                    y_col=class_col,
                                                    target_size=(112,112),
                                                    color_mode='rgb',
                                                    class_mode='raw',
                                                    batch_size=batch_size,
                                                    shuffle=True,
                                                    seed=42)
val_generator = val_datagen.flow_from_dataframe(dataframe=val_df,
                                                    x_col='image',
                                                    y_col=class_col,
                                                    target_size=(112,112),
                                                    color_mode='rgb',
                                                    class_mode='raw',
                                                    batch_size=batch_size,
                                                    shuffle=True
                                                 )
Found 5578 validated image filenames.
Found 2391 validated image filenames.
# Train for 10 epochs; get_steps gives the batches per epoch (ceil(n / batch)).
model.fit(train_generator,
          steps_per_epoch=get_steps(len(train_df),batch_size), 
          validation_data=val_generator,
          validation_steps=get_steps(len(val_df),batch_size),
          epochs = 10)
Epoch 1/10
175/175 [==============================] - 28s 157ms/step - loss: 0.5666 - binary_accuracy: 0.8415 - val_loss: 0.2993 - val_binary_accuracy: 0.8875
Epoch 2/10
175/175 [==============================] - 29s 165ms/step - loss: 0.3014 - binary_accuracy: 0.8808 - val_loss: 0.3139 - val_binary_accuracy: 0.8778
Epoch 3/10
175/175 [==============================] - 27s 154ms/step - loss: 0.2451 - binary_accuracy: 0.9035 - val_loss: 0.2435 - val_binary_accuracy: 0.9037
Epoch 4/10
175/175 [==============================] - 34s 192ms/step - loss: 0.2215 - binary_accuracy: 0.9130 - val_loss: 0.2104 - val_binary_accuracy: 0.9182
Epoch 5/10
175/175 [==============================] - 32s 180ms/step - loss: 0.2134 - binary_accuracy: 0.9158 - val_loss: 0.2368 - val_binary_accuracy: 0.9118
Epoch 6/10
175/175 [==============================] - 30s 169ms/step - loss: 0.1952 - binary_accuracy: 0.9232 - val_loss: 0.2123 - val_binary_accuracy: 0.9210
Epoch 7/10
175/175 [==============================] - 28s 160ms/step - loss: 0.1900 - binary_accuracy: 0.9256 - val_loss: 0.1868 - val_binary_accuracy: 0.9272
Epoch 8/10
175/175 [==============================] - 33s 189ms/step - loss: 0.1800 - binary_accuracy: 0.9297 - val_loss: 0.2549 - val_binary_accuracy: 0.9053
Epoch 9/10
175/175 [==============================] - 31s 177ms/step - loss: 0.1699 - binary_accuracy: 0.9329 - val_loss: 0.1640 - val_binary_accuracy: 0.9369
Epoch 10/10
175/175 [==============================] - 32s 186ms/step - loss: 0.1587 - binary_accuracy: 0.9381 - val_loss: 0.2165 - val_binary_accuracy: 0.9186





<keras.callbacks.History at 0x7f69924b0e20>
test_datagen = ImageDataGenerator(rescale=1./255)

# class_mode=None: the generator yields only images (no labels) for prediction.
test_generator = test_datagen.flow_from_dataframe(dataframe=test_df,
                                                    x_col='image',
                                                    target_size=(112,112),
                                                    color_mode='rgb',
                                                    class_mode=None,
                                                    batch_size=batch_size,
                                                    shuffle=False,
                                                  )
preds = model.predict(test_generator,steps=get_steps(len(test_df),batch_size),verbose=1)
Found 3416 validated image filenames.
107/107 [==============================] - 12s 109ms/step
np.round(preds[0],2)
array([0.  , 0.  , 0.  , 1.  , 0.  , 0.  , 0.  , 1.  , 0.  , 0.02, 0.  ],
      dtype=float32)
import matplotlib.pyplot as plt
# Visualize the first 8 test predictions with their top-2 labels.
do_preds = preds[:8]
for i ,pred in enumerate(do_preds):
  plt.subplot(2,4,i+1)
  prob = zip(class_col,list(pred))
  # Sort (label, probability) pairs by descending probability.
  prob = sorted(list(prob),key=lambda x:x[1],reverse=True)
  image = cv2.imread(test_df['image'][i])
  image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; matplotlib expects RGB
  plt.imshow(image)
  plt.title(f'{prob[0][0]}:{round(prob[0][1]*100,2)}% \n{prob[1][0]}:{round(prob[1][1]*100,2)}%')
plt.tight_layout()
plt.show()

# Predict on a small folder of personal images via flow_from_directory.
data_datagen = ImageDataGenerator(rescale=1./255)

data_generator = data_datagen.flow_from_directory(
                                                    directory='/content/drive/MyDrive/Colab Notebooks/sesac_deeplearning/06_clothes_img',
                                                    target_size=(112,112),
                                                    color_mode='rgb',
                                                    batch_size=batch_size,
                                                    shuffle=False,
                                                  )
# NOTE(review): steps is computed for 2 samples, but the generator reports
# finding 4 images — confirm the intended sample count (both fit in 1 batch).
result = model.predict(data_generator,steps=get_steps(2,batch_size),verbose=1)
Found 4 images belonging to 1 classes.
1/1 [==============================] - 0s 89ms/step
result
array([[1.52786851e-01, 6.52106421e-04, 9.56296504e-01, 1.85247824e-01,
        8.73344397e-05, 2.28309003e-03, 1.27795630e-03, 1.63213081e-05,
        4.72517684e-03, 2.22380459e-03, 9.80769515e-01],
       [4.02874887e-01, 1.39958924e-04, 2.90269911e-01, 2.09356956e-02,
        4.10276145e-04, 2.19159517e-02, 5.00542298e-03, 1.35187315e-06,
        1.68185332e-03, 1.23237018e-02, 9.77578521e-01],
       [1.79782207e-03, 4.62408469e-04, 4.34684247e-01, 6.77505648e-03,
        7.25663122e-05, 6.68699384e-01, 1.03075884e-01, 1.37986478e-06,
        2.92289741e-02, 2.66093817e-02, 7.58549571e-01],
       [9.93593596e-03, 6.96583709e-04, 1.71822265e-01, 2.91292294e-04,
        1.42630748e-03, 3.08194607e-01, 6.62644506e-02, 4.05802979e-07,
        1.50276301e-02, 2.48940680e-02, 6.62990630e-01]], dtype=float32)
np.round(result,2)
array([[0.15, 0.  , 0.96, 0.19, 0.  , 0.  , 0.  , 0.  , 0.  , 0.  , 0.98],
       [0.4 , 0.  , 0.29, 0.02, 0.  , 0.02, 0.01, 0.  , 0.  , 0.01, 0.98],
       [0.  , 0.  , 0.43, 0.01, 0.  , 0.67, 0.1 , 0.  , 0.03, 0.03, 0.76],
       [0.01, 0.  , 0.17, 0.  , 0.  , 0.31, 0.07, 0.  , 0.02, 0.02, 0.66]],
      dtype=float32)
# Print the top-2 predicted labels (with percentages) for each custom image.
for i, pred in enumerate(result):
  prob = zip(class_col,list(pred))
  prob = sorted(list(prob),key=lambda x:x[1],reverse=True)
  print((f'{prob[0][0]}:{round(prob[0][1]*100,2)}% \n{prob[1][0]}:{round(prob[1][1]*100,2)}%'))
shoes:98.08% 
brown:95.63%
shoes:97.76% 
black:40.29%
shoes:75.85% 
white:66.87%
shoes:66.3% 
white:30.82%
profile
초보 개발자의 학습 저장용 블로그

0개의 댓글