제로베이스 데이터취업스쿨 DAY86-107 자동차브랜드분류 PyTorch

NAYOUNG KIM·2023년 7월 21일
0
post-thumbnail

VGGNet

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.models as models
import os

from torchvision import datasets, transforms
from torchsummary import summary
from tqdm import tqdm

from google.colab import drive

import matplotlib.pyplot as plt
%matplotlib inline
# Mount Google Drive so the dataset folders below are reachable (Colab only).
drive.mount('/content/drive')

# Dataset paths (ImageFolder layout: one sub-directory per car-brand class).
train_path = '/content/drive/MyDrive/딥러닝_프로젝트/Car_Brand_Logos/Train'
test_path = '/content/drive/MyDrive/딥러닝_프로젝트/Car_Brand_Logos/Test'

# Use the GPU when available, otherwise fall back to the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Input pipeline. VGG16 below is loaded with ImageNet-pretrained weights,
# and torchvision's pretrained models expect inputs normalized with the
# ImageNet channel statistics — the original pipeline omitted this step.
resize_trans = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

train_set = datasets.ImageFolder(root=train_path, transform=resize_trans)
test_set = datasets.ImageFolder(root=test_path, transform=resize_trans)

train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=32, shuffle=False)

# Load an ImageNet-pretrained VGG16 and move it to the training device.
vgg = models.vgg16(pretrained=True).to(device)

# Freeze the convolutional feature extractor so its weights stay fixed;
# only the classifier head will receive gradient updates.
for p in vgg.features.parameters():
    p.requires_grad_(False)

# The stock VGG16 classifier flattens the features into a 25088-dim vector
# (its first Linear has in_features=25088). Replace it with a smaller head
# that ends in 8 logits — one per car brand.
head = []
for in_f, out_f in ((25088, 256), (256, 64)):
    head += [nn.Linear(in_f, out_f), nn.ReLU(), nn.Dropout(0.5)]
head.append(nn.Linear(64, 8))
vgg.classifier = nn.Sequential(*head)

# train: one full pass over the training data
def model_train(model, data_loader, loss_fn, optimizer, device):
  """Train `model` for one epoch.

  Returns a (mean loss, accuracy) tuple computed over the whole dataset.
  """
  model.train()

  total_loss = 0.0
  n_correct = 0

  for img, lbl in tqdm(data_loader):
    img, lbl = img.to(device), lbl.to(device)

    optimizer.zero_grad()
    output = model(img)
    loss = loss_fn(output, lbl)
    loss.backward()    # back-propagate gradients
    optimizer.step()   # apply the parameter update

    pred = output.argmax(dim=1)
    n_correct += (pred == lbl).sum().item()   # running count of correct labels
    total_loss += loss.item() * img.size(0)   # per-batch mean loss -> batch total

  n_samples = len(data_loader.dataset)
  return total_loss / n_samples, n_correct / n_samples

# eval: measure loss/accuracy without updating the model
def model_eval(model, data_loader, loss_fn, device):
  """Evaluate `model` on `data_loader`; return (mean loss, accuracy)."""
  model.eval()

  total_loss = 0.0
  n_correct = 0

  # No gradients are needed during evaluation.
  with torch.no_grad():
    for img, lbl in data_loader:
      img, lbl = img.to(device), lbl.to(device)

      output = model(img)
      pred = output.argmax(dim=1)

      n_correct += (pred == lbl).sum().item()
      total_loss += loss_fn(output, lbl).item() * img.size(0)

  n_samples = len(data_loader.dataset)
  return total_loss / n_samples, n_correct / n_samples
# Make sure the model lives on the chosen device before building the optimizer.
vgg = vgg.to(device)
# Cross-entropy over the 8 brand logits; SGD over all parameters
# (the frozen feature extractor contributes no gradients).
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(vgg.parameters(), lr=0.003)

num_epochs = 50

# Per-epoch history, kept for plotting learning curves afterwards.
train_loss_list = []
train_accuracy_list = []
test_loss_list = []
test_accuracy_list = []

for epoch in range(num_epochs):
  
  # One training epoch over the training set.
  train_loss, train_acc = model_train(vgg, train_loader, criterion, optimizer, device)
  
  train_loss_list.append(train_loss)
  train_accuracy_list.append(train_acc)

  # Evaluate on the held-out test set after every epoch.
  test_loss, test_acc = model_eval(vgg, test_loader, criterion, device)
  
  test_loss_list.append(test_loss)
  test_accuracy_list.append(test_acc)

  print(f'epoch {epoch+1:02d}, loss: {train_loss:.5f}, acc: {train_acc:.5f}, val_loss: {test_loss:.5f}, val_accuracy: {test_acc:.5f}')

ResNet

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.models as models
import os

from torchvision import datasets, transforms
from torchsummary import summary
from tqdm import tqdm

from google.colab import drive

import matplotlib.pyplot as plt
%matplotlib inline
# Mount Google Drive so the dataset folders below are reachable (Colab only).
drive.mount('/content/drive')

# Dataset paths (ImageFolder layout: one sub-directory per car-brand class).
train_path = '/content/drive/MyDrive/딥러닝_프로젝트/Car_Brand_Logos/Train'
test_path = '/content/drive/MyDrive/딥러닝_프로젝트/Car_Brand_Logos/Test'

# Use the GPU when available, otherwise fall back to the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# ImageNet channel statistics expected by torchvision's pretrained models.
_IMAGENET_MEAN = [0.485, 0.456, 0.406]
_IMAGENET_STD = [0.229, 0.224, 0.225]

# Training pipeline: augment on the PIL image FIRST, then convert to a
# tensor and normalize. (Previously the augmentations ran after Normalize,
# which distorts the statistics — ops like RandomInvert assume raw pixels.)
train_trans = transforms.Compose([
    transforms.Resize((256, 256)),
    # Data augmentation — applied to the training set only.
    transforms.RandomCrop([180, 180]),
    transforms.RandomVerticalFlip(p=0.3),
    transforms.RandomHorizontalFlip(p=0.3),
    transforms.RandomInvert(),
    transforms.RandomAffine(60),
    transforms.RandomPerspective(),
    transforms.RandomGrayscale(0.3),
    transforms.ToTensor(),
    transforms.Normalize(mean=_IMAGENET_MEAN, std=_IMAGENET_STD),
])

# Evaluation pipeline: deterministic — the previous code applied the random
# augmentations to the test set too, which makes evaluation noisy and biased.
resize_trans = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.ToTensor(),
    transforms.Normalize(mean=_IMAGENET_MEAN, std=_IMAGENET_STD),
])

train_set = datasets.ImageFolder(root=train_path, transform=train_trans)
test_set = datasets.ImageFolder(root=test_path, transform=resize_trans)

train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=32, shuffle=False)

# Load an ImageNet-pretrained ResNet-50.
resnet = models.resnet50(pretrained=True)

# Freeze the entire backbone (transfer learning: features stay fixed).
for p in resnet.parameters():
    p.requires_grad_(False)

# Swap the final fully-connected layer for an 8-way (car-brand) classifier.
# A freshly created Linear has requires_grad=True, so it remains trainable.
fc_in_features = resnet.fc.in_features
resnet.fc = nn.Linear(fc_in_features, 8)
resnet = resnet.to(device)

# summary(resnet, (3, 256, 256))  # optional: inspect the architecture
# train: one epoch of optimization over the training loader
def model_train(model, data_loader, loss_fn, optimizer, device):
  """One training epoch; returns (dataset-mean loss, accuracy)."""
  model.train()

  loss_sum = 0.0
  hits = 0

  for img, lbl in tqdm(data_loader):
    img = img.to(device)
    lbl = lbl.to(device)

    optimizer.zero_grad()
    logits = model(img)
    batch_loss = loss_fn(logits, lbl)
    batch_loss.backward()   # back-propagate
    optimizer.step()        # update parameters

    hits += (logits.argmax(dim=1) == lbl).sum().item()   # correct predictions so far
    loss_sum += batch_loss.item() * img.size(0)          # batch mean loss -> batch sum

  total = len(data_loader.dataset)
  return loss_sum / total, hits / total

# eval: loss/accuracy on a loader, with gradients disabled
def model_eval(model, data_loader, loss_fn, device):
  """Evaluate without gradient tracking; returns (mean loss, accuracy)."""
  model.eval()

  loss_sum = 0.0
  hits = 0

  with torch.no_grad():
    for img, lbl in data_loader:
      img = img.to(device)
      lbl = lbl.to(device)

      logits = model(img)

      hits += (logits.argmax(dim=1) == lbl).sum().item()
      loss_sum += loss_fn(logits, lbl).item() * img.size(0)

  total = len(data_loader.dataset)
  return loss_sum / total, hits / total
# Cross-entropy over the 8 brand logits; SGD over all parameters
# (only the un-frozen fc layer actually receives gradients).
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(resnet.parameters(), lr=0.003)

num_epochs = 50

# Per-epoch history, kept for plotting learning curves afterwards.
train_loss_list = []
train_accuracy_list = []
test_loss_list = []
test_accuracy_list = []

for epoch in range(num_epochs):
  
  # One training epoch over the training set.
  train_loss, train_acc = model_train(resnet, train_loader, criterion, optimizer, device)
  
  train_loss_list.append(train_loss)
  train_accuracy_list.append(train_acc)

  # Evaluate on the held-out test set after every epoch.
  test_loss, test_acc = model_eval(resnet, test_loader, criterion, device)
  
  test_loss_list.append(test_loss)
  test_accuracy_list.append(test_acc)

  print(f'epoch {epoch+1:02d}, loss: {train_loss:.5f}, acc: {train_acc:.5f}, val_loss: {test_loss:.5f}, val_accuracy: {test_acc:.5f}')
profile
21세기 주인공

1개의 댓글

comment-user-thumbnail
2023년 7월 21일

VGGNet과 ResNet을 이용한 딥러닝 모델링 내용이 흥미로웠습니다. 코드 설명이 상세해서 따라해보면서 많이 배울 수 있었습니다. 잘 읽었습니다. 계속해서 좋은 글 부탁드려요!

답글 달기