A First Taste of CNNs

CSH_tech · September 7, 2023

Dataset: AIHub household waste image dataset
Classes: [vinyl, glass bottle, paper, can, PET bottle, plastic]
Data used: 100 images randomly sampled per class (a sampling sketch follows below)
Main library: torch==2.0.0+cu118
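The post itself does not show how the samples were drawn, so here is a minimal sketch of one way the per-class sampling and split could be done. The per-class folder layout, the 80/10/10 split, and the prepare_split helper are assumptions for illustration, not part of the original pipeline: it copies each sampled image (and its .Json annotation, if present) into data/train, data/val, and data/test with a class-index prefix so the Dataset class below can read the label from the filename.

import os
import random
import shutil

# Hypothetical layout: raw/<class_idx>/ holds every image of one class.
def prepare_split(raw_dir='raw', out_dir='data', per_class=100, seed=42):
    random.seed(seed)
    for split in ('train', 'val', 'test'):
        os.makedirs(os.path.join(out_dir, split), exist_ok=True)
    for class_idx in sorted(os.listdir(raw_dir)):
        class_dir = os.path.join(raw_dir, class_idx)
        images = [f for f in os.listdir(class_dir) if f.endswith('.jpg')]
        sampled = random.sample(images, per_class)          # 100 random images per class
        splits = {'train': sampled[:80], 'val': sampled[80:90], 'test': sampled[90:]}
        for split, names in splits.items():
            for name in names:
                # copy the image and, if it exists, the matching .Json annotation
                for fname in (name, name.rsplit('.', 1)[0] + '.Json'):
                    src = os.path.join(class_dir, fname)
                    if os.path.exists(src):
                        shutil.copy(src, os.path.join(out_dir, split, f'{class_idx}_{fname}'))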


Importing the libraries

from torch.utils.data import Dataset
from torchvision.io import read_image
from tqdm import tqdm

import torch
import torchvision.transforms as transforms
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
import os

Basic settings

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # fall back to CPU if no GPU is available
data_path = 'data'
train_path = os.path.join(data_path, 'train')
val_path = os.path.join(data_path, 'val')
test_path = os.path.join(data_path, 'test')
img_width = 640
img_height = 640
batch_size = 32

Loading the dataset and transforming the data

class CustomImageDataset(Dataset):
    def __init__(self, data_path):
        self.data_dir = data_path
        # each '<class_idx>_xxx.jpg' image has a matching '<class_idx>_xxx.Json' annotation;
        # sorting the directory listing keeps images and labels aligned at the same index
        files = sorted(os.listdir(self.data_dir))
        self.images = [x for x in files if x.endswith('.jpg')]
        self.labels = [int(x.split('_')[0]) for x in files if x.endswith('.Json')]
        # Resize expects (height, width); antialias=True gives smoother downscaling
        self.transform = transforms.Compose([transforms.Resize(size=(img_height, img_width), antialias=True)])
        self.target_transform = None

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        image_path = os.path.join(self.data_dir, self.images[idx])
        image = read_image(image_path) / 255.0  # uint8 [0, 255] -> float [0, 1]
        label = self.labels[idx]
        if self.transform:
            image = self.transform(image)
        if self.target_transform:
            label = self.target_transform(label)
        return image, label

train_dataset = CustomImageDataset(train_path)
val_dataset = CustomImageDataset(val_path)
test_dataset = CustomImageDataset(test_path)
# shuffle only the training data; validation and test order can stay fixed
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
val_loader = torch.utils.data.DataLoader(dataset=val_dataset, batch_size=batch_size)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size)
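A quick sanity check (not in the original post) that one batch comes out in the shape the Resize transform should produce:

# pull one batch to confirm tensor shapes and label values
batch_images, batch_labels = next(iter(train_loader))
print(batch_images.shape)   # expected: torch.Size([32, 3, 640, 640])
print(batch_labels[:8])     # class indices in the range 0..5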

Feature extraction and classification

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, 3, padding='same')
        self.conv2 = nn.Conv2d(64, 32, 3, padding='same')
        self.pool = nn.MaxPool2d(2, 2)      # one pooling layer is enough; it has no parameters
        self.dropout = nn.Dropout(0.3)      # registered as a module so net.eval() disables it
        # two 2x2 poolings halve 640 twice: 640 -> 320 -> 160, hence 32*160*160 flattened features
        self.fc1 = nn.Linear(32*160*160, 1024)
        self.fc2 = nn.Linear(1024, 6)
    
    def forward(self, x):
        x = self.pool(torch.relu(self.conv1(x)))
        x = self.pool(torch.relu(self.conv2(x)))
        x = x.view(x.shape[0], -1)
        x = torch.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)
        return x
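The fc1 input size 32*160*160 comes from the two 2×2 max-pools halving 640 twice (640 → 320 → 160) while padding='same' keeps the convolutions from shrinking the feature maps. A dummy forward pass (not in the original) can confirm the numbers:

# trace the feature-map shape through the conv/pool stack with a dummy input
with torch.no_grad():
    tmp_net = Net()
    feat = tmp_net.pool(torch.relu(tmp_net.conv1(torch.zeros(1, 3, img_height, img_width))))
    feat = tmp_net.pool(torch.relu(tmp_net.conv2(feat)))
    print(feat.shape, feat.flatten(1).shape)   # (1, 32, 160, 160) and (1, 819200) = 32*160*160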

Loss / optimizer

net = Net().to(device)  # move the model to the GPU (or CPU fallback)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters())

Training

net.train()
for epoch in tqdm(range(10)):
    running_loss = 0.0
    for i, data in enumerate(train_loader, 0):
        inputs, labels = data
        # .to(device) is not in-place for tensors, so the result must be reassigned
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()

        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
    # with only a few hundred training images and batch_size=32, an epoch has far fewer than
    # 100 batches, so report the average loss once per epoch instead of every 100 iterations
    print(f'[epoch {epoch + 1}] loss: {running_loss / len(train_loader):.3f}')

print('Finished Training')
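matplotlib is imported above but never used; a natural use (an addition, not in the original) is plotting the loss curve. This assumes the per-epoch average losses were collected into a list, e.g. by appending running_loss / len(train_loader) each epoch:

# hypothetical helper: plot per-epoch average training losses collected during the loop above
def plot_loss_curve(epoch_losses):
    plt.figure()
    plt.plot(range(1, len(epoch_losses) + 1), epoch_losses, marker='o')
    plt.xlabel('epoch')
    plt.ylabel('average training loss')
    plt.title('Training loss per epoch')
    plt.show()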

Results

net.eval()  # switch off dropout for evaluation
correct = 0
total = 0
with torch.no_grad():
    for data in tqdm(test_loader):
        images, labels = data
        images, labels = images.to(device), labels.to(device)
        outputs = net(images)
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print(f'Test accuracy: {100 * correct / total:.2f}%')

Test accuracy: 21.67%
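With six classes, 21.67% is barely above the ~16.7% random-guess baseline, so it is worth checking whether the model collapses onto one or two classes. A per-class breakdown (an addition, not in the original post; the class names assume the order of the class list at the top) could look like this:

# per-class accuracy on the test set, to see whether predictions collapse onto a few classes
class_names = ['vinyl', 'glass bottle', 'paper', 'can', 'PET bottle', 'plastic']
class_correct = [0] * len(class_names)
class_total = [0] * len(class_names)
net.eval()
with torch.no_grad():
    for images, labels in test_loader:
        images, labels = images.to(device), labels.to(device)
        preds = net(images).argmax(dim=1)
        for label, pred in zip(labels.tolist(), preds.tolist()):
            class_total[label] += 1
            class_correct[label] += int(label == pred)

for name, c, t in zip(class_names, class_correct, class_total):
    print(f'{name}: {100 * c / max(t, 1):.1f}% ({c}/{t})')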
