[Transportation Object Detection]
Goal
Object detection of 8 transportation classes using YOLOv8n
Categories
- bicycle
- car
- motorcycle
- airplane
- bus
- train
- truck
- boat
Dataset
- Only the val2017 split of the COCO dataset is used
Reference: [Computer Vision] COCO Dataset (situdy.tistory.com), https://cocodataset.org/#home
Development Environment
* Colab
Since training will run for many epochs, use a GPU.
- Change the runtime type to GPU
import torch
import torch.nn as nn
import torchvision.datasets as dsets
torch.cuda.is_available()
=> If this prints True, the GPU is connected
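Optionally, you can also confirm which GPU Colab assigned (assuming at least one CUDA device is visible):
print(torch.cuda.get_device_name(0))  # e.g. 'Tesla T4' on a typical Colab runtime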
💻 Hands-On
Download the dataset (only val2017 of the COCO dataset is used)
!wget http://images.cocodataset.org/zips/val2017.zip
!wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip
Unzip the dataset
!unzip val2017.zip -d /content/drive/MyDrive/datasets/coco
!unzip annotations_trainval2017.zip -d /content/drive/MyDrive/datasets
COCO dataset categories
- Print the category list using the annotation .json file
from pycocotools.coco import COCO
import numpy as np
import skimage.io as io
import matplotlib.pyplot as plt
import pylab
pylab.rcParams['figure.figsize'] = (8.0, 10.0)
dataDir = '/content/drive/MyDrive/datasets'
dataType = 'val2017'
annFile = '{}/annotations/instances_{}.json'.format(dataDir, dataType)
# initialize COCO api for instance annotations
coco=COCO(annFile)
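The listing below is the output of the standard pycocotools category queries. The snippet that prints it is not shown above, so here is a minimal sketch following the official COCO API demo:
cats = coco.loadCats(coco.getCatIds())
nms = [cat['name'] for cat in cats]
print('COCO categories: \n{}\n'.format(' '.join(nms)))
sup_nms = set([cat['supercategory'] for cat in cats])
print('COCO supercategories: \n{}'.format(' '.join(sup_nms)))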
COCO categories:
person bicycle car motorcycle airplane bus train truck boat traffic light fire hydrant stop sign parking meter bench bird cat dog horse sheep cow elephant bear zebra giraffe backpack umbrella handbag tie suitcase frisbee skis snowboard sports ball kite baseball bat baseball glove skateboard surfboard tennis racket bottle wine glass cup fork knife spoon bowl banana apple sandwich orange broccoli carrot hot dog pizza donut cake chair couch potted plant bed dining table toilet tv laptop mouse remote keyboard cell phone microwave oven toaster sink refrigerator book clock vase scissors teddy bear hair drier toothbrush
COCO supercategories:
vehicle outdoor kitchen person animal appliance electronic accessory indoor sports furniture food
- This task uses the 'vehicle' supercategory
- Categories included in vehicle:
[ bicycle, car, motorcycle, airplane, bus, train, truck, boat ]
- Display a sample image from the vehicle categories
e.g. bus, motorcycle
catIds = coco.getCatIds(catNms=['bus','motorcycle']);
imgIds = coco.getImgIds(catIds=catIds );
imgIds = coco.getImgIds(imgIds =imgIds)
img = coco.loadImgs(imgIds[np.random.randint(0,len(imgIds))])[0]
I = io.imread(img['coco_url'])
plt.axis('off')
plt.imshow(I)
plt.show()
-> This displays an image that contains bus and motorcycle objects
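To also overlay the ground-truth annotations on that image, pycocotools provides showAnns. A minimal sketch reusing img, I and catIds from above (the draw_bbox flag requires a reasonably recent pycocotools):
annIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds, iscrowd=None)
anns = coco.loadAnns(annIds)
plt.imshow(I)
plt.axis('off')
coco.showAnns(anns, draw_bbox=True)
plt.show()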
* Print the vehicle category IDs
target_categories = ['bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat']
category_ids = []
for cat_name in target_categories:
    cat_id = coco.getCatIds(catNms=[cat_name])
    if cat_id:
        category_ids.extend(cat_id)
print('Category names:', target_categories)
print('Category IDs:', category_ids)
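As a cross-check, the same eight IDs can be obtained in a single call through the 'vehicle' supercategory:
vehicle_ids = coco.getCatIds(supNms=['vehicle'])
print('Vehicle category IDs:', vehicle_ids)  # expected to match the list above: [2, 3, 4, 5, 6, 7, 8, 9]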
* Create an annotation .json file containing only the vehicle categories
- Uses the category IDs that correspond to the vehicle classes
import os
import json
from pycocotools.coco import COCO
def filter_coco_data(coco, class_names):
    filtered_images = []
    filtered_annotations = []
    seen_img_ids = set()  # avoid duplicate image entries when an image contains several target classes
    for class_name in class_names:
        cat_ids = coco.getCatIds(catNms=[class_name])
        img_ids = coco.getImgIds(catIds=cat_ids)
        ann_ids = coco.getAnnIds(catIds=cat_ids)
        images = [img for img in coco.loadImgs(ids=img_ids) if img['id'] not in seen_img_ids]
        seen_img_ids.update(img['id'] for img in images)
        annotations = coco.loadAnns(ids=ann_ids)
        filtered_images.extend(images)
        filtered_annotations.extend(annotations)
    return filtered_images, filtered_annotations
# Set the COCO dataset path and the target class names.
data_dir = '/content/drive/MyDrive/datasets/coco'
data_type = 'val2017'
ann_file = '/content/drive/MyDrive/datasets/annotations/instances_val2017.json'
coco = COCO(ann_file)
# Add the desired class names to this list.
SUBCLASS = ['bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat']
filtered_images, filtered_annotations = filter_coco_data(coco, SUBCLASS)
# Create the new JSON file.
filter_name = "_".join(SUBCLASS)
new_ann_file = '/content/drive/MyDrive/datasets/instances_vehicle.json'
with open(new_ann_file, 'w') as f:
    json.dump({
        'images': filtered_images,
        'annotations': filtered_annotations,
        'categories': coco.loadCats(coco.getCatIds())
    }, f)
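A quick sanity check (not in the original post) is to reload the filtered file with the COCO API and print the image and annotation counts:
coco_vehicle = COCO(new_ann_file)
print('images:', len(coco_vehicle.getImgIds()))
print('annotations:', len(coco_vehicle.getAnnIds()))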
* Map image files to label text files
- In the original dataset the vehicle classes have category IDs 2, 3, 4, 5, 6, 7, 8, 9; remap them to consecutive indices starting from 0.
category_ids = [2, 3, 4, 5, 6, 7, 8, 9]
mapping = {2:0,
3:1,
4:2,
5:3,
6:4,
7:5,
8:6,
9:7,
}
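The same mapping can also be built programmatically from category_ids, which avoids typos if the class set ever changes:
# Equivalent to the hand-written dict above
mapping = {cat_id: idx for idx, cat_id in enumerate(category_ids)}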
import shutil
import os
def convert_coco_to_yolo(coco, annotations, img_dir, output_dir, mapping=mapping):
    yolo_labels_dir = os.path.join(output_dir, 'labels')
    yolo_images_dir = os.path.join(output_dir, 'images')
    os.makedirs(yolo_labels_dir, exist_ok=True)
    os.makedirs(yolo_images_dir, exist_ok=True)
    for ann in annotations:
        img = coco.loadImgs(ids=[ann['image_id']])[0]
        img_width, img_height = img['width'], img['height']
        x, y, width, height = ann['bbox']
        # Normalize bbox coordinates to YOLO format (x_center, y_center, width, height)
        x_center = (x + width / 2) / img_width
        y_center = (y + height / 2) / img_height
        width /= img_width
        height /= img_height
        # Append one line per annotation to the YOLO label file for this image
        label_file = os.path.join(yolo_labels_dir, img['file_name'].replace('.jpg', '.txt'))
        with open(label_file, 'a') as f:
            f.write(f"{mapping[ann['category_id']]} {x_center} {y_center} {width} {height}\n")
        # Copy the image file to the YOLO images folder
        src_img_file = os.path.join(img_dir, img['file_name'])
        dst_img_file = os.path.join(yolo_images_dir, img['file_name'])
        shutil.copyfile(src_img_file, dst_img_file)
img_dir = os.path.join(data_dir, data_type)
output_dir = '/content/drive/MyDrive/datasets'
convert_coco_to_yolo(coco, filtered_annotations, img_dir, output_dir)
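Before splitting, it is worth checking that every copied image has a matching label file; a small verification sketch using the paths above:
image_stems = {f[:-4] for f in os.listdir(os.path.join(output_dir, 'images')) if f.endswith('.jpg')}
label_stems = {f[:-4] for f in os.listdir(os.path.join(output_dir, 'labels')) if f.endswith('.txt')}
print('images:', len(image_stems), 'labels:', len(label_stems), 'images without labels:', len(image_stems - label_stems))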
* Split into train/val/test folders
- Split at a ratio of 8:1:1.
import os
import shutil
from sklearn.model_selection import train_test_split
# Folder paths of the original dataset
image_folder = "/content/drive/MyDrive/datasets/images/"
label_folder = "/content/drive/MyDrive/datasets/labels/"
# Target folder path
target_folder = "/content/drive/MyDrive/datasets/training_dataset/"
os.makedirs(target_folder, exist_ok=True)
# Train / validation / test ratios
train_ratio = 0.8
val_ratio = 0.1
test_ratio = 0.1
# Collect all image files
all_images = os.listdir(image_folder)
# Split the images; the matching label files are moved together below
train_images, rest_images = train_test_split(all_images, test_size=(val_ratio + test_ratio), random_state=42)
val_images, test_images = train_test_split(rest_images, test_size=test_ratio/(val_ratio + test_ratio), random_state=42)
# Create a folder for each split and move the files into it
for set_name, set_images in zip(['train', 'val', 'test'], [train_images, val_images, test_images]):
    set_folder = os.path.join(target_folder, set_name)
    os.makedirs(set_folder, exist_ok=True)
    # Create 'images' and 'labels' subfolders
    os.makedirs(os.path.join(set_folder, 'images'), exist_ok=True)
    os.makedirs(os.path.join(set_folder, 'labels'), exist_ok=True)
    for image in set_images:
        # Move the image
        src_image_path = os.path.join(image_folder, image)
        dst_image_path = os.path.join(set_folder, 'images', image)
        shutil.move(src_image_path, dst_image_path)
        # Move the matching label file
        label_file_name = image.replace('.jpg', '.txt')
        src_label_path = os.path.join(label_folder, label_file_name)
        dst_label_path = os.path.join(set_folder, 'labels', label_file_name)
        shutil.move(src_label_path, dst_label_path)
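A quick count confirms the split is roughly 8:1:1:
for set_name in ['train', 'val', 'test']:
    n_images = len(os.listdir(os.path.join(target_folder, set_name, 'images')))
    print(set_name, n_images)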
* Create the .yaml file
- Create the yaml file used for training.
- Specify the train/val/test paths
- Specify the classes
class_name = 'vehicle'
#Category names: ['bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat']
#Category IDs: [2, 3, 4, 5, 6, 7, 8, 9]
config_txt = f'''
path: /content/drive/MyDrive/datasets/{class_name} # dataset root dir
train: /content/drive/MyDrive/datasets/training_dataset/train/images
val: /content/drive/MyDrive/datasets/training_dataset/val/images
test: /content/drive/MyDrive/datasets/training_dataset/test/images
# Classes
names:
0: bicycle
1: car
2: motorcycle
3: airplane
4: bus
5: train
6: truck
7: boat
'''
with open(f"/content/drive/MyDrive/datasets/{class_name}.yaml", 'w') as f:
f.write(config_txt)
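To confirm the file is valid YAML, it can be read back with PyYAML (preinstalled in Colab):
import yaml
with open(f"/content/drive/MyDrive/datasets/{class_name}.yaml") as f:
    print(yaml.safe_load(f))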
* Train the model
- Uses yolov8n
- The best and the most recent weights are saved (best.pt, last.pt)
!pip install ultralytics
from ultralytics import YOLO
# Create a new YOLO model from scratch
model = YOLO('yolov8n.yaml')
# Load a pretrained YOLO model (recommended for training)
model = YOLO('yolov8n.pt')
# Train the model using the 'vehicle.yaml' dataset for 150 epochs
results = model.train(data=f'/content/drive/MyDrive/datasets/vehicle.yaml', epochs=150)
# Evaluate the model's performance on the validation set
results = model.val()
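By default, Ultralytics writes the weights to runs/detect/train/weights/ as best.pt and last.pt (the run folder name may vary, e.g. train2). A rough sketch of loading the best checkpoint and evaluating it on the held-out test split:
best = YOLO('runs/detect/train/weights/best.pt')  # adjust the run folder name if needed
metrics = best.val(data='/content/drive/MyDrive/datasets/vehicle.yaml', split='test')
print(metrics.box.map50)  # mAP@0.5 on the test split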
* Test the model
from ultralytics import YOLO
from PIL import Image, ImageDraw, ImageFont
from glob import glob
import yaml
import numpy as np
import uuid
# Load a model
model = YOLO('/content/drive/MyDrive/datasets/best.pt')
classes = ['bicycle', 'car','motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat']
class_name = "vehicle"
# Load images
filelist = glob(f"/content/drive/MyDrive/datasets/training_dataset/train/images/*.jpg")
filelist = np.random.choice(filelist, size=1)
imgs = [Image.open(filename) for filename in filelist]
# Perform object detection on an image using the model
results = model(imgs)
def draw_bbox(draw, bbox, label, color=(0, 255, 0, 255), conf=None, size=12):
    # font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuMathTeXGyre.ttf", size)
    def set_alpha(color, value):
        background = list(color)
        background[3] = value
        return tuple(background)
    # Box with a semi-transparent fill
    background = set_alpha(color, 50)
    draw.rectangle(bbox, outline=color, fill=background, width=3)
    # Label (and confidence) banner at the top-left corner of the box
    background = set_alpha(color, 150)
    text = f"{label}" + ("" if conf is None else f" ({conf:.3f})")
    text_bbox = bbox[0], bbox[1], bbox[0] + len(text) * 10, bbox[1] + 25
    draw.rectangle(text_bbox, outline=color, fill=background, width=3)
    draw.text((bbox[0] + 5, bbox[1] + 5), text, (0, 0, 0))
# One random RGBA color per class
color = []
n_classes = len(classes)
for _ in range(n_classes):
    c = tuple(int(v) for v in np.random.choice(range(256), size=3)) + (255,)
    color.append(c)
from IPython.display import display, Image as IPImage
# Save and display the original (resized) images
for i, (img, result) in enumerate(zip(imgs, results)):
    img = img.resize((640, 640))
    width, height = img.size
    draw = ImageDraw.Draw(img, 'RGBA')
    unique_id = str(uuid.uuid4())[:8]  # 8-character unique ID
    origin_filename = f"/content/drive/MyDrive/datasets/origin_images/origin_image_{unique_id}.png"
    img.save(origin_filename)
    display(IPImage(filename=origin_filename))
# Draw predictions and display the annotated images
for i, (img, result) in enumerate(zip(imgs, results)):
    img = img.resize((640, 640))
    width, height = img.size
    draw = ImageDraw.Draw(img, 'RGBA')
    result = result.cpu()
    xyxys = result.boxes.xyxyn  # boxes in xyxy format, normalized to [0, 1], shape (N, 4)
    confs = result.boxes.conf   # confidence scores, shape (N,)
    clss = result.boxes.cls     # class indices, shape (N,)
    xyxys = xyxys.numpy()
    clss = map(int, clss.numpy())
    for xyxy, conf, cls in zip(xyxys, confs, clss):
        xyxy = [xyxy[0] * width, xyxy[1] * height, xyxy[2] * width, xyxy[3] * height]
        draw_bbox(draw, bbox=xyxy, label=classes[cls], color=color[cls], conf=float(conf), size=7)
    # Use uuid instead of i so the output filename is unique
    unique_id = str(uuid.uuid4())[:8]  # 8-character unique ID
    img_filename = f"/content/drive/MyDrive/datasets/output_images/output_image_{unique_id}.png"
    img.save(img_filename)
    display(IPImage(filename=img_filename))
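Alternatively, Ultralytics can draw and save the annotated predictions itself, which skips the manual PIL drawing above; with save=True the images land under runs/detect/predict by default:
results = model.predict(source=[str(p) for p in filelist], save=True, conf=0.25)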
Results