# TensorFlow setup: import it and check the installed version
import tensorflow as tf
tf.__version__
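# Optional check (not in the original post): confirm that a GPU runtime is active,
# since the larger detection models used below are slow on CPU.
print(tf.config.list_physical_devices('GPU'))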
# COCO API installation: install pycocotools => it is already installed in this runtime.
! pip install pycocotools
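# Optional sanity check (not in the original post): if this import succeeds,
# pycocotools really is already available.
from pycocotools.coco import COCO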
# How to clone a GitHub repository from Python code
import os
import pathlib
if "models" in pathlib.Path.cwd().parts :
while "models" in pathlib.Path.cwd().part :
os.chdir('..')
elif not pathlib.Path('models').exists() :
! git clone --depth 1 https://github.com/tensorflow/models
### If you want to pull from your own repository instead
# os.chdir('/content/models')
# ! git pull
# Install the Object Detection API
# To run Linux commands without the leading "!", put %%bash at the top of the cell
# and write the commands below it.
%%bash
cd models/research/
protoc object_detection/protos/*.proto --python_out=.
cp object_detection/packages/tf2/setup.py .
python -m pip install .
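# Optional check (not in the original post): the Object Detection API ships a
# model-builder test script; if it finishes without errors, the install above worked.
# The path is assumed from the tensorflow/models repository layout and may change.
! python models/research/object_detection/builders/model_builder_tf2_test.py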
import tensorflow as tf
import os
import pathlib
import numpy as np
import zipfile
import matplotlib.pyplot as plt
from PIL import Image
from IPython.display import display
from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as viz_utils
# Link the label file installed locally to a category index (class id -> class name).
PATH_TO_LABELS = '/content/models/research/object_detection/data/mscoco_label_map.pbtxt'
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS)
print(category_index)
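# The category index is a plain dict keyed by class id; each entry holds the id and
# its human-readable name from the COCO label map, for example:
print(category_index[1])  # {'id': 1, 'name': 'person'} in the standard COCO label map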
def download_model(model_name, model_date):
    base_url = 'http://download.tensorflow.org/models/object_detection/tf2/'
    model_file = model_name + '.tar.gz'
    model_dir = tf.keras.utils.get_file(fname=model_name,
                                        origin=base_url + model_date + '/' + model_file,
                                        untar=True)
    return str(model_dir)
MODEL_DATE = '20200711'
MODEL_NAME = 'centernet_hg104_1024x1024_coco17_tpu-32'
PATH_TO_MODEL_DIR = download_model(MODEL_NAME, MODEL_DATE)
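# Note (added): tf.keras.utils.get_file caches the download (under ~/.keras/datasets/
# by default) and, because untar=True, returns the path of the extracted model
# directory, so re-running this cell does not download the archive again.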
def load_model(model_dir):
    model_full_dir = model_dir + "/saved_model"
    # Load the saved model and build the detection function
    detection_model = tf.saved_model.load(model_full_dir)
    return detection_model
detection_model = load_model(PATH_TO_MODEL_DIR)
PATH_TO_MODEL_DIR
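# Optional peek (not in the original post): models exported by the Object Detection
# API expose a 'serving_default' signature; its output keys match the keys read out
# of `detections` in show_inference below.
infer = detection_model.signatures['serving_default']
print(list(infer.structured_outputs.keys()))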
# Load the images from the image directory we already have.
PATH_TO_IMAGE_DIR = pathlib.Path('/content/models/research/object_detection/test_images')
IMAGE_PATHS = sorted(list( PATH_TO_IMAGE_DIR.glob('*.jpg') ))
IMAGE_PATHS
### Object Detection ###
def show_inference(detection_model, image_np):
    # The input needs to be a tensor; convert it using `tf.convert_to_tensor`.
    input_tensor = tf.convert_to_tensor(image_np)
    # The model expects a batch of images, so add an axis with `tf.newaxis`.
    input_tensor = input_tensor[tf.newaxis, ...]
    # input_tensor = np.expand_dims(image_np, 0)

    detections = detection_model(input_tensor)
    # print(detections)

    # All outputs are batch tensors.
    # Convert to numpy arrays, and take index [0] to remove the batch dimension.
    # We're only interested in the first num_detections.
    num_detections = int(detections.pop('num_detections'))
    detections = {key: value[0, :num_detections].numpy()
                  for key, value in detections.items()}
    detections['num_detections'] = num_detections

    # detection_classes should be ints.
    detections['detection_classes'] = detections['detection_classes'].astype(np.int64)
    # print(detections)

    image_np_with_detections = image_np.copy()
    viz_utils.visualize_boxes_and_labels_on_image_array(
        image_np_with_detections,
        detections['detection_boxes'],
        detections['detection_classes'],
        detections['detection_scores'],
        category_index,
        use_normalized_coordinates=True,
        max_boxes_to_draw=200,
        min_score_thresh=.30,
        agnostic_mode=False)

    display(Image.fromarray(image_np_with_detections))
for image_path in IMAGE_PATHS:
    image_np = np.array(Image.open(image_path))
    show_inference(detection_model, image_np)
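# To try the detector on your own picture, the same function works on any RGB image
# loaded as a NumPy array (the file name below is just a placeholder):
# my_image = np.array(Image.open('/content/my_photo.jpg').convert('RGB'))
# show_inference(detection_model, my_image)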
# You can swap in a different model, e.g.:
# /20200711/ssd_mobilenet_v1_fpn_640x640_coco17_tpu-8.tar.gz
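# The available model names and dates (added note) are listed in the TF2 Detection
# Model Zoo in the tensorflow/models repository, at the time of writing:
# https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf2_detection_zoo.md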
MODEL_DATE = '20200711'
MODEL_NAME = 'ssd_mobilenet_v1_fpn_640x640_coco17_tpu-8'
PATH_TO_MODEL_DIR = download_model(MODEL_NAME, MODEL_DATE)
def load_model(model_dir):
    model_full_dir = model_dir + "/saved_model"
    # Load the saved model and build the detection function
    detection_model = tf.saved_model.load(model_full_dir)
    return detection_model
detection_model = load_model(PATH_TO_MODEL_DIR)
PATH_TO_MODEL_DIR
#### Object Detection ###########
def show_inference(detection_model, image_np):
    # The input needs to be a tensor; convert it using `tf.convert_to_tensor`.
    input_tensor = tf.convert_to_tensor(image_np)
    # The model expects a batch of images, so add an axis with `tf.newaxis`.
    input_tensor = input_tensor[tf.newaxis, ...]
    # input_tensor = np.expand_dims(image_np, 0)

    detections = detection_model(input_tensor)
    # print(detections)

    # All outputs are batch tensors.
    # Convert to numpy arrays, and take index [0] to remove the batch dimension.
    # We're only interested in the first num_detections.
    num_detections = int(detections.pop('num_detections'))
    detections = {key: value[0, :num_detections].numpy()
                  for key, value in detections.items()}
    detections['num_detections'] = num_detections

    # detection_classes should be ints.
    detections['detection_classes'] = detections['detection_classes'].astype(np.int64)
    # print(detections)

    image_np_with_detections = image_np.copy()
    viz_utils.visualize_boxes_and_labels_on_image_array(
        image_np_with_detections,
        detections['detection_boxes'],
        detections['detection_classes'],
        detections['detection_scores'],
        category_index,
        use_normalized_coordinates=True,
        max_boxes_to_draw=100,
        min_score_thresh=.50,
        agnostic_mode=False)

    display(Image.fromarray(image_np_with_detections))
for image_path in IMAGE_PATHS:
    image_np = np.array(Image.open(image_path))
    show_inference(detection_model, image_np)
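# A small added sketch for comparing the two models' speed: time a single forward
# pass of whichever model is currently loaded (exact numbers depend on the runtime
# and whether a GPU is attached).
import time
test_image = np.array(Image.open(IMAGE_PATHS[0]))
input_tensor = tf.convert_to_tensor(test_image)[tf.newaxis, ...]
start = time.time()
_ = detection_model(input_tensor)
print('inference time: %.2f s' % (time.time() - start))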