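"""Gradio demo: face mask & glasses detection with a TensorFlow Object
Detection (SSD MobileNet V2 320x320) model downloaded from the Hugging
Face Hub. Accepts an image or a video plus a confidence threshold and
returns the input annotated with detection boxes."""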
import os
import tarfile
from io import BytesIO

import cv2
import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import wget
from huggingface_hub import snapshot_download
from object_detection.utils import label_map_util
from object_detection.utils import ops as utils_op
from object_detection.utils import visualization_utils as viz_utils
from PIL import Image
from tqdm import tqdm
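# Dependencies: tensorflow, the TF Object Detection API (object_detection),
# gradio, huggingface_hub, opencv-python, wget, tqdm, and Pillow.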
# Label map describing the detectable classes.
PATH_TO_LABELS = 'data/label_map.pbtxt'
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
def pil_image_as_numpy_array(pilimg):
    """Convert a PIL image to a batched (1, H, W, 3) numpy array."""
    img_array = tf.keras.utils.img_to_array(pilimg)
    img_array = np.expand_dims(img_array, axis=0)
    return img_array
def load_image_into_numpy_array(path):
    """Read an image file from disk into a batched numpy array."""
    image_data = tf.io.gfile.GFile(path, 'rb').read()
    image = Image.open(BytesIO(image_data))
    return pil_image_as_numpy_array(image)
def load_model():
    """Download the exported SavedModel from the Hugging Face Hub and load it."""
    download_dir = snapshot_download(REPO_ID)
    saved_model_dir = os.path.join(download_dir, "saved_model")
    detection_model = tf.saved_model.load(saved_model_dir)
    return detection_model
def load_model2():
    """Alternative loader (unused): fetch a pretrained balloon model from S3."""
    wget.download("https://nyp-aicourse.s3-ap-southeast-1.amazonaws.com/pretrained-models/balloon_model.tar.gz")
    tarfile.open("balloon_model.tar.gz").extractall()
    model_dir = 'saved_model'
    detection_model = tf.saved_model.load(model_dir)
    return detection_model
threshold = 0.50  # default confidence threshold
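# Note: the Gradio Textbox passes the threshold to predict() as a string
# (or an empty value when left blank); it is parsed with float() before use.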
def predict(pilimg, video_in_filepath, threshold):
    # The image input takes priority; otherwise fall back to the video input.
    if pilimg is not None:
        image_np = pil_image_as_numpy_array(pilimg)
        return predict2(image_np, threshold), None
    elif video_in_filepath:
        video_reader = cv2.VideoCapture(video_in_filepath)
        nb_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))
        frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))
        fps = video_reader.get(cv2.CAP_PROP_FPS)
        video_out_filepath = 'detected.mp4'
        video_writer = cv2.VideoWriter(video_out_filepath,
                                       cv2.VideoWriter_fourcc(*'mp4v'),
                                       fps,
                                       (frame_w, frame_h))
        min_score = float(threshold) if threshold else 0.50
        for i in tqdm(range(nb_frames)):
            ret, image_np = video_reader.read()
            if not ret:  # stop early if a frame cannot be read
                break
            # Note: OpenCV frames are BGR, while the model was presumably
            # trained on RGB images; kept as-is to match the original behavior.
            input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.uint8)
            results = detection_model(input_tensor)
            viz_utils.visualize_boxes_and_labels_on_image_array(
                image_np,
                results['detection_boxes'][0].numpy(),
                (results['detection_classes'][0].numpy() + label_id_offset).astype(int),
                results['detection_scores'][0].numpy(),
                category_index,
                use_normalized_coordinates=True,
                max_boxes_to_draw=200,
                min_score_thresh=min_score,
                agnostic_mode=False,
                line_thickness=2)
            video_writer.write(np.uint8(image_np))
        # Release the reader/writer and close any OpenCV windows
        video_reader.release()
        video_writer.release()
        cv2.destroyAllWindows()
        cv2.waitKey(1)
        return None, video_out_filepath
    else:
        return None, None
def predict2(image_np, threshold):
    # Exported TFOD SavedModels expect a batched uint8 tensor, so cast the
    # float array produced by img_to_array before inference.
    input_tensor = tf.convert_to_tensor(image_np.astype(np.uint8))
    results = detection_model(input_tensor)
    # Different object detection models return additional outputs;
    # keep them all as numpy arrays.
    result = {key: value.numpy() for key, value in results.items()}
    label_id_offset = 0
    image_np_with_detections = image_np.copy()
    viz_utils.visualize_boxes_and_labels_on_image_array(
        image_np_with_detections[0],
        result['detection_boxes'][0],
        (result['detection_classes'][0] + label_id_offset).astype(int),
        result['detection_scores'][0],
        category_index,
        use_normalized_coordinates=True,
        max_boxes_to_draw=200,
        min_score_thresh=float(threshold) if threshold else 0.50,
        agnostic_mode=False,
        line_thickness=2)
    result_pil_img = tf.keras.utils.array_to_img(image_np_with_detections[0])
    return result_pil_img
label_id_offset = 0
samples_folder = 'test_samples'
# image_path = 'test_samples/image489.png'
def video_fn(video_in_filepath):
    """Video-only variant of predict(); used by the commented-out interface below."""
    video_reader = cv2.VideoCapture(video_in_filepath)
    nb_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))
    frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
    frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))
    fps = video_reader.get(cv2.CAP_PROP_FPS)
    video_out_filepath = 'detected.mp4'
    video_writer = cv2.VideoWriter(video_out_filepath,
                                   cv2.VideoWriter_fourcc(*'mp4v'),
                                   fps,
                                   (frame_w, frame_h))
    for i in tqdm(range(nb_frames)):
        ret, image_np = video_reader.read()
        if not ret:  # stop early if a frame cannot be read
            break
        input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.uint8)
        results = detection_model(input_tensor)
        viz_utils.visualize_boxes_and_labels_on_image_array(
            image_np,
            results['detection_boxes'][0].numpy(),
            (results['detection_classes'][0].numpy() + label_id_offset).astype(int),
            results['detection_scores'][0].numpy(),
            category_index,
            use_normalized_coordinates=True,
            max_boxes_to_draw=200,
            min_score_thresh=.50,
            agnostic_mode=False,
            line_thickness=2)
        video_writer.write(np.uint8(image_np))
    # Release the reader/writer and close any OpenCV windows
    video_reader.release()
    video_writer.release()
    cv2.destroyAllWindows()
    cv2.waitKey(1)
    return video_out_filepath
# Hugging Face Hub repository holding the exported SavedModel.
REPO_ID = "23b719w/assignment2tfod_model2"
detection_model = load_model()
# pil_image = Image.open(image_path)
# image_arr = pil_image_as_numpy_array(pil_image)
# predicted_img = predict2(image_arr, threshold)
# predicted_img.save('predicted.jpg')
gr.Interface(fn=predict,
             inputs=[gr.Image(type="pil", label="Input Image", height=500, width=800),
                     gr.Video(label="Input Video", height=500, width=800),
                     gr.Textbox(placeholder="0.50", label="Set the confidence threshold (0.00-1.00)")],
             outputs=[gr.Image(type="pil", label="Output Image", height=500, width=800),
                      gr.Video(label="Output Video", height=500, width=800)],
             title="Facemask & Glasses",
             description="Model: ssd_mobilenet_v2_320x320",
             theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"),
             # cache_examples=True,
             # examples=[["test_samples/image489.png", None, 0.55], ["test_samples/image825.png", None, 0.55], ["test_samples/image833.png", None, 0.55], ["test_samples/image846.png", None, 0.55], [None, "test_samples/test_video.mp4", 0.55]]
             examples=[["test_samples/image489.png", "test_samples/test_video.mp4", 0.55]],
             ).launch(share=True)
# gr.Interface(fn=video_fn,
#              inputs=gr.Video(label="Input Video"),
#              outputs=gr.Video(label="Output Video"),
#              title="Facemask & Glasses",
#              description="Model: ssd_mobilenet_v2_320x320",
#              theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"),
#              # examples="test_samples/test_video.mp4"
#              ).launch(share=True)
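# To run locally (assuming this file is saved as app.py and the TF Object
# Detection API plus the dependencies listed above are installed):
#   python app.py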