23b719w committed
Commit 788004e · 1 Parent(s): 58a6a5e

Upload app.py.py

Files changed (1)
  1. app.py.py +149 -0
app.py.py ADDED
@@ -0,0 +1,149 @@
+ import matplotlib.pyplot as plt
+ import numpy as np
+ from io import BytesIO  # BytesIO is in the standard library; six is not needed
+ from PIL import Image
+ import tensorflow as tf
+ from object_detection.utils import label_map_util
+ from object_detection.utils import visualization_utils as viz_utils
+ from object_detection.utils import ops as utils_op
+ import tarfile
+ import wget
+ import gradio as gr
+ from huggingface_hub import snapshot_download
+ import os
+
+ from tqdm import tqdm
+ import cv2
+
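+ # Note: object_detection is the TensorFlow Object Detection API from the
+ # Model Garden (https://github.com/tensorflow/models), not a plain pip
+ # dependency. One common install sketch for the Space's environment
+ # (an assumption, not part of this repo):
+ #   git clone --depth 1 https://github.com/tensorflow/models
+ #   cd models/research
+ #   protoc object_detection/protos/*.proto --python_out=.
+ #   cp object_detection/packages/tf2/setup.py . && pip install .
+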
+ # Build the class-id -> display-name mapping from the label map shipped
+ # with the model.
+ PATH_TO_LABELS = 'data/label_map.pbtxt'
+ category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
+
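+ # For reference, a label map is a text protobuf of `item` entries. A minimal
+ # sketch (the real class names in data/label_map.pbtxt may differ; these are
+ # guesses based on the app title):
+ # item {
+ #   id: 1
+ #   name: 'facemask'
+ # }
+ # item {
+ #   id: 2
+ #   name: 'glasses'
+ # }
+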
+ def pil_image_as_numpy_array(pilimg):
+     # Convert a PIL image into a batched array of shape (1, height, width, 3).
+     img_array = tf.keras.utils.img_to_array(pilimg)
+     img_array = np.expand_dims(img_array, axis=0)
+     return img_array
+
+ def load_image_into_numpy_array(path):
+     # Read an image file from disk and return it as a batched numpy array.
+     image_data = tf.io.gfile.GFile(path, 'rb').read()
+     image = Image.open(BytesIO(image_data))
+     return pil_image_as_numpy_array(image)
+
+ def load_model():
+     # Download the fine-tuned detector from the Hugging Face Hub and load
+     # its SavedModel directory (REPO_ID is defined below, before the call site).
+     download_dir = snapshot_download(REPO_ID)
+     saved_model_dir = os.path.join(download_dir, "saved_model")
+     detection_model = tf.saved_model.load(saved_model_dir)
+     return detection_model
+
+ def load_model2():
+     # Alternative loader: fetch a pretrained balloon-model archive from S3
+     # and untar it next to the app.
+     wget.download("https://nyp-aicourse.s3-ap-southeast-1.amazonaws.com/pretrained-models/balloon_model.tar.gz")
+     tarfile.open("balloon_model.tar.gz").extractall()
+     model_dir = 'saved_model'
+     detection_model = tf.saved_model.load(model_dir)
+     return detection_model
+
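+ # A quick way to sanity-check what the exported SavedModel expects and
+ # returns (an aside, not part of the app flow):
+ # model = load_model()
+ # fn = model.signatures['serving_default']
+ # print(fn.structured_input_signature)
+ # print(fn.structured_outputs)
+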
+ threshold = 0.50
+
+ def predict(pilimg, video_in_filepath, threshold):
+     # Image branch of the combined image+video interface (see the
+     # commented-out gr.Interface below): run detection on the still image
+     # and leave the video output empty.
+     image_np = pil_image_as_numpy_array(pilimg)
+     return predict2(image_np, threshold), None
+
+ def predict2(image_np, threshold):
+     # The exported detector expects a uint8 batch tensor, while
+     # pil_image_as_numpy_array returns float32, so cast before calling.
+     input_tensor = tf.convert_to_tensor(image_np.astype(np.uint8))
+     results = detection_model(input_tensor)
+
+     # Different object detection models return additional outputs; pull
+     # them all back into plain numpy arrays.
+     result = {key: value.numpy() for key, value in results.items()}
+
+     label_id_offset = 0
+     image_np_with_detections = image_np.copy()
+
+     viz_utils.visualize_boxes_and_labels_on_image_array(
+         image_np_with_detections[0],
+         result['detection_boxes'][0],
+         (result['detection_classes'][0] + label_id_offset).astype(int),
+         result['detection_scores'][0],
+         category_index,
+         use_normalized_coordinates=True,
+         max_boxes_to_draw=200,
+         min_score_thresh=float(threshold),
+         agnostic_mode=False,
+         line_thickness=2)
+
+     result_pil_img = tf.keras.utils.array_to_img(image_np_with_detections[0])
+     return result_pil_img
+
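+ # For reference, a TF Object Detection API SavedModel typically returns
+ # (with batch size 1): 'detection_boxes' of shape (1, N, 4) as normalized
+ # [ymin, xmin, ymax, xmax], 'detection_classes' and 'detection_scores' of
+ # shape (1, N) sorted by descending score, and 'num_detections' of shape (1,).
+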
+ label_id_offset = 0
+ samples_folder = 'test_samples'
+ # image_path = 'test_samples/image489.png'
+
+ def video_fn(video_in_filepath):
+     # gr.Video hands the uploaded clip to the function as a file path, so
+     # open a capture here rather than expecting a ready-made reader.
+     video_reader = cv2.VideoCapture(video_in_filepath)
+
+     nb_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))
+     frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
+     frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))
+     fps = video_reader.get(cv2.CAP_PROP_FPS)
+
+     video_out_filepath = 'detected.mp4'
+     video_writer = cv2.VideoWriter(video_out_filepath,
+                                    cv2.VideoWriter_fourcc(*'mp4v'),
+                                    fps,
+                                    (frame_w, frame_h))
+
+     for i in tqdm(range(nb_frames)):
+         ret, image_np = video_reader.read()
+         if not ret:
+             break
+         # OpenCV decodes frames as BGR; the detector works on RGB.
+         image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
+         input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.uint8)
+         results = detection_model(input_tensor)
+         viz_utils.visualize_boxes_and_labels_on_image_array(
+             image_np,
+             results['detection_boxes'][0].numpy(),
+             (results['detection_classes'][0].numpy() + label_id_offset).astype(int),
+             results['detection_scores'][0].numpy(),
+             category_index,
+             use_normalized_coordinates=True,
+             max_boxes_to_draw=200,
+             min_score_thresh=.50,
+             agnostic_mode=False,
+             line_thickness=2)
+
+         # Convert back to BGR before handing the frame to the writer.
+         video_writer.write(cv2.cvtColor(np.uint8(image_np), cv2.COLOR_RGB2BGR))
+
+     # Release the reader and writer and close any OpenCV windows.
+     video_reader.release()
+     video_writer.release()
+     cv2.destroyAllWindows()
+     cv2.waitKey(1)
+
+     # The gr.Video output expects a file path, not the VideoWriter object.
+     return video_out_filepath
+
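+ # Local smoke test using the sample clip shipped with the Space (run after
+ # detection_model is loaded below):
+ # print(video_fn('test_samples/test_video.mp4'))
+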
+ REPO_ID = "23b719w/assignment2_tfodmodel"
+ detection_model = load_model()
+ # pil_image = Image.open(image_path)
+ # image_arr = pil_image_as_numpy_array(pil_image)
+
+ # predicted_img = predict(image_arr)
+ # predicted_img.save('predicted.jpg')
+
+ # Earlier combined image+video interface, kept for reference:
+ # gr.Interface(fn=predict,
+ #              inputs=[gr.Image(type="pil", label="Input Image"), gr.Video(label="Input Video"), gr.Textbox(placeholder="0.50", label="Set the confidence threshold (0.00-1.00)")],
+ #              outputs=[gr.Image(type="pil", label="Output Image"), gr.Video(label="Output Video")],
+ #              title="Facemask & Glasses",
+ #              description="Model: ssd_mobilenet_v2_320x320",
+ #              theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"),
+ #              examples=[["test_samples/image489.png", "", 0.55], ["test_samples/image825.png", "", 0.55], ["test_samples/image833.png", "", 0.55], ["test_samples/image846.png", "", 0.55]]
+ #              ).launch(share=True)
+
+ gr.Interface(fn=video_fn,
+              inputs=gr.Video(label="Input Video"),
+              outputs=gr.Video(label="Output Video"),
+              title="Facemask & Glasses",
+              description="Model: ssd_mobilenet_v2_320x320",
+              theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"),
+              # examples must be a list of example rows, not a bare string
+              examples=[["test_samples/test_video.mp4"]]
+              ).launch(share=True)