gaoyu1314 committed
Commit 25bf480 · verified · 1 Parent(s): a81d82e

Upload 8 files
util/__init__.py ADDED
File without changes
util/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (149 Bytes)

util/__pycache__/box_annotator.cpython-311.pyc ADDED
Binary file (10.5 kB)

util/__pycache__/utils.cpython-311.pyc ADDED
Binary file (42.8 kB)
util/box_annotator.py ADDED
@@ -0,0 +1,262 @@
from typing import List, Optional, Union, Tuple

import cv2
import numpy as np

from supervision.detection.core import Detections
from supervision.draw.color import Color, ColorPalette


class BoxAnnotator:
    """
    A class for drawing bounding boxes on an image using detections provided.

    Attributes:
        color (Union[Color, ColorPalette]): The color to draw the bounding box,
            can be a single color or a color palette
        thickness (int): The thickness of the bounding box lines, default is 3
        text_color (Color): The color of the text on the bounding box, default is black
        text_scale (float): The scale of the text on the bounding box, default is 0.5
        text_thickness (int): The thickness of the text on the bounding box,
            default is 2
        text_padding (int): The padding around the text on the bounding box,
            default is 10
        avoid_overlap (bool): Whether to move labels so that they do not overlap
            other detection boxes, default is True

    """

    def __init__(
        self,
        color: Union[Color, ColorPalette] = ColorPalette.DEFAULT,
        thickness: int = 3,  # 1 for seeclick, 2 for mind2web, 3 for demo
        text_color: Color = Color.BLACK,
        text_scale: float = 0.5,  # 0.8 for mobile/web, 0.3 for desktop, 0.4 for mind2web
        text_thickness: int = 2,  # 1, or 2 for demo
        text_padding: int = 10,
        avoid_overlap: bool = True,
    ):
        self.color: Union[Color, ColorPalette] = color
        self.thickness: int = thickness
        self.text_color: Color = text_color
        self.text_scale: float = text_scale
        self.text_thickness: int = text_thickness
        self.text_padding: int = text_padding
        self.avoid_overlap: bool = avoid_overlap

    def annotate(
        self,
        scene: np.ndarray,
        detections: Detections,
        labels: Optional[List[str]] = None,
        skip_label: bool = False,
        image_size: Optional[Tuple[int, int]] = None,
    ) -> np.ndarray:
        """
        Draws bounding boxes on the frame using the detections provided.

        Args:
            scene (np.ndarray): The image on which the bounding boxes will be drawn
            detections (Detections): The detections for which the
                bounding boxes will be drawn
            labels (Optional[List[str]]): An optional list of labels
                corresponding to each detection. If `labels` is not provided,
                the corresponding `class_id` will be used as the label.
            skip_label (bool): If set to `True`, skips bounding box label annotation.
        Returns:
            np.ndarray: The image with the bounding boxes drawn on it

        Example:
            ```python
            import supervision as sv

            classes = ['person', ...]
            image = ...
            detections = sv.Detections(...)

            box_annotator = sv.BoxAnnotator()
            labels = [
                f"{classes[class_id]} {confidence:0.2f}"
                for _, _, confidence, class_id, _ in detections
            ]
            annotated_frame = box_annotator.annotate(
                scene=image.copy(),
                detections=detections,
                labels=labels
            )
            ```
        """
        font = cv2.FONT_HERSHEY_SIMPLEX
        for i in range(len(detections)):
            x1, y1, x2, y2 = detections.xyxy[i].astype(int)
            class_id = (
                detections.class_id[i] if detections.class_id is not None else None
            )
            idx = class_id if class_id is not None else i
            color = (
                self.color.by_idx(idx)
                if isinstance(self.color, ColorPalette)
                else self.color
            )
            cv2.rectangle(
                img=scene,
                pt1=(x1, y1),
                pt2=(x2, y2),
                color=color.as_bgr(),
                thickness=self.thickness,
            )
            if skip_label:
                continue

            text = (
                f"{class_id}"
                if (labels is None or len(detections) != len(labels))
                else labels[i]
            )

            text_width, text_height = cv2.getTextSize(
                text=text,
                fontFace=font,
                fontScale=self.text_scale,
                thickness=self.text_thickness,
            )[0]

            if not self.avoid_overlap:
                text_x = x1 + self.text_padding
                text_y = y1 - self.text_padding

                text_background_x1 = x1
                text_background_y1 = y1 - 2 * self.text_padding - text_height

                text_background_x2 = x1 + 2 * self.text_padding + text_width
                text_background_y2 = y1
            else:
                text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2 = get_optimal_label_pos(
                    self.text_padding, text_width, text_height, x1, y1, x2, y2, detections, image_size
                )

            cv2.rectangle(
                img=scene,
                pt1=(text_background_x1, text_background_y1),
                pt2=(text_background_x2, text_background_y2),
                color=color.as_bgr(),
                thickness=cv2.FILLED,
            )
            # choose black or white text depending on the luminance of the box color
            box_color = color.as_rgb()
            luminance = 0.299 * box_color[0] + 0.587 * box_color[1] + 0.114 * box_color[2]
            text_color = (0, 0, 0) if luminance > 160 else (255, 255, 255)
            cv2.putText(
                img=scene,
                text=text,
                org=(text_x, text_y),
                fontFace=font,
                fontScale=self.text_scale,
                color=text_color,
                thickness=self.text_thickness,
                lineType=cv2.LINE_AA,
            )
        return scene


def box_area(box):
    return (box[2] - box[0]) * (box[3] - box[1])


def intersection_area(box1, box2):
    x1 = max(box1[0], box2[0])
    y1 = max(box1[1], box2[1])
    x2 = min(box1[2], box2[2])
    y2 = min(box1[3], box2[3])
    return max(0, x2 - x1) * max(0, y2 - y1)


def IoU(box1, box2, return_max=True):
    intersection = intersection_area(box1, box2)
    union = box_area(box1) + box_area(box2) - intersection
    if box_area(box1) > 0 and box_area(box2) > 0:
        ratio1 = intersection / box_area(box1)
        ratio2 = intersection / box_area(box2)
    else:
        ratio1, ratio2 = 0, 0
    if return_max:
        return max(intersection / union, ratio1, ratio2)
    else:
        return intersection / union


def get_optimal_label_pos(text_padding, text_width, text_height, x1, y1, x2, y2, detections, image_size):
    """Check overlap of the label text with the detection boxes and pick a label position.

    Candidate positions are tried in order: 'top left', 'outer left', 'outer right',
    'top right'. If all candidates overlap, the last one is returned.
    Overlap threshold defaults to 0.3.
    """

    def get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size):
        is_overlap = False
        for i in range(len(detections)):
            detection = detections.xyxy[i].astype(int)
            if IoU([text_background_x1, text_background_y1, text_background_x2, text_background_y2], detection) > 0.3:
                is_overlap = True
                break
        # check if the text is out of the image
        if text_background_x1 < 0 or text_background_x2 > image_size[0] or text_background_y1 < 0 or text_background_y2 > image_size[1]:
            is_overlap = True
        return is_overlap

    # pos == 'top left'
    text_x = x1 + text_padding
    text_y = y1 - text_padding

    text_background_x1 = x1
    text_background_y1 = y1 - 2 * text_padding - text_height

    text_background_x2 = x1 + 2 * text_padding + text_width
    text_background_y2 = y1
    is_overlap = get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size)
    if not is_overlap:
        return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2

    # pos == 'outer left'
    text_x = x1 - text_padding - text_width
    text_y = y1 + text_padding + text_height

    text_background_x1 = x1 - 2 * text_padding - text_width
    text_background_y1 = y1

    text_background_x2 = x1
    text_background_y2 = y1 + 2 * text_padding + text_height
    is_overlap = get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size)
    if not is_overlap:
        return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2

    # pos == 'outer right'
    text_x = x2 + text_padding
    text_y = y1 + text_padding + text_height

    text_background_x1 = x2
    text_background_y1 = y1

    text_background_x2 = x2 + 2 * text_padding + text_width
    text_background_y2 = y1 + 2 * text_padding + text_height

    is_overlap = get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size)
    if not is_overlap:
        return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2

    # pos == 'top right'
    text_x = x2 - text_padding - text_width
    text_y = y1 - text_padding

    text_background_x1 = x2 - 2 * text_padding - text_width
    text_background_y1 = y1 - 2 * text_padding - text_height

    text_background_x2 = x2
    text_background_y2 = y1

    is_overlap = get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size)
    if not is_overlap:
        return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2

    return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2
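
A minimal usage sketch for the annotator above (not part of the commit), assuming `supervision`, `opencv-python`, and `numpy` are installed and the repo root is on `PYTHONPATH`; the blank canvas, the two boxes, and the labels are made-up placeholders.

```python
# Illustrative sketch: draw two labeled boxes with the BoxAnnotator defined above.
import numpy as np
import supervision as sv
from util.box_annotator import BoxAnnotator

image = np.full((480, 640, 3), 255, dtype=np.uint8)  # blank white canvas (placeholder)
detections = sv.Detections(
    xyxy=np.array([[40, 40, 200, 120], [220, 60, 380, 160]], dtype=float)  # made-up boxes
)

annotator = BoxAnnotator(text_scale=0.5, thickness=2)
annotated = annotator.annotate(
    scene=image.copy(),
    detections=detections,
    labels=["0", "1"],
    image_size=(640, 480),  # (w, h); used by get_optimal_label_pos when avoid_overlap=True
)
```

With `avoid_overlap=True` (the default), each label is placed at the first of the four candidate positions (top left, outer left, outer right, top right) whose background rectangle stays inside the image and overlaps no detection box by more than the 0.3 IoU threshold.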
util/omniparser.py ADDED
@@ -0,0 +1,32 @@
from util.utils import get_som_labeled_img, get_caption_model_processor, get_yolo_model, check_ocr_box
import torch
from PIL import Image
import io
import base64
from typing import Dict


class Omniparser(object):
    def __init__(self, config: Dict):
        self.config = config
        device = 'cuda' if torch.cuda.is_available() else 'cpu'

        self.som_model = get_yolo_model(model_path=config['som_model_path'])
        self.caption_model_processor = get_caption_model_processor(
            model_name=config['caption_model_name'],
            model_name_or_path=config['caption_model_path'],
            device=device,
        )
        print('Omniparser initialized!!!')

    def parse(self, image_base64: str):
        image_bytes = base64.b64decode(image_base64)
        image = Image.open(io.BytesIO(image_bytes))
        print('image size:', image.size)

        box_overlay_ratio = max(image.size) / 3200
        draw_bbox_config = {
            'text_scale': 0.8 * box_overlay_ratio,
            'text_thickness': max(int(2 * box_overlay_ratio), 1),
            'text_padding': max(int(3 * box_overlay_ratio), 1),
            'thickness': max(int(3 * box_overlay_ratio), 1),
        }

        (text, ocr_bbox), _ = check_ocr_box(
            image, display_img=False, output_bb_format='xyxy',
            easyocr_args={'text_threshold': 0.8}, use_paddleocr=False,
        )
        dino_labled_img, label_coordinates, parsed_content_list = get_som_labeled_img(
            image, self.som_model, BOX_TRESHOLD=self.config['BOX_TRESHOLD'],
            output_coord_in_ratio=True, ocr_bbox=ocr_bbox,
            draw_bbox_config=draw_bbox_config,
            caption_model_processor=self.caption_model_processor,
            ocr_text=text, use_local_semantics=True, iou_threshold=0.7,
            scale_img=False, batch_size=128,
        )

        return dino_labled_img, parsed_content_list
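
A hedged usage sketch for the wrapper above (not part of the commit): the weight paths and the `BOX_TRESHOLD` value below are illustrative assumptions, not values fixed by this upload, and the input screenshot is a placeholder.

```python
# Illustrative sketch: parse a base64-encoded screenshot with Omniparser.
import base64
from util.omniparser import Omniparser

config = {
    'som_model_path': 'weights/icon_detect/model.pt',       # assumed path to YOLO icon-detection weights
    'caption_model_name': 'florence2',                      # 'blip2' is the other branch supported by get_caption_model_processor
    'caption_model_path': 'weights/icon_caption_florence',  # assumed path to caption-model weights
    'BOX_TRESHOLD': 0.05,                                   # assumed detection confidence threshold (spelling as in the code)
}
parser = Omniparser(config)

with open('screenshot.png', 'rb') as f:  # placeholder input image
    image_base64 = base64.b64encode(f.read()).decode('utf-8')

labeled_img_base64, parsed_content_list = parser.parse(image_base64)
# labeled_img_base64: the annotated screenshot as a base64-encoded PNG string
# parsed_content_list: one dict per detected element with keys such as
#   'type', 'bbox', 'interactivity', 'content', 'source'
```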
util/utils - 副本.py ADDED
@@ -0,0 +1,540 @@
Duplicate of util/utils.py below (540 identical lines).
util/utils.py ADDED
@@ -0,0 +1,540 @@
import os
import io
import sys
import ast
import re
import json
import time
import base64
import requests

from PIL import Image, ImageDraw, ImageFont
from openai import AzureOpenAI

import cv2
import numpy as np
from matplotlib import pyplot as plt

import easyocr
from paddleocr import PaddleOCR

reader = easyocr.Reader(['en'])
paddle_ocr = PaddleOCR(
    lang='en',  # other languages also available
    use_angle_cls=False,
    use_gpu=False,  # using cuda will conflict with pytorch in the same process
    show_log=False,
    max_batch_size=1024,
    use_dilation=True,  # improves accuracy
    det_db_score_mode='slow',  # improves accuracy
    rec_batch_num=1024)

import torch
from typing import Tuple, List, Union
from torchvision.ops import box_convert
from torchvision.transforms import ToPILImage
import torchvision.transforms as T
import supervision as sv

from util.box_annotator import BoxAnnotator


def get_caption_model_processor(model_name, model_name_or_path="Salesforce/blip2-opt-2.7b", device=None):
    if not device:
        device = "cuda" if torch.cuda.is_available() else "cpu"
    if model_name == "blip2":
        from transformers import Blip2Processor, Blip2ForConditionalGeneration
        processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
        if device == 'cpu':
            model = Blip2ForConditionalGeneration.from_pretrained(
                model_name_or_path, device_map=None, torch_dtype=torch.float32
            )
        else:
            model = Blip2ForConditionalGeneration.from_pretrained(
                model_name_or_path, device_map=None, torch_dtype=torch.float16
            ).to(device)
    elif model_name == "florence2":
        from transformers import AutoProcessor, AutoModelForCausalLM
        processor = AutoProcessor.from_pretrained("microsoft/Florence-2-base", trust_remote_code=True)
        if device == 'cpu':
            model = AutoModelForCausalLM.from_pretrained(model_name_or_path, torch_dtype=torch.float32, trust_remote_code=True)
        else:
            model = AutoModelForCausalLM.from_pretrained(model_name_or_path, torch_dtype=torch.float16, trust_remote_code=True).to(device)
    return {'model': model.to(device), 'processor': processor}


def get_yolo_model(model_path):
    from ultralytics import YOLO
    # Load the model.
    model = YOLO(model_path)
    return model


@torch.inference_mode()
def get_parsed_content_icon(filtered_boxes, starting_idx, image_source, caption_model_processor, prompt=None, batch_size=128):
    # Number of samples per batch; 128 roughly takes 4 GB of GPU memory for the florence v2 model
    to_pil = ToPILImage()
    if starting_idx:
        non_ocr_boxes = filtered_boxes[starting_idx:]
    else:
        non_ocr_boxes = filtered_boxes
    croped_pil_image = []
    for i, coord in enumerate(non_ocr_boxes):
        try:
            xmin, xmax = int(coord[0]*image_source.shape[1]), int(coord[2]*image_source.shape[1])
            ymin, ymax = int(coord[1]*image_source.shape[0]), int(coord[3]*image_source.shape[0])
            cropped_image = image_source[ymin:ymax, xmin:xmax, :]
            cropped_image = cv2.resize(cropped_image, (64, 64))
            croped_pil_image.append(to_pil(cropped_image))
        except:
            continue

    model, processor = caption_model_processor['model'], caption_model_processor['processor']
    if not prompt:
        if 'florence' in model.config.name_or_path:
            prompt = "<CAPTION>"
        else:
            prompt = "The image shows"

    generated_texts = []
    device = model.device
    for i in range(0, len(croped_pil_image), batch_size):
        batch = croped_pil_image[i:i+batch_size]
        if model.device.type == 'cuda':
            inputs = processor(images=batch, text=[prompt]*len(batch), return_tensors="pt", do_resize=False).to(device=device, dtype=torch.float16)
        else:
            inputs = processor(images=batch, text=[prompt]*len(batch), return_tensors="pt").to(device=device)
        if 'florence' in model.config.name_or_path:
            generated_ids = model.generate(input_ids=inputs["input_ids"], pixel_values=inputs["pixel_values"], max_new_tokens=20, num_beams=1, do_sample=False)
        else:
            generated_ids = model.generate(**inputs, max_length=100, num_beams=5, no_repeat_ngram_size=2, early_stopping=True, num_return_sequences=1)  # temperature=0.01, do_sample=True,
        generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)
        generated_text = [gen.strip() for gen in generated_text]
        generated_texts.extend(generated_text)

    return generated_texts


def get_parsed_content_icon_phi3v(filtered_boxes, ocr_bbox, image_source, caption_model_processor):
    to_pil = ToPILImage()
    if ocr_bbox:
        non_ocr_boxes = filtered_boxes[len(ocr_bbox):]
    else:
        non_ocr_boxes = filtered_boxes
    croped_pil_image = []
    for i, coord in enumerate(non_ocr_boxes):
        xmin, xmax = int(coord[0]*image_source.shape[1]), int(coord[2]*image_source.shape[1])
        ymin, ymax = int(coord[1]*image_source.shape[0]), int(coord[3]*image_source.shape[0])
        cropped_image = image_source[ymin:ymax, xmin:xmax, :]
        croped_pil_image.append(to_pil(cropped_image))

    model, processor = caption_model_processor['model'], caption_model_processor['processor']
    device = model.device
    messages = [{"role": "user", "content": "<|image_1|>\ndescribe the icon in one sentence"}]
    prompt = processor.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

    batch_size = 5  # Number of samples per batch
    generated_texts = []

    for i in range(0, len(croped_pil_image), batch_size):
        images = croped_pil_image[i:i+batch_size]
        image_inputs = [processor.image_processor(x, return_tensors="pt") for x in images]
        inputs = {'input_ids': [], 'attention_mask': [], 'pixel_values': [], 'image_sizes': []}
        texts = [prompt] * len(images)
        for i, txt in enumerate(texts):
            input = processor._convert_images_texts_to_inputs(image_inputs[i], txt, return_tensors="pt")
            inputs['input_ids'].append(input['input_ids'])
            inputs['attention_mask'].append(input['attention_mask'])
            inputs['pixel_values'].append(input['pixel_values'])
            inputs['image_sizes'].append(input['image_sizes'])
        max_len = max([x.shape[1] for x in inputs['input_ids']])
        for i, v in enumerate(inputs['input_ids']):
            inputs['input_ids'][i] = torch.cat([processor.tokenizer.pad_token_id * torch.ones(1, max_len - v.shape[1], dtype=torch.long), v], dim=1)
            inputs['attention_mask'][i] = torch.cat([torch.zeros(1, max_len - v.shape[1], dtype=torch.long), inputs['attention_mask'][i]], dim=1)
        inputs_cat = {k: torch.concatenate(v).to(device) for k, v in inputs.items()}

        generation_args = {
            "max_new_tokens": 25,
            "temperature": 0.01,
            "do_sample": False,
        }
        generate_ids = model.generate(**inputs_cat, eos_token_id=processor.tokenizer.eos_token_id, **generation_args)
        # remove input tokens
        generate_ids = generate_ids[:, inputs_cat['input_ids'].shape[1]:]
        response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        response = [res.strip('\n').strip() for res in response]
        generated_texts.extend(response)

    return generated_texts


def remove_overlap(boxes, iou_threshold, ocr_bbox=None):
    assert ocr_bbox is None or isinstance(ocr_bbox, List)

    def box_area(box):
        return (box[2] - box[0]) * (box[3] - box[1])

    def intersection_area(box1, box2):
        x1 = max(box1[0], box2[0])
        y1 = max(box1[1], box2[1])
        x2 = min(box1[2], box2[2])
        y2 = min(box1[3], box2[3])
        return max(0, x2 - x1) * max(0, y2 - y1)

    def IoU(box1, box2):
        intersection = intersection_area(box1, box2)
        union = box_area(box1) + box_area(box2) - intersection + 1e-6
        if box_area(box1) > 0 and box_area(box2) > 0:
            ratio1 = intersection / box_area(box1)
            ratio2 = intersection / box_area(box2)
        else:
            ratio1, ratio2 = 0, 0
        return max(intersection / union, ratio1, ratio2)

    def is_inside(box1, box2):
        intersection = intersection_area(box1, box2)
        ratio1 = intersection / box_area(box1)
        return ratio1 > 0.95

    boxes = boxes.tolist()
    filtered_boxes = []
    if ocr_bbox:
        filtered_boxes.extend(ocr_bbox)
    for i, box1 in enumerate(boxes):
        is_valid_box = True
        for j, box2 in enumerate(boxes):
            # keep the smaller box
            if i != j and IoU(box1, box2) > iou_threshold and box_area(box1) > box_area(box2):
                is_valid_box = False
                break
        if is_valid_box:
            if ocr_bbox:
                # only add the box if it does not overlap with any ocr bbox
                if not any(IoU(box1, box3) > iou_threshold and not is_inside(box1, box3) for k, box3 in enumerate(ocr_bbox)):
                    filtered_boxes.append(box1)
            else:
                filtered_boxes.append(box1)
    return torch.tensor(filtered_boxes)


def remove_overlap_new(boxes, iou_threshold, ocr_bbox=None):
    '''
    ocr_bbox format: [{'type': 'text', 'bbox': [x1, y1, x2, y2], 'interactivity': False, 'content': str}, ...]
    boxes format:    [{'type': 'icon', 'bbox': [x1, y1, x2, y2], 'interactivity': True, 'content': None}, ...]
    '''
    assert ocr_bbox is None or isinstance(ocr_bbox, List)

    def box_area(box):
        return (box[2] - box[0]) * (box[3] - box[1])

    def intersection_area(box1, box2):
        x1 = max(box1[0], box2[0])
        y1 = max(box1[1], box2[1])
        x2 = min(box1[2], box2[2])
        y2 = min(box1[3], box2[3])
        return max(0, x2 - x1) * max(0, y2 - y1)

    def IoU(box1, box2):
        intersection = intersection_area(box1, box2)
        union = box_area(box1) + box_area(box2) - intersection + 1e-6
        if box_area(box1) > 0 and box_area(box2) > 0:
            ratio1 = intersection / box_area(box1)
            ratio2 = intersection / box_area(box2)
        else:
            ratio1, ratio2 = 0, 0
        return max(intersection / union, ratio1, ratio2)

    def is_inside(box1, box2):
        intersection = intersection_area(box1, box2)
        ratio1 = intersection / box_area(box1)
        return ratio1 > 0.80

    filtered_boxes = []
    if ocr_bbox:
        filtered_boxes.extend(ocr_bbox)
    for i, box1_elem in enumerate(boxes):
        box1 = box1_elem['bbox']
        is_valid_box = True
        for j, box2_elem in enumerate(boxes):
            # keep the smaller box
            box2 = box2_elem['bbox']
            if i != j and IoU(box1, box2) > iou_threshold and box_area(box1) > box_area(box2):
                is_valid_box = False
                break
        if is_valid_box:
            if ocr_bbox:
                # keep yolo boxes + prioritize ocr label
                box_added = False
                ocr_labels = ''
                for box3_elem in ocr_bbox:
                    if not box_added:
                        box3 = box3_elem['bbox']
                        if is_inside(box3, box1):  # ocr box inside icon box
                            try:
                                # gather all ocr labels and drop the standalone ocr box
                                ocr_labels += box3_elem['content'] + ' '
                                filtered_boxes.remove(box3_elem)
                            except:
                                continue
                        elif is_inside(box1, box3):
                            # icon inside ocr box: don't add this icon box; ocr boxes don't
                            # overlap each other, so the icon can only be inside one of them
                            box_added = True
                            break
                        else:
                            continue
                if not box_added:
                    if ocr_labels:
                        filtered_boxes.append({'type': 'icon', 'bbox': box1_elem['bbox'], 'interactivity': True, 'content': ocr_labels, 'source': 'box_yolo_content_ocr'})
                    else:
                        filtered_boxes.append({'type': 'icon', 'bbox': box1_elem['bbox'], 'interactivity': True, 'content': None, 'source': 'box_yolo_content_yolo'})
            else:
                filtered_boxes.append(box1)
    return filtered_boxes


def load_image(image_path: str) -> Tuple[np.ndarray, torch.Tensor]:
    transform = T.Compose(
        [
            T.RandomResize([800], max_size=1333),
            T.ToTensor(),
            T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]
    )
    image_source = Image.open(image_path).convert("RGB")
    image = np.asarray(image_source)
    image_transformed, _ = transform(image_source, None)
    return image, image_transformed


def annotate(image_source: np.ndarray, boxes: torch.Tensor, logits: torch.Tensor, phrases: List[str], text_scale: float,
             text_padding=5, text_thickness=2, thickness=3) -> Tuple[np.ndarray, dict]:
    """
    This function annotates an image with bounding boxes and labels.

    Parameters:
    image_source (np.ndarray): The source image to be annotated.
    boxes (torch.Tensor): A tensor containing bounding box coordinates, in cxcywh format, pixel scale.
    logits (torch.Tensor): A tensor containing confidence scores for each bounding box.
    phrases (List[str]): A list of labels for each bounding box.
    text_scale (float): The scale of the text to be displayed. 0.8 for mobile/web, 0.3 for desktop, 0.4 for mind2web.

    Returns:
    Tuple[np.ndarray, dict]: The annotated image and a dict mapping each label to its xywh box.
    """
    h, w, _ = image_source.shape
    boxes = boxes * torch.Tensor([w, h, w, h])
    xyxy = box_convert(boxes=boxes, in_fmt="cxcywh", out_fmt="xyxy").numpy()
    xywh = box_convert(boxes=boxes, in_fmt="cxcywh", out_fmt="xywh").numpy()
    detections = sv.Detections(xyxy=xyxy)

    labels = [f"{phrase}" for phrase in range(boxes.shape[0])]

    box_annotator = BoxAnnotator(text_scale=text_scale, text_padding=text_padding, text_thickness=text_thickness, thickness=thickness)
    annotated_frame = image_source.copy()
    annotated_frame = box_annotator.annotate(scene=annotated_frame, detections=detections, labels=labels, image_size=(w, h))

    label_coordinates = {f"{phrase}": v for phrase, v in zip(phrases, xywh)}
    return annotated_frame, label_coordinates


def predict(model, image, caption, box_threshold, text_threshold):
    """Use a huggingface model in place of the original grounded-detection model."""
    model, processor = model['model'], model['processor']
    device = model.device

    inputs = processor(images=image, text=caption, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = model(**inputs)

    results = processor.post_process_grounded_object_detection(
        outputs,
        inputs.input_ids,
        box_threshold=box_threshold,  # 0.4,
        text_threshold=text_threshold,  # 0.3,
        target_sizes=[image.size[::-1]]
    )[0]
    boxes, logits, phrases = results["boxes"], results["scores"], results["labels"]
    return boxes, logits, phrases


def predict_yolo(model, image, box_threshold, imgsz, scale_img, iou_threshold=0.7):
    """Run ultralytics YOLO prediction on the image."""
    if scale_img:
        result = model.predict(
            source=image,
            conf=box_threshold,
            imgsz=imgsz,
            iou=iou_threshold,  # default 0.7
        )
    else:
        result = model.predict(
            source=image,
            conf=box_threshold,
            iou=iou_threshold,  # default 0.7
        )
    boxes = result[0].boxes.xyxy  # in pixel space
    conf = result[0].boxes.conf
    phrases = [str(i) for i in range(len(boxes))]

    return boxes, conf, phrases


def int_box_area(box, w, h):
    x1, y1, x2, y2 = box
    int_box = [int(x1*w), int(y1*h), int(x2*w), int(y2*h)]
    area = (int_box[2] - int_box[0]) * (int_box[3] - int_box[1])
    return area


def get_som_labeled_img(image_source: Union[str, Image.Image], model=None, BOX_TRESHOLD=0.01, output_coord_in_ratio=False, ocr_bbox=None, text_scale=0.4, text_padding=5, draw_bbox_config=None, caption_model_processor=None, ocr_text=[], use_local_semantics=True, iou_threshold=0.9, prompt=None, scale_img=False, imgsz=None, batch_size=128):
    """Process either an image path or an Image object.

    Args:
        image_source: Either a file path (str) or a PIL Image object
        ...
    """
    if isinstance(image_source, str):
        image_source = Image.open(image_source)
    image_source = image_source.convert("RGB")  # for CLIP
    w, h = image_source.size
    if not imgsz:
        imgsz = (h, w)
    xyxy, logits, phrases = predict_yolo(model=model, image=image_source, box_threshold=BOX_TRESHOLD, imgsz=imgsz, scale_img=scale_img, iou_threshold=0.1)
    xyxy = xyxy / torch.Tensor([w, h, w, h]).to(xyxy.device)
    image_source = np.asarray(image_source)
    phrases = [str(i) for i in range(len(phrases))]

    # annotate the image with labels
    if ocr_bbox:
        ocr_bbox = torch.tensor(ocr_bbox) / torch.Tensor([w, h, w, h])
        ocr_bbox = ocr_bbox.tolist()
    else:
        print('no ocr bbox!!!')
        ocr_bbox = []  # use an empty list so the zip below is still well-defined

    ocr_bbox_elem = [{'type': 'text', 'bbox': box, 'interactivity': False, 'content': txt, 'source': 'box_ocr_content_ocr'} for box, txt in zip(ocr_bbox, ocr_text) if int_box_area(box, w, h) > 0]
    xyxy_elem = [{'type': 'icon', 'bbox': box, 'interactivity': True, 'content': None} for box in xyxy.tolist() if int_box_area(box, w, h) > 0]
    filtered_boxes = remove_overlap_new(boxes=xyxy_elem, iou_threshold=iou_threshold, ocr_bbox=ocr_bbox_elem)

    # sort filtered_boxes so that the ones with 'content': None come last, and get the index of the first 'content': None
    filtered_boxes_elem = sorted(filtered_boxes, key=lambda x: x['content'] is None)
    starting_idx = next((i for i, box in enumerate(filtered_boxes_elem) if box['content'] is None), -1)
    filtered_boxes = torch.tensor([box['bbox'] for box in filtered_boxes_elem])
    print('len(filtered_boxes):', len(filtered_boxes), starting_idx)

    # get parsed icon local semantics
    time1 = time.time()
    if use_local_semantics:
        caption_model = caption_model_processor['model']
        if 'phi3_v' in caption_model.config.model_type:
            parsed_content_icon = get_parsed_content_icon_phi3v(filtered_boxes, ocr_bbox, image_source, caption_model_processor)
        else:
            parsed_content_icon = get_parsed_content_icon(filtered_boxes, starting_idx, image_source, caption_model_processor, prompt=prompt, batch_size=batch_size)
        ocr_text = [f"Text Box ID {i}: {txt}" for i, txt in enumerate(ocr_text)]
        icon_start = len(ocr_text)
        parsed_content_icon_ls = []
        # fill the filtered_boxes_elem entries that have None content with parsed_content_icon, in order
        for i, box in enumerate(filtered_boxes_elem):
            if box['content'] is None:
                box['content'] = parsed_content_icon.pop(0)
        for i, txt in enumerate(parsed_content_icon):
            parsed_content_icon_ls.append(f"Icon Box ID {str(i+icon_start)}: {txt}")
        parsed_content_merged = ocr_text + parsed_content_icon_ls
    else:
        ocr_text = [f"Text Box ID {i}: {txt}" for i, txt in enumerate(ocr_text)]
        parsed_content_merged = ocr_text
    print('time to get parsed content:', time.time()-time1)

    filtered_boxes = box_convert(boxes=filtered_boxes, in_fmt="xyxy", out_fmt="cxcywh")

    phrases = [i for i in range(len(filtered_boxes))]

    # draw boxes
    if draw_bbox_config:
        annotated_frame, label_coordinates = annotate(image_source=image_source, boxes=filtered_boxes, logits=logits, phrases=phrases, **draw_bbox_config)
    else:
        annotated_frame, label_coordinates = annotate(image_source=image_source, boxes=filtered_boxes, logits=logits, phrases=phrases, text_scale=text_scale, text_padding=text_padding)

    pil_img = Image.fromarray(annotated_frame)
    buffered = io.BytesIO()
    pil_img.save(buffered, format="PNG")
    encoded_image = base64.b64encode(buffered.getvalue()).decode('ascii')
    if output_coord_in_ratio:
        label_coordinates = {k: [v[0]/w, v[1]/h, v[2]/w, v[3]/h] for k, v in label_coordinates.items()}
        assert w == annotated_frame.shape[1] and h == annotated_frame.shape[0]

    return encoded_image, label_coordinates, filtered_boxes_elem


def get_xywh(input):
    x, y, w, h = input[0][0], input[0][1], input[2][0] - input[0][0], input[2][1] - input[0][1]
    x, y, w, h = int(x), int(y), int(w), int(h)
    return x, y, w, h


def get_xyxy(input):
    x, y, xp, yp = input[0][0], input[0][1], input[2][0], input[2][1]
    x, y, xp, yp = int(x), int(y), int(xp), int(yp)
    return x, y, xp, yp


def get_xywh_yolo(input):
    x, y, w, h = input[0], input[1], input[2] - input[0], input[3] - input[1]
    x, y, w, h = int(x), int(y), int(w), int(h)
    return x, y, w, h


def check_ocr_box(image_source: Union[str, Image.Image], display_img=True, output_bb_format='xywh', goal_filtering=None, easyocr_args=None, use_paddleocr=False):
    if isinstance(image_source, str):
        image_source = Image.open(image_source)
    if image_source.mode == 'RGBA':
        # Convert RGBA to RGB to avoid alpha channel issues
        image_source = image_source.convert('RGB')
    image_np = np.array(image_source)
    w, h = image_source.size
    if use_paddleocr:
        if easyocr_args is None:
            text_threshold = 0.5
        else:
            text_threshold = easyocr_args['text_threshold']
        result = paddle_ocr.ocr(image_np, cls=False)[0]
        coord = [item[0] for item in result if item[1][1] > text_threshold]
        text = [item[1][0] for item in result if item[1][1] > text_threshold]
    else:  # EasyOCR
        if easyocr_args is None:
            easyocr_args = {}
        result = reader.readtext(image_np, **easyocr_args)
        coord = [item[0] for item in result]
        text = [item[1] for item in result]
    if display_img:
        opencv_img = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
        bb = []
        for item in coord:
            x, y, a, b = get_xywh(item)
            bb.append((x, y, a, b))
            cv2.rectangle(opencv_img, (x, y), (x+a, y+b), (0, 255, 0), 2)
        # matplotlib expects RGB
        plt.imshow(cv2.cvtColor(opencv_img, cv2.COLOR_BGR2RGB))
    else:
        if output_bb_format == 'xywh':
            bb = [get_xywh(item) for item in coord]
        elif output_bb_format == 'xyxy':
            bb = [get_xyxy(item) for item in coord]
    return (text, bb), goal_filtering
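
To illustrate the box-merging step in isolation (not part of the commit), here is a toy sketch of `remove_overlap_new` with two made-up icon boxes and one made-up OCR box; note that importing `util.utils` initializes the EasyOCR and PaddleOCR readers at module load, so both packages must be installed.

```python
# Toy sketch: merge OCR text boxes into overlapping icon boxes. All boxes are made up,
# given in normalized xyxy coordinates as used inside get_som_labeled_img.
from util.utils import remove_overlap_new

icon_boxes = [
    {'type': 'icon', 'bbox': [0.10, 0.10, 0.30, 0.20], 'interactivity': True, 'content': None},
    {'type': 'icon', 'bbox': [0.50, 0.50, 0.70, 0.60], 'interactivity': True, 'content': None},
]
ocr_boxes = [
    # an OCR text box sitting inside the first icon; its text should be attached to that icon
    {'type': 'text', 'bbox': [0.12, 0.12, 0.28, 0.18], 'interactivity': False,
     'content': 'Submit', 'source': 'box_ocr_content_ocr'},
]

merged = remove_overlap_new(icon_boxes, iou_threshold=0.7, ocr_bbox=ocr_boxes)
for elem in merged:
    print(elem['type'], elem['source'], repr(elem['content']))
# Expected: the first icon is kept with content 'Submit ' (source 'box_yolo_content_ocr'),
# the second icon keeps content None (source 'box_yolo_content_yolo'), and the standalone
# OCR box is dropped because it was absorbed into the first icon.
```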