import gradio as gr
import cv2
from ultralytics import YOLO

# Load the YOLO model
model = YOLO('last.torchscript')  # Replace with 'best.onnx' or 'best.torchscript' if converted

# Function for image inference
def detect_in_image(image):
    # Gradio supplies RGB arrays; swap channels so OpenCV/YOLO sees BGR
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    results = model.predict(source=image, save=False, save_txt=False)
    annotated_frame = results[0].plot()  # Annotated frame with bounding boxes (BGR)
    annotated_frame = cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB)  # Convert back to RGB for display
    return annotated_frame

# Function for video inference
def detect_in_video(video):
    cap = cv2.VideoCapture(video)
    output_path = "output_video.mp4"

    # Get video properties
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)

    # Create VideoWriter for saving the output video
    out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))

    # Frame generator for live streaming
    def frame_generator(frame_skip=6):
        frame_count = 0
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            if frame_count % frame_skip == 0:  # Process every nth frame
                results = model.predict(source=frame, save=False, save_txt=False)
                annotated_frame = results[0].plot()  # Annotated frame with bounding boxes

                # Save annotated frame to output video (only processed frames are
                # written, so the saved clip plays back faster than the source)
                out.write(annotated_frame)

                # Convert frame to RGB for display
                annotated_frame_rgb = cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB)
                yield annotated_frame_rgb
            frame_count += 1

        # Release resources once all frames are processed
        cap.release()
        out.release()

    return frame_generator(), output_path

# Build the Gradio interface
with gr.Blocks(css=".header {font-size: 30px; color: #4CAF50; font-weight: bold; text-align: center;} .image-output {max-width: 400px; margin: auto;}") as app:
    gr.Markdown("
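# The script above is truncated mid-call, so the rest of the interface is lost.
# Below is a minimal, hypothetical sketch (not the author's original code) of one
# way the UI could be completed around detect_in_image and detect_in_video. The
# header text, tab names, component labels, and the run_video wrapper are all
# assumptions. Because detect_in_video returns a generator plus a file path, the
# wrapper re-yields the frames so Gradio can stream them as they are produced.

def run_video(video):
    # Hypothetical wrapper: stream annotated frames live, then emit the saved
    # video path once the generator is exhausted and the VideoWriter is released.
    frames, path = detect_in_video(video)
    last = None
    for frame in frames:
        last = frame
        yield last, None
    yield last, path

# Standalone sketch of the Blocks layout, reusing the CSS classes defined above.
with gr.Blocks(css=".header {font-size: 30px; color: #4CAF50; font-weight: bold; text-align: center;} .image-output {max-width: 400px; margin: auto;}") as demo:
    gr.Markdown("Object Detection Demo", elem_classes=["header"])  # placeholder title
    with gr.Tab("Image"):
        image_in = gr.Image(type="numpy", label="Input image")
        image_out = gr.Image(label="Detections", elem_classes=["image-output"])
        gr.Button("Detect").click(detect_in_image, inputs=image_in, outputs=image_out)
    with gr.Tab("Video"):
        video_in = gr.Video(label="Input video")
        frame_out = gr.Image(label="Live preview")
        file_out = gr.File(label="Annotated video")
        gr.Button("Detect").click(run_video, inputs=video_in, outputs=[frame_out, file_out])

# queue() enables generator-based streaming on older Gradio versions; harmless on newer ones.
demo.queue().launch()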