# Adapted from https://supervision.roboflow.com/develop/notebooks/annotate-video-with-detections/#detecting-objects
import numpy as np
import supervision as sv
from tqdm import tqdm
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
path_to_video = './TUM_Dataset/rgb/front/output.mkv'

# COCO class IDs to blur, per the model's class-name mapping:
# {0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane',
#  5: 'bus', ..., 79: 'toothbrush'}
selected_classes = [0, 1, 2, 3, 5]

blur_annotator = sv.BlurAnnotator()

# Create a frame generator and a video info object from supervision utilities;
# video_info carries resolution, FPS, and frame count for the VideoSink.
frame_generator = sv.get_video_frames_generator(path_to_video)
video_info = sv.VideoInfo.from_video_path(video_path=path_to_video)

with sv.VideoSink(target_path="blurred_output.mp4", video_info=video_info) as sink:
    # Iterate through the frames yielded by the frame generator.
    for frame in tqdm(frame_generator, total=video_info.total_frames):
        # Run inference on the current frame.
        results = model(frame)[0]
        # Parse the result into the supervision Detections data model.
        detections = sv.Detections.from_ultralytics(results)
        # Blur the detections of the selected classes on a copy of the frame.
        annotated_frame = blur_annotator.annotate(
            scene=frame.copy(),
            detections=detections[np.isin(detections.class_id, selected_classes)]
        )
        # Write the annotated frame to the video sink.
        sink.write_frame(frame=annotated_frame)
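
# --- Optional extension (a minimal sketch, not part of the original script) ---
# It can be useful to drop low-confidence detections before blurring, so that
# spurious detections do not smear the video. The `min_confidence` threshold
# and the `blur_frame` helper are assumptions added here for illustration; the
# loop above blurs every detection of the selected classes regardless of score.
# This reuses `model`, `selected_classes`, and `blur_annotator` defined above.
def blur_frame(frame: np.ndarray, min_confidence: float = 0.3) -> np.ndarray:
    # Run inference on a single frame and parse it into a Detections object.
    results = model(frame, verbose=False)[0]
    detections = sv.Detections.from_ultralytics(results)
    # Keep only the selected classes with a score at or above the threshold.
    keep = np.isin(detections.class_id, selected_classes) & (
        detections.confidence >= min_confidence
    )
    # Blur the surviving detections on a copy of the frame.
    return blur_annotator.annotate(scene=frame.copy(), detections=detections[keep])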