generated from roboflow/template-python
-
Notifications
You must be signed in to change notification settings - Fork 3.1k
Expand file tree
/
Copy path: ultralytics_file_example.py
More file actions
101 lines (86 loc) · 3.51 KB
/
ultralytics_file_example.py
File metadata and controls
101 lines (86 loc) · 3.51 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
import cv2
import numpy as np
from ultralytics import YOLO
from utils.general import find_in_list, load_zones_config
from utils.timers import FPSBasedTimer
import supervision as sv
# Shared annotation palette: one distinct color per zone (up to four zones),
# with labels rendered in black on top of the zone color.
_ZONE_HEX_COLORS = ["#E6194B", "#3CB44B", "#FFE119", "#3C76D1"]
COLORS = sv.ColorPalette.from_hex(_ZONE_HEX_COLORS)
COLOR_ANNOTATOR = sv.ColorAnnotator(color=COLORS)
LABEL_ANNOTATOR = sv.LabelAnnotator(
    color=COLORS,
    text_color=sv.Color.from_hex("#000000"),
)
def main(
    zone_configuration_path: str,
    source_video_path: str,
    weights: str = "yolov8s.pt",
    device: str = "cpu",
    confidence_threshold: float = 0.3,
    iou_threshold: float = 0.7,
    classes: list[int] | None = None,
) -> None:
    """
    Calculating detections dwell time in zones, using video file.

    Args:
        zone_configuration_path: Path to the zone configuration JSON file
        source_video_path: Path to the source video file
        weights: Path to the model weights file
        device: Computation device ('cpu', 'mps' or 'cuda')
        confidence_threshold: Confidence level for detections (0 to 1)
        iou_threshold: IOU threshold for non-max suppression
        classes: List of class IDs to track. If None or empty, all classes
            are tracked
    """
    # Avoid the mutable-default-argument pitfall: normalize None to an empty
    # list (empty means "track everything" downstream in find_in_list).
    class_filter = [] if classes is None else classes

    model = YOLO(weights)
    tracker = sv.ByteTrack(minimum_matching_threshold=0.5)
    video_info = sv.VideoInfo.from_video_path(video_path=source_video_path)
    frames_generator = sv.get_video_frames_generator(source_video_path)

    # One polygon zone (anchored at detection centers) and one FPS-based
    # dwell timer per polygon defined in the configuration file.
    polygons = load_zones_config(file_path=zone_configuration_path)
    zones = [
        sv.PolygonZone(
            polygon=polygon,
            triggering_anchors=(sv.Position.CENTER,),
        )
        for polygon in polygons
    ]
    timers = [FPSBasedTimer(video_info.fps) for _ in zones]

    try:
        for frame in frames_generator:
            results = model(
                frame,
                verbose=False,
                device=device,
                conf=confidence_threshold,
                iou=iou_threshold,
            )[0]
            detections = sv.Detections.from_ultralytics(results)
            detections = detections[find_in_list(detections.class_id, class_filter)]
            detections = tracker.update_with_detections(detections)

            annotated_frame = frame.copy()

            for idx, zone in enumerate(zones):
                annotated_frame = sv.draw_polygon(
                    scene=annotated_frame, polygon=zone.polygon, color=COLORS.by_idx(idx)
                )
                # Keep only the detections inside this zone and advance their
                # per-zone dwell clocks.
                detections_in_zone = detections[zone.trigger(detections)]
                time_in_zone = timers[idx].tick(detections_in_zone)
                # Force every box/label in this zone to use the zone's color
                # instead of the per-class default.
                custom_color_lookup = np.full(detections_in_zone.class_id.shape, idx)

                annotated_frame = COLOR_ANNOTATOR.annotate(
                    scene=annotated_frame,
                    detections=detections_in_zone,
                    custom_color_lookup=custom_color_lookup,
                )
                # Label format: "#<tracker_id> MM:SS" dwell time in the zone.
                labels = [
                    f"#{tracker_id} {int(time // 60):02d}:{int(time % 60):02d}"
                    for tracker_id, time in zip(detections_in_zone.tracker_id, time_in_zone)
                ]
                annotated_frame = LABEL_ANNOTATOR.annotate(
                    scene=annotated_frame,
                    detections=detections_in_zone,
                    labels=labels,
                    custom_color_lookup=custom_color_lookup,
                )
            cv2.imshow("Processed Video", annotated_frame)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
    finally:
        # Close the display window even if processing raised mid-stream.
        cv2.destroyAllWindows()
if __name__ == "__main__":
from jsonargparse import auto_cli, set_parsing_settings
set_parsing_settings(parse_optionals_as_positionals=True)
auto_cli(main, as_positional=False)