-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathDetection-Tracking.py
More file actions
76 lines (63 loc) · 2.96 KB
/
Detection-Tracking.py
File metadata and controls
76 lines (63 loc) · 2.96 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
from algorithm.object_detector import YOLOv7
from utils.detections import draw
from tqdm import tqdm
import numpy as np
import cv2
# Run YOLOv7 detection + tracking over every frame of video.mp4 and write
# an annotated copy to output.mp4. Each tracked object gets a stable random
# colour and a trail of arrows showing its motion history.
yolov7 = YOLOv7()
yolov7.load('coco.weights', classes='coco.yaml', device='cpu')

video = cv2.VideoCapture('video.mp4')
if not video.isOpened():
    # Bail out early: with no readable source the writer below would be
    # created with zero-sized frames and the loop would silently do nothing.
    print('[!] error opening the video')
    raise SystemExit(1)

width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(video.get(cv2.CAP_PROP_FPS))
frames_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
output = cv2.VideoWriter('output.mp4', fourcc, fps, (width, height))

print('[+] tracking video...\n')
pbar = tqdm(total=frames_count, unit=' frames', dynamic_ncols=True, position=0, leave=True)

# Per-track state keyed by tracker id:
#   'points' - bounding-box centre recorded at every frame the id was seen
#   'arrows' - condensed trail segments actually drawn on the frame
#   'color'  - random BGR colour assigned the first time the id appears
lines = {}
arrow_line_length = 50  # minimum pixel gap before a new trail arrow is added

try:
    while video.isOpened():
        ret, frame = video.read()
        if not ret:
            break  # end of stream
        detections = yolov7.detect(frame, track=True)
        detected_frame = frame
        for detection in detections:
            if 'id' not in detection:
                continue  # untracked detection: no trail to maintain
            detection_id = detection['id']
            if detection_id not in lines:
                # First sighting of this id: assign it a stable random colour.
                color = (np.random.randint(0, 255), np.random.randint(0, 255), np.random.randint(0, 255))
                detection['color'] = color
                lines[detection_id] = {'points': [], 'arrows': [], 'color': color}
            else:
                detection['color'] = lines[detection_id]['color']
            # Record the centre of the bounding box for this frame.
            lines[detection_id]['points'].append(np.array(
                [detection['x'] + detection['width'] / 2,
                 detection['y'] + detection['height'] / 2], np.int32))
            points = lines[detection_id]['points']
            if len(points) >= 2:
                arrow_lines = lines[detection_id]['arrows']
                if arrow_lines:
                    # Only extend the trail once the object has moved far
                    # enough from the tip of the previous arrow.
                    distance = np.linalg.norm(points[-1] - arrow_lines[-1]['end'])
                    if distance >= arrow_line_length:
                        # Start the new arrow 10px along the previous-tip ->
                        # current-point direction so consecutive arrows
                        # don't overlap head-to-tail.
                        start = np.rint(arrow_lines[-1]['end'] - ((arrow_lines[-1]['end'] - points[-1]) / distance) * 10).astype(int)
                        arrow_lines.append({'start': start, 'end': points[-1]})
                else:
                    # Very first segment of this track's trail.
                    arrow_lines.append({'start': points[-2], 'end': points[-1]})
        # Draw every track's accumulated trail on top of the current frame.
        for line in lines.values():
            for arrow_line in line['arrows']:
                detected_frame = cv2.arrowedLine(detected_frame, arrow_line['start'], arrow_line['end'], line['color'], 2, line_type=cv2.LINE_AA)
        # draw() renders boxes/labels onto the same frame the arrows were
        # drawn into (detected_frame aliases frame), so both layers persist.
        detected_frame = draw(frame, detections)
        output.write(detected_frame)
        pbar.update(1)
except KeyboardInterrupt:
    pass  # allow Ctrl-C to stop tracking but still finalize the output file
finally:
    # Release resources even if tracking was interrupted or crashed, so the
    # partially-written output.mp4 is properly closed.
    pbar.close()
    video.release()
    output.release()
    yolov7.unload()