Skip to content

Commit d869f42

Browse files
authored
Merge pull request #29 from Capstone-Projects-2024-Spring/gesture-ticket
Basic page for detecting gestures and controlling a light
2 parents c9a0469 + a1a06e0 commit d869f42

File tree

5 files changed

+152
-91
lines changed

5 files changed

+152
-91
lines changed

app/app.py

Lines changed: 116 additions & 76 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
from flask import Flask, render_template, Response, jsonify
22
import cv2
3+
import requests
34
import time
45
import cv2
56
import asyncio, os
@@ -12,20 +13,12 @@
1213
#so you will need to install it with pip install python_weather
1314
#queue to find the right gesture
1415
from collections import deque
16+
deviceChoice = None
17+
deviceStatus = None
1518

19+
#https://colab.research.google.com/github/googlesamples/mediapipe/blob/main/examples/gesture_recognizer/python/gesture_recognizer.ipynb#scrollTo=TUfAcER1oUS6
20+
#https://developers.google.com/mediapipe/solutions/vision/gesture_recognizer/python#video
1621

17-
def thumbClassifier(results):
18-
res=results.multi_hand_landmarks[0].landmark
19-
GestureObject = hand(results.multi_hand_landmarks[0])
20-
21-
# print('Thumb angle: ', thumb.angle)
22-
# print('Ring Finger angle: ', ringFinger.angle)
23-
# print('Middle Finger angle: ', middleFinger.angle)
24-
# print('Index Finger angle: ', indexFinger.angle)
25-
# print('Pinky Finger angle: ', pinkyFinger.angle)
26-
# print(wrist.x, wrist.y, wrist.z)
27-
28-
return GestureObject.gesture
2922

3023

3124
def detectHand(hands, img, ASLModel):
@@ -49,8 +42,10 @@ def detectHand(hands, img, ASLModel):
4942

5043

5144

52-
def detect_motion(last_frame, current_frame, threshold=50):
45+
def detect_motion(last_frame, current_frame, threshold=20):
5346
# Convert frames to grayscale
47+
if last_frame is None:
48+
last_frame = current_frame
5449
gray_last = cv2.cvtColor(last_frame, cv2.COLOR_BGR2GRAY)
5550
gray_current = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)
5651

@@ -68,71 +63,115 @@ def detect_motion(last_frame, current_frame, threshold=50):
6863
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
6964

7065
# Return True if contours are found
71-
return len(contours) > 0
72-
73-
def detection(cap,queue):
74-
while True:
75-
success, img = cap.read()
76-
if not success:
77-
return False, None
78-
detected, frame = detectHand(hands,img, '')
79-
print(detected)
80-
if detected: queue.append(detected)
81-
if len(queue) ==30 and len(set(queue))==1:
82-
global firstGesture
83-
firstGesture = set(queue).pop()
84-
queue.clear()
85-
return firstGesture
86-
87-
66+
print('checking for motion', len(contours))
67+
return len(contours) > 0, current_frame
68+
69+
def toggle_light():
70+
#action = "turn_on" if state else "turn_off"
71+
url = f"http://localhost:8123/api/services/switch/toggle"
72+
headers = {
73+
"Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiIyOGU3ZDZmNTg5MjE0MzAxOWQwNTVjZWI5MThmYTcyMCIsImlhdCI6MTcxMjM0NDQ1MywiZXhwIjoyMDI3NzA0NDUzfQ.AXaP5ndD3QFtxhYxfXwT93x6qBh3GacCKmgiTHU6g7A",
74+
"Content-Type": "application/json",
75+
}
76+
data = {"entity_id": "switch.living_room_light_1"}
77+
print('toggling light',data)
78+
response = requests.post(url, json=data, headers=headers)
79+
if response.status_code == 200:
80+
# Get the new state of the light
81+
time.sleep(1)
82+
light_state = requests.get(f"http://localhost:8123/api/states/{data['entity_id']}", headers=headers).json()
83+
return light_state['state'] == 'on'
84+
return None
85+
86+
def black_image(img):
87+
black_screen = np.zeros_like(img)
88+
img = black_screen
89+
print('no motion')
90+
ret, buffer = cv2.imencode('.jpg', img)
91+
img = buffer.tobytes()
92+
93+
yield (b'--frame\r\n'
94+
b'Content-Type: image/jpeg\r\n\r\n' + img + b'\r\n')
8895

8996
def gen_frames(cap):
9097
inMotion = False
9198
last_frame = None
99+
last_motion = None
100+
global deviceStatus
101+
global deviceChoice
102+
#loop to keep the iterations of the model going
92103
while True:
93-
94104
success, img = cap.read()
95105
if not success:
106+
print('failed to read frame')
96107
break
108+
109+
inMotion,last_frame = detect_motion(last_frame, img)
110+
111+
if not inMotion:
112+
if last_motion and time.time()-last_motion > 2:
113+
print('no motion detected, black screen being shown.')
114+
img = np.zeros_like(img)
115+
ret, buffer = cv2.imencode('.jpg', img)
116+
img = buffer.tobytes()
117+
yield (b'--frame\r\n'
118+
b'Content-Type: image/jpeg\r\n\r\n' + img + b'\r\n')
119+
continue
97120
else:
98-
99-
detected, frame = detectHand(hands,img, '')
100-
print(detected)
101-
if detected: firstQueue.append(detected)
102-
if len(firstQueue) ==30 and len(set(firstQueue))==1:
103-
print("first gesture detected")
104-
global firstGesture
105-
firstGesture = set(firstQueue).pop()
106-
firstQueue.clear()
107-
while True:
108-
print('made it into second loop')
109-
success, img = cap.read()
110-
if not success:
111-
break
112-
else:
113-
detected, frame = detectHand(hands,img, '')
114-
if detected: secondQueue.append(detected)
115-
116-
if len(secondQueue)== 30 and len(set(secondQueue))==1:
117-
global secondGesture
118-
secondGesture = set(secondQueue).pop()
119-
print('both gestures are',firstGesture,secondGesture)
120-
time.sleep(2)
121-
firstGesture,secondGesture = 'No gesture detected','No gesture detected'
122-
secondQueue.clear()
123-
124-
break
125-
ret, buffer = cv2.imencode('.jpg', img)
126-
img = buffer.tobytes()
127-
yield (b'--frame\r\n'
128-
b'Content-Type: image/jpeg\r\n\r\n' + img + b'\r\n')
129-
#cv2.putText(img,'put text on the frame', (10,130), cv2.FONT_HERSHEY_PLAIN, 3, (100,50,100), 3)
130-
ret, buffer = cv2.imencode('.jpg', img)
131-
img = buffer.tobytes()
132-
133-
yield (b'--frame\r\n'
134-
b'Content-Type: image/jpeg\r\n\r\n' + img + b'\r\n')
135-
121+
last_motion = time.time()
122+
123+
detected, frame = detectHand(hands,img, '')
124+
print(detected)
125+
if detected: firstQueue.append(detected)
126+
if len(firstQueue) ==30 and len(set(firstQueue))==1 and set(firstQueue).pop() != 'No gesture detected':
127+
print("first gesture detected")
128+
global firstGesture
129+
firstGesture = set(firstQueue).pop()
130+
firstQueue.clear()
131+
132+
while True:
133+
print('made it into second loop')
134+
success, img = cap.read()
135+
if not success:
136+
break
137+
else:
138+
detected, frame = detectHand(hands,img, '')
139+
if detected: secondQueue.append(detected)
140+
141+
if len(secondQueue)== 30 and len(set(secondQueue))==1 and set(secondQueue).pop() != 'No gesture detected':
142+
global secondGesture
143+
secondGesture = set(secondQueue).pop()
144+
print('both gestures are',firstGesture,secondGesture)
145+
if firstGesture == 'thumbs up' and secondGesture == 'thumbs up':
146+
print('both thumbs up detected')
147+
deviceChoice = 'light'
148+
print('device choice is', deviceChoice)
149+
try:
150+
lightState = toggle_light()
151+
if lightState is True:
152+
deviceStatus = 'on'
153+
elif lightState is False:
154+
deviceStatus = 'off'
155+
print('Device Status is', deviceStatus)
156+
except:
157+
print('toggle light didnt work')
158+
time.sleep(5)
159+
deviceChoice, deviceStatus = 'N/A','N/A'
160+
firstGesture,secondGesture = 'No gesture detected','No gesture detected'
161+
secondQueue.clear()
162+
break
163+
164+
#writing the image in the second gesture loop, shouldn't be changed
165+
ret, buffer = cv2.imencode('.jpg', img)
166+
img = buffer.tobytes()
167+
yield (b'--frame\r\n'
168+
b'Content-Type: image/jpeg\r\n\r\n' + img + b'\r\n')
169+
170+
#writing the image in the first gesture loop, shouldn't be changed
171+
ret, buffer = cv2.imencode('.jpg', img)
172+
img = buffer.tobytes()
173+
yield (b'--frame\r\n'
174+
b'Content-Type: image/jpeg\r\n\r\n' + img + b'\r\n')
136175

137176
app = Flask(__name__)
138177
#comment this out if mediapipe doesnt work
@@ -144,8 +183,8 @@ def gen_frames(cap):
144183
#until here
145184
#comment the next line in if mediapipe doesn't work
146185
#hands = ""
147-
latest_gesture = 'No gesture detected'
148-
firstGesture, secondGesture = 'No gesture detected yet','No gesture detected'
186+
187+
latest_gesture, firstGesture, secondGesture = 'No gesture detected yet','No gesture detected','No gesture detected'
149188
firstQueue,secondQueue = deque(maxlen=30),deque(maxlen=30)
150189

151190

@@ -155,16 +194,17 @@ def video_feed():
155194
return Response(gen_frames(cv2.VideoCapture(0)),
156195
mimetype='multipart/x-mixed-replace; boundary=frame')
157196

197+
198+
158199
@app.route('/')
159200
def index():
160-
"""Video streaming home page."""
201+
161202
return render_template('index.html')
162203

163204
@app.route('/current_gesture')
164205
def current_gesture():
165-
return jsonify(gesture=latest_gesture, firstGesture = firstGesture, secondGesture = secondGesture)
206+
207+
return jsonify(gesture=latest_gesture, firstGesture = firstGesture, secondGesture = secondGesture, deviceChoice=deviceChoice, deviceStatus=deviceStatus)
166208

167209
if __name__ == "__main__":
168-
169-
170-
app.run(debug=True)
210+
app.run(debug=True)

app/methods.py

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -74,6 +74,22 @@ def calculate_angle(self,p1, p2, p3):
7474

7575

7676

77+
def thumbClassifier(results):
78+
res=results.multi_hand_landmarks[0].landmark
79+
GestureObject = hand(results.multi_hand_landmarks[0])
80+
81+
# print('Thumb angle: ', thumb.angle)
82+
# print('Ring Finger angle: ', ringFinger.angle)
83+
# print('Middle Finger angle: ', middleFinger.angle)
84+
# print('Index Finger angle: ', indexFinger.angle)
85+
# print('Pinky Finger angle: ', pinkyFinger.angle)
86+
# print(wrist.x, wrist.y, wrist.z)
87+
88+
return GestureObject.gesture
89+
90+
91+
92+
7793
def createSquare(results, img):
7894
h, w, c = img.shape
7995
min_x, min_y = w, h

app/readme.md

Lines changed: 10 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -4,15 +4,17 @@
44

55
*To run this project, you will most likely need to be in the Professor's office, as our demo will be using our Raspberry Pi device.*
66
1. The first step of running our project is to turn on and connect the Raspberry Pi to the wifi, screen, camera, and light. *If you're using our setup, you only need to connect to the wifi, and connect the light to any power source through a micro-USB. Follow the steps outlined [here](#connecting-the-pi-and-the-light-to-the-same-network) to connect*
7-
2. Once the Raspberry Pi is connected and configured, navigate to the `app` subfolder of the `Project-Intelligest-Smart-Home` repository.
7+
2. In the root directory, enable the virtual environment with `source venv/bin/activate`
8+
3. In the `Models` directory, install the requirements with `pip install -r requirements.txt`. *You may need to install additional requirements that don't install due to unknown issues with the requirements file, manually install them with `pip install opencv-python`, `pip install flask`, `pip install requests`, `pip install mediapipe`*
9+
4. Once the Raspberry Pi is connected and configured, navigate to the `app` subfolder of the `Project-Intelligest-Smart-Home` repository.
810
This can be done using the `cd` command if you are using the terminal and have opened the repository
9-
3. Once in the `app` subfolder, run the command `python -m flask run` to start the program. The program should take a few seconds to start up.
10-
4. Once the program has output the link to the flask server, open the link in a web browser.
11-
5. The web page should display a live feed from the camera, and the program should be running.
12-
6. To test the program, make a thumbs up, thumbs down, and thumbs flat gesture to make sure the model is correctly detecting gestures.
13-
7. After this test, hold your index finger up for the camera until the gesture is detected as your first gesture.
14-
8. Next, hold a thumbs-up gesture in front of the camera. After this gesture has been held for a few seconds, the web page should show the gestures you used.
15-
9. Once these gestures are detected, the light should turn on through the home assistant API.
11+
5. Once in the `app` subfolder, run the command `python -m flask run` to start the program. The program should take a few seconds to start up.
12+
6. Once the program has output the link to the flask server, open the link in a web browser and enter full-screen mode.
13+
7. The web page should display a live feed from the camera, and the program should be running.
14+
8. To test the program, make a thumbs up, thumbs down, and thumbs flat gesture to make sure the model is correctly detecting gestures.
15+
9. After this test, hold your index finger up for the camera until the gesture is detected as your first gesture.
16+
10. Next, hold a thumbs-up gesture in front of the camera. After this gesture has been held for a few seconds, the web page should show the gestures you used.
17+
11. Once these gestures are detected, the light should turn on through the home assistant API.
1618

1719
## Connecting the Pi and the Light to the Same Network
1820
With Home Assistant, any devices in the system _must_ be connected to the same network as the device running Home Assistant. For demo and testing purposes, you will need to host this network yourself.

app/templates/index.html

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -76,10 +76,11 @@
7676
<p>Current Gesture: <span id="currentGesture">Waiting...</span></p>
7777
<p>First Gesture: <span id="firstGesture">Waiting...</span></p>
7878
<p>Second Gesture: <span id="secondGesture">Waiting...</span></p>
79+
<p>Device Choice: <span id="deviceChoice">N/A</p>
80+
<p>Device Status: <span id="deviceStatus">N/A</p>
7981
</div>
8082
</div>
8183

82-
<!-- Blocks Container (already correctly styled) -->
8384
<div class="blocks-container">
8485
<div class="block">
8586
<img src="{{ url_for('static', filename='images/light.png') }}" alt="Light">
@@ -108,6 +109,8 @@
108109
document.getElementById('currentGesture').textContent = data.gesture || 'No gesture detected yet';
109110
document.getElementById('firstGesture').textContent = data.firstGesture || 'No gesture detected yet';
110111
document.getElementById('secondGesture').textContent = data.secondGesture || 'No gesture detected yet';
112+
document.getElementById('deviceChoice').textContent = data.deviceChoice || 'N/A';
113+
document.getElementById('deviceStatus').textContent = data.deviceStatus || 'N/A';
111114

112115
});
113116
}, 1000);

app/toDoList.md

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
1-
Make images of hands into buttons for the instructions(light, weather etc)
2-
Make sure web page loads and looks okay on raspberry pi screen
3-
Add in method to turn on and off light based on first and second gesture.
4-
Get the available devices displayed in a grid format after the first gesture. Must have for demo after this one
5-
Make html look more official, use css to make it look more like home assistant
6-
1+
- Make images of hands into buttons for the instructions (light, weather, etc.)
2+
- Make sure web page loads and looks okay on raspberry pi screen
3+
- Add in method to turn on and off light based on first and second gesture.
4+
- Get the available devices displayed in a grid format after the first gesture. Must have for demo after this one
5+
- Make html look more official, use css to make it look more like home assistant
6+
- Make screen go dark when inmotion is false. Add a different and better way to

0 commit comments

Comments
 (0)