Commit c9a0469

Merge pull request #26 from Capstone-Projects-2024-Spring/gesture-ticket
Gesture ticket
2 parents: 7676eb5 + 255d46f · commit c9a0469

File tree

5 files changed (+192 -85 lines changed):

app/app.py
app/methods.py
app/readme.md
app/templates/index.html
app/toDoList.md

app/app.py

Lines changed: 82 additions & 63 deletions
@@ -28,78 +28,75 @@ def thumbClassifier(results):
     return GestureObject.gesture
 
 
-
-def preprocessHandRegion(handRegion):
-    #resize the image to the same resolution used in the dataset
-    resized_hand = cv2.resize(handRegion, (224,224))
-    normalized_hand = resized_hand / 255.0
-
-    reshaped_hand = np.reshape(normalized_hand, (224,224, 3))
-    batch_hand = np.expand_dims(reshaped_hand, axis=0)
-    return batch_hand
-
-
-
-def detectHand(hands, img, cTime, pTime, ASLModel, colors):
-
+def detectHand(hands, img, ASLModel):
+    #comment this in if meidapipe doesnt work
+    #return "thumbs up", img
     gestureName=""
-    #success, img = cap.read()
-    cv2.putText(img, "looking for ASL gestures", (int(img.shape[1]/2),20), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 2)
-    if img is None:
-        print("empty camera frame!!!!!")
-
+    if img is None: print("empty camera frame!!!!!")
+
     results = hands.process(img)
     if results.multi_hand_landmarks:
-        #get the dimensions for the cropped image
+
         minX,minY,maxX,maxY=createSquare(results,img)
-        # Draw the square bounding box
-        cv2.rectangle(img, (minX, minY), (maxX, maxY), (colors, colors, colors), 2)
-        if minX < maxX and minY < maxY:
-            handRegion = img[minY:maxY, minX:maxX]
-            #Preprocess the hand region for the ASL model
-            preprocessedHand = preprocessHandRegion(handRegion)
-            #Predict the ASL gesture given by user
-            #asl_prediction = ASLModel.predict(preprocessedHand)
-            asl_prediction = 1
-            #turning the gesture from clas number to real name and adding to video feed
-            #gestureName = "Detected Gesture: " + IdentifyGesture(np.argmax(asl_prediction))
-            gestureName = thumbClassifier(results)
-    cTime = time.time()
-    fps = 1/(cTime-pTime)
-    pTime = cTime
-
-    #adding all the text before displaying the image
-    cv2.putText(img, gestureName, (10, 130), cv2.FONT_HERSHEY_PLAIN, 2, (colors, colors, colors), 2)
-    cv2.putText(img,str(int(fps)), (10,70), cv2.FONT_HERSHEY_PLAIN, 3, (colors,50,colors), 3)
-    #cv2.imshow("Image", img)
-    cv2.waitKey(1)
-    global latest_gesture # Declare it as global inside the function if you're updating it
-    # Update this line accordingly in your detectHand function
+        cv2.rectangle(img, (minX, minY), (maxX, maxY), (155, 155, 155), 2)
+        gestureName = thumbClassifier(results)
+    else:
+        gestureName ='No gesture detected'
+    global latest_gesture
     latest_gesture = gestureName # gestureName is the detected gesture
-    return pTime, cTime, gestureName,img
+    return gestureName,img
+
 
-app = Flask(__name__)
-mpHands = mp.solutions.hands
-hands = mpHands.Hands(static_image_mode=False,
-                      max_num_hands=1,
-                      min_detection_confidence=0.5,
-                      min_tracking_confidence=0.5)
-latest_gesture = 'No gesture detected yet'
-firstGesture, secondGesture = 'No gesture detected yet','No gesture detected yet'
-firstQueue,secondQueue = deque(maxlen=30),deque(maxlen=30)
 
-# Your existing code modified for Flask will go here
 
+def detect_motion(last_frame, current_frame, threshold=50):
+    # Convert frames to grayscale
+    gray_last = cv2.cvtColor(last_frame, cv2.COLOR_BGR2GRAY)
+    gray_current = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)
+
+    # Apply Gaussian Blur to reduce noise and detail
+    gray_last = cv2.GaussianBlur(gray_last, (21, 21), 0)
+    gray_current = cv2.GaussianBlur(gray_current, (21, 21), 0)
+
+    # Compute the absolute difference between the current frame and reference frame
+    frame_diff = cv2.absdiff(gray_last, gray_current)
+
+    # Threshold the difference
+    _, thresh = cv2.threshold(frame_diff, threshold, 255, cv2.THRESH_BINARY)
+
+    # Find contours to see if there are significant changes
+    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+
+    # Return True if contours are found
+    return len(contours) > 0
 
-def gen_frames(cap): # Generator function
+def detection(cap,queue):
     while True:
+        success, img = cap.read()
+        if not success:
+            return False, None
+        detected, frame = detectHand(hands,img, '')
+        print(detected)
+        if detected: queue.append(detected)
+        if len(queue) ==30 and len(set(queue))==1:
+            global firstGesture
+            firstGesture = set(queue).pop()
+            queue.clear()
+            return firstGesture
+
+
+
+def gen_frames(cap):
+    inMotion = False
+    last_frame = None
+    while True:
+
         success, img = cap.read()
         if not success:
             break
         else:
-            # Your hand detection and instructions code integrated here
-            # Instead of cv2.imshow, convert frame to bytes and yield
-            pTime,cTime, detected, frame = detectHand(hands,img, 0,0, '', 155)
+
+            detected, frame = detectHand(hands,img, '')
             print(detected)
             if detected: firstQueue.append(detected)
             if len(firstQueue) ==30 and len(set(firstQueue))==1:
@@ -113,16 +110,20 @@ def gen_frames(cap): # Generator function
             if not success:
                 break
             else:
-                pTime,cTime, detected, frame = detectHand(hands,img, pTime,cTime, '', 155)
+                detected, frame = detectHand(hands,img, '')
                 if detected: secondQueue.append(detected)
-                ret, buffer = cv2.imencode('.jpg', img)
-                img = buffer.tobytes()
+
                 if len(secondQueue)== 30 and len(set(secondQueue))==1:
                     global secondGesture
                     secondGesture = set(secondQueue).pop()
                     print('both gestures are',firstGesture,secondGesture)
-                    time.sleep(3)
-                    quit()
+                    time.sleep(2)
+                    firstGesture,secondGesture = 'No gesture detected','No gesture detected'
+                    secondQueue.clear()
+
+                    break
+                ret, buffer = cv2.imencode('.jpg', img)
+                img = buffer.tobytes()
                 yield (b'--frame\r\n'
                        b'Content-Type: image/jpeg\r\n\r\n' + img + b'\r\n')
                 #cv2.putText(img,'put text on the frame', (10,130), cv2.FONT_HERSHEY_PLAIN, 3, (100,50,100), 3)
@@ -132,6 +133,22 @@ def gen_frames(cap): # Generator function
         yield (b'--frame\r\n'
                b'Content-Type: image/jpeg\r\n\r\n' + img + b'\r\n')
 
+
+app = Flask(__name__)
+#comment this out if mediapipe doesnt work
+mpHands = mp.solutions.hands
+hands = mpHands.Hands(static_image_mode=False,
+                      max_num_hands=1,
+                      min_detection_confidence=0.5,
+                      min_tracking_confidence=0.5)
+#until here
+#comment the next line in if mediapipe doesn't work
+#hands = ""
+latest_gesture = 'No gesture detected'
+firstGesture, secondGesture = 'No gesture detected yet','No gesture detected'
+firstQueue,secondQueue = deque(maxlen=30),deque(maxlen=30)
+
+
 @app.route('/video_feed')
 def video_feed():
     # Assuming 'cap' is your cv2.VideoCapture object
@@ -148,4 +165,6 @@ def current_gesture():
     return jsonify(gesture=latest_gesture, firstGesture = firstGesture, secondGesture = secondGesture)
 
 if __name__ == "__main__":
+
+
     app.run(debug=True)
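
Note that the new `detect_motion` helper added above does not appear to be called from `gen_frames` in the hunks shown here, which only initialize `inMotion` and `last_frame`. A minimal standalone sketch of the same frame-differencing idea, runnable without a camera; the synthetic frames and printed results below are illustrative only and are not part of the commit:

import cv2
import numpy as np

def detect_motion(last_frame, current_frame, threshold=50):
    # Grayscale + blur both frames, then threshold their absolute difference
    gray_last = cv2.GaussianBlur(cv2.cvtColor(last_frame, cv2.COLOR_BGR2GRAY), (21, 21), 0)
    gray_current = cv2.GaussianBlur(cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY), (21, 21), 0)
    frame_diff = cv2.absdiff(gray_last, gray_current)
    _, thresh = cv2.threshold(frame_diff, threshold, 255, cv2.THRESH_BINARY)
    # Any remaining contour means something in the scene changed noticeably
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    return len(contours) > 0

# Two synthetic 480x640 frames: the second has a bright square "moving" into view
still = np.zeros((480, 640, 3), dtype=np.uint8)
moved = still.copy()
moved[100:200, 100:200] = 255

print(detect_motion(still, still))   # False: identical frames produce no contours
print(detect_motion(still, moved))   # True: the bright square exceeds the threshold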

app/methods.py

Lines changed: 10 additions & 1 deletion
@@ -1,5 +1,5 @@
 import math,cv2, time
-
+import numpy as np
 
 class hand:
     def __init__(self,hand):
@@ -143,6 +143,15 @@ def thumbClassifier(results):
     return GestureObject.gesture
 
 
+def preprocessHandRegion(handRegion):
+    #resize the image to the same resolution used in the dataset
+    resized_hand = cv2.resize(handRegion, (224,224))
+    normalized_hand = resized_hand / 255.0
+
+    reshaped_hand = np.reshape(normalized_hand, (224,224, 3))
+    batch_hand = np.expand_dims(reshaped_hand, axis=0)
+    return batch_hand
+
 
 def InstructionCommand(hands, img, cTime, pTime,firstDetected):
     result = ""
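
`preprocessHandRegion`, moved here from app.py, turns a cropped hand image into a single-example batch of shape (1, 224, 224, 3), the layout a Keras-style model call such as the `ASLModel.predict` line previously sketched in app.py would expect. A quick sketch with a dummy crop; the crop size and random pixels below are placeholders, not values from the repository:

import cv2
import numpy as np

def preprocessHandRegion(handRegion):
    # Match the 224x224 resolution used by the training dataset and scale to [0, 1]
    resized_hand = cv2.resize(handRegion, (224, 224))
    normalized_hand = resized_hand / 255.0
    reshaped_hand = np.reshape(normalized_hand, (224, 224, 3))
    # Add a leading batch dimension so the model sees a batch of one image
    return np.expand_dims(reshaped_hand, axis=0)

# Dummy BGR crop standing in for img[minY:maxY, minX:maxX]
hand_crop = np.random.randint(0, 256, (180, 150, 3), dtype=np.uint8)
batch = preprocessHandRegion(hand_crop)
print(batch.shape)  # (1, 224, 224, 3)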

app/readme.md

Lines changed: 29 additions & 0 deletions
@@ -0,0 +1,29 @@
# IntelliGest Smart Home

## How to Run the Project

*To run this project, you will most likely need to be in the Professor's office, as our demo will be using our Raspberry Pi device.*
1. The first step of running our project is to turn on and connect the Raspberry Pi to the wifi, screen, camera, and light. *If you're using our setup, you only need to connect to the wifi and connect the light to any power source through a micro-USB. Follow the steps outlined [here](#connecting-the-pi-and-the-light-to-the-same-network) to connect.*
2. Once the Raspberry Pi is connected and configured, navigate to the `app` subfolder of the `Project-Intelligest-Smart-Home` repository.
   This can be done using the `cd` command if you are using the terminal and have opened the repository.
3. Once in the `app` subfolder, run the command `python -m flask run` to start the program. The program should take a few seconds to start up.
4. Once the program has output the link to the Flask server, open the link in a web browser.
5. The web page should display a live feed from the camera, and the program should be running.
6. To test the program, make a thumbs-up, thumbs-down, and thumbs-flat gesture to make sure the model is correctly detecting gestures.
7. After this test, hold your index finger up to the camera until the gesture is detected as your first gesture.
8. Next, hold a thumbs-up gesture in front of the camera. After this gesture has been held for a few seconds, the web page should show the gestures you used.
9. Once these gestures are detected, the light should turn on through the Home Assistant API.

## Connecting the Pi and the Light to the Same Network
With Home Assistant, any devices in the system _must_ be connected to the same network as the device running Home Assistant. For demo and testing purposes, you will need to host this network yourself.
We recommend using a laptop to provide "shared internet"; both macOS and Windows have this feature.

For minimal setup, configure the network as follows:
- SSID (network name): "showthesign"
- Password: "f334p8ofpehgb"
- Channel: 1, 6, or 11 (if required, it *MUST* be one of these channels, since the light communicates over the 2.4 GHz band)
- WPA2/WPA3 security

With this configuration, the Raspberry Pi already recognizes this connection, and so do the ESPs running the light. To configure your own network with your own name and password, first edit `livingroom.yaml` to include your chosen SSID and password, then reflash the ESP by connecting it to your laptop and running `esphome run livingroom.yaml` in the terminal. ***We highly recommend just setting up the internet share with the defaults we provide.***

Once both the ESP and the main device are connected to the same network, you are good to start using the project, following the steps outlined above!
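
Besides watching the web page, the gestures reported by the server can be checked by polling the `/current_gesture` route that app.py exposes (the same endpoint index.html fetches). A minimal sketch, assuming the Flask server is running locally on its default port 5000 and that the third-party `requests` package is installed; neither of these is configured by this commit:

import time
import requests

# Poll the same JSON endpoint the web page's fetch('/current_gesture') call uses
URL = "http://127.0.0.1:5000/current_gesture"  # assumed host and port

for _ in range(10):
    data = requests.get(URL, timeout=5).json()
    print(data.get("gesture"), data.get("firstGesture"), data.get("secondGesture"))
    time.sleep(1)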

app/templates/index.html

Lines changed: 65 additions & 21 deletions
@@ -1,67 +1,111 @@
-<!DOCTYPE html>
+</html><!DOCTYPE html>
 <html>
 <head>
     <title>Video Stream</title>
     <style>
+        html, body {
+            margin: 0; /* Removes the margin from the body */
+            padding: 0; /* Removes padding from the body */
+            overflow: auto; /* Prevents scrolling */
+            height: 100%; /* Ensures the body takes full viewport height */
+        }
+
         .content-container {
             display: flex;
-            justify-content: space-between;
-            align-items: flex-start;
+            font-size = 20px;
+            flex-direction: row; /* Adjusts direction to row to place video and blocks side by side */
+            align-items: start;
+            justify-content: center;
+            flex-wrap: nowrap; /* Prevents wrapping to ensure no scrolling */
+            height: 100vh; /* Sets the container height to fill the viewport */
         }
-
+
         .video-container {
-            margin-right: 20px;
+            flex: 1; /* Allows the video container to grow and fill available space */
+            max-width: 40%; /* Limits the maximum width of the video container */
+            /* Adjusts margin and padding as needed */
+        }
+
+        .blocks-container {
             display: flex;
             flex-direction: column;
+            justify-content: start;
+            margin-left: auto;
+            flex-grow: 0;
+            width: 20%;
+            height: 100vh;
+            overflow-y: hidden;
+            padding: 10px 0; /* Adds padding to the top and bottom */
         }
-
-        .all-gestures-container {
+
+        .block {
+            margin: 5px 10px; /* Adjust margins for left and right */
             display: flex;
             flex-direction: column;
+            justify-content: space-between; /* Ensures space is distributed between img and text */
+            align-items: center;
+            flex-grow: 1; /* Allows block to grow as needed */
+            min-height: 100px; /* Sets a minimum height */
         }
-
-        .video-container img,
-        .all-gestures-container img {
-            width: auto;
+
+        .block img {
+            width: auto; /* Allows width to adjust based on height */
+            max-height: 60%; /* Limits image height to leave space for text */
+            object-fit: contain; /* Ensures image is scaled properly without distortion */
        }
+
+        .block p {
+            text-align: center;
+            margin-top: 5px; /* Ensures a small space between the image and the text */
+            flex-shrink: 0; /* Prevents text from shrinking */
+            width: 100%; /* Ensures text width matches block width */
+            overflow: hidden; /* Hides overflow text */
+            text-overflow: ellipsis; /* Adds an ellipsis to text that overflows */
+            white-space: nowrap; /* Keeps text in a single line */
+        }
+        /* Your existing .block and .block img styles */
     </style>
 </head>
 <body>
     <div class="content-container">
         <!-- Video Stream -->
-        <div class="video-container" justify-content= "left">
-            <h1>Video Stream</h1>
-            <img src="{{ url_for('video_feed') }}" alt="Video Feed" style="width: 500px;">
-            <!--<img src="{{ url_for('static', filename='images/all-gestures.png') }}" alt="Video Feed" style="width: 500px;">-->
-            <div id="gesture">Current Gesture: No gesture detected yet</div>
+        <div class="video-container">
+            <img src="{{ url_for('video_feed') }}" alt="Video Feed" style="width: 100%; max-width: 640px; height: auto;">
+
             <div id="gestures">
+                <p>Current Gesture: <span id="currentGesture">Waiting...</span></p>
                 <p>First Gesture: <span id="firstGesture">Waiting...</span></p>
                 <p>Second Gesture: <span id="secondGesture">Waiting...</span></p>
             </div>
         </div>
 
         <!-- Blocks Container (already correctly styled) -->
-        <div class="blocks-container" justify-content= "right">
+        <div class="blocks-container">
             <div class="block">
-                <img src="{{ url_for('static', filename='images/all-gestures.png') }}" alt="Gestures" style="width: 500px;">
+                <img src="{{ url_for('static', filename='images/light.png') }}" alt="Light">
                 <p>Light Gesture</p>
             </div>
-            <!--<div class="block">
+            <div class="block">
                 <img src="{{ url_for('static', filename='images/news.png') }}" alt="News">
                 <p>News Gesture</p>
             </div>
             <div class="block">
                 <img src="{{ url_for('static', filename='images/weather.png') }}" alt="Weather">
                 <p>Weather Gesture</p>
-            </div> -->
+            </div>
+            <div class="block">
+                <img src="{{ url_for('static', filename='images/all-gestures.png') }}" alt="Gestures" style="width: 500px;">
+                <p>All Gestures</p>
+            </div>
+
         </div>
     </div>
     <script>
         setInterval(function() {
             fetch('/current_gesture')
                 .then(response => response.json())
                 .then(data => {
-                    document.getElementById('gesture').innerHTML = 'Current Gesture: ' + data.gesture;
+                    document.getElementById('currentGesture').textContent = data.gesture || 'No gesture detected yet';
                     document.getElementById('firstGesture').textContent = data.firstGesture || 'No gesture detected yet';
                     document.getElementById('secondGesture').textContent = data.secondGesture || 'No gesture detected yet';

app/toDoList.md

Lines changed: 6 additions & 0 deletions
@@ -0,0 +1,6 @@
Make images of hands into buttons for the instructions (light, weather, etc.)
Make sure the web page loads and looks okay on the Raspberry Pi screen
Add in a method to turn the light on and off based on the first and second gesture.
Get the available devices displayed in a grid format after the first gesture. Must-have for the demo after this one.
Make the HTML look more official; use CSS to make it look more like Home Assistant.
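
For the item about turning the light on and off from the two gestures, Home Assistant's REST API exposes light services over HTTP. A rough sketch of what such a call could look like; the host, long-lived access token, and `light.livingroom` entity id are placeholders and are not defined anywhere in this repository:

import requests

# Placeholder values: replace with the real Home Assistant host, token, and entity id
HA_URL = "http://homeassistant.local:8123"
TOKEN = "YOUR_LONG_LIVED_ACCESS_TOKEN"
HEADERS = {"Authorization": f"Bearer {TOKEN}", "Content-Type": "application/json"}

def set_light(on: bool, entity_id: str = "light.livingroom"):
    # Home Assistant REST API: POST /api/services/light/turn_on (or turn_off)
    service = "turn_on" if on else "turn_off"
    resp = requests.post(
        f"{HA_URL}/api/services/light/{service}",
        headers=HEADERS,
        json={"entity_id": entity_id},
    )
    resp.raise_for_status()

# e.g. first gesture selects the light, thumbs up confirms -> turn it on
set_light(True)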
