Skip to content

Commit bb907c2

Browse files
committed
Add emotion-recognition-demo
1 parent f53234b commit bb907c2

File tree

7 files changed

+33525
-0
lines changed

7 files changed

+33525
-0
lines changed
Lines changed: 61 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,61 @@
1+
# KubeEdge Emotion Recognition Demo
2+
3+
## Description
4+
5+
KubeEdge Emotion Recognition is a demo of emotion recognition on a Raspberry Pi. The Raspberry Pi is equipped with an RS/E4 standard camera.
6+
7+
<img src="images/raspberry-video.jpg">
8+
9+
10+
## Prerequisites
11+
12+
### Hardware Prerequisites
13+
14+
1. RaspBerry-Pi (RaspBerry-Pi 4 has been used for this demo)
15+
2. RS/E4 Camera
16+
3. Display
17+
18+
## Steps to reproduce
19+
20+
1. Clone the kubeedge/examples repository.
21+
22+
```console
23+
git clone https://github.com/kubeedge/examples.git /root/examples
24+
```
25+
26+
2. Deploy the Emotion Recognition Server.
27+
28+
```console
29+
kubectl apply -f /root/examples/kubeedge-emotion-recognition-demo/emotion-server.yaml
30+
```
31+
32+
3. Deploy the Emotion Recognition Client to the Raspberry Pi.
33+
34+
```console
35+
kubectl apply -f /root/examples/kubeedge-emotion-recognition-demo/emotion-client.yaml
36+
```
37+
```
38+
spec:
39+
containers:
40+
- env:
41+
- name: DISPLAY
42+
value: :0
43+
- name: FACEEMOTION_SERVER
44+
value: $EMOTION_SERVER #set emotion server address
45+
- name: FACEEMOTION_PORT
46+
value: $EMOTION_PORT #set emotion server port
47+
volumeMounts:
48+
- mountPath: /tmp/.X11-unix
49+
name: x11
50+
- mountPath: /dev/video0
51+
name: video
52+
securityContext:
53+
privileged: true
54+
volumes:
55+
- name: x11
56+
hostPath:
57+
path: /tmp/.X11-unix #match your display device
58+
- name: video
59+
hostPath:
60+
path: /dev/video0 #match your camera device
61+
```
Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
# arm32 base image with ONNX Runtime and OpenCV preinstalled (Raspberry Pi target).
FROM kuramal/onnxruntime_opencv:arm32

# HTTP client used to call the emotion server.
RUN pip install requests

# COPY is preferred over ADD for plain local files (no archive/URL semantics needed).
COPY ./emotion.py /home
COPY ./haarcascade_frontalface_default.xml /home

ENTRYPOINT [ "python", "/home/emotion.py" ]
Lines changed: 72 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,72 @@
1+
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 7 03:18:26 2020

@author: gaowei

Emotion-recognition edge client.

Captures frames from the local camera, detects faces with an OpenCV Haar
cascade, posts the frame (base64-encoded JPEG) to the emotion server, and
displays the annotated image the server returns. Press 'q' in the display
window to quit.
"""

import os
import base64
import json
import requests
import cv2
import numpy as np

# Haar cascade for face detection (bundled into the container image).
faceCascade = cv2.CascadeClassifier('/home/haarcascade_frontalface_default.xml')

# Server address/port are injected via the pod spec (see emotion-client.yaml).
faceemotion_server = os.environ.get('FACEEMOTION_SERVER')
faceemotion_port = os.environ.get('FACEEMOTION_PORT')

request_url = "http://%s:%s/model/methods/predict" % (faceemotion_server, faceemotion_port)

# Constant request headers — hoisted out of the capture loop.
HEADERS = {'accept': 'application/json', 'content-type': 'application/json'}


def main():
    """Run the capture → detect → predict → display loop until 'q' is pressed."""
    cap = cv2.VideoCapture(0)   # default camera (/dev/video0 in the pod spec)
    frame_interval = 10         # process only every 10th frame to limit server load
    count = 0
    sent_once = False           # after the first send, skip frames with no face

    while True:
        count = count + 1
        ret, frame = cap.read()
        # Skip failed grabs (original ignored `ret`) and off-interval frames.
        if not ret or count % frame_interval != 0:
            continue

        # Face detection runs on a mirrored grayscale copy; the un-mirrored
        # frame is what gets sent to the server (matches original behavior).
        mirrored = cv2.flip(frame, 1)
        gray = cv2.cvtColor(mirrored, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.2,
            minNeighbors=5,
            minSize=(20, 20)
        )

        # Once at least one frame has been sent, don't bother the server
        # with face-less frames.
        if len(faces) == 0 and sent_once:
            continue
        sent_once = True

        # Downscale, JPEG-encode, and base64-encode the frame for transport.
        small_frame = cv2.resize(frame, (0, 0), fx=0.88, fy=0.88)
        jpeg = cv2.imencode('.jpg', small_frame)[1]
        img_b64 = base64.b64encode(jpeg).decode('ascii')

        payload = json.dumps({"img_base64": img_b64}).encode('utf-8')
        response = requests.post(url=request_url, data=payload, headers=HEADERS)
        res = json.loads(response.text)          # outer envelope (dict)
        res_dict = json.loads(res["value"])      # inner JSON payload

        # The server returns a data URL: "data:image/jpeg;base64,<payload>".
        annotated_b64 = res_dict["img_url"].split(',')[1]
        image_data = base64.b64decode(annotated_b64)
        # np.fromstring is deprecated/removed in modern NumPy; frombuffer is
        # the drop-in replacement for raw byte input.
        img_array = np.frombuffer(image_data, np.uint8)
        # Decode the JPEG bytes as a color image. The original passed
        # cv2.COLOR_RGB2BGR here, which is a color-conversion code, not a
        # valid imread flag; IMREAD_COLOR is the intended flag.
        annotated = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
        cv2.imshow('img', annotated)

        # Display the resulting frame; quit on 'q'.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()

0 commit comments

Comments
 (0)