Skip to content

Commit 36fe525

Browse files
committed
Added emotion detection project in computer vision
1 parent a71618f commit 36fe525

6 files changed

Lines changed: 94 additions & 0 deletions

File tree

51.9 KB
Loading
Lines changed: 80 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,80 @@
1+
import cv2
2+
import numpy as np
3+
import dlib
4+
from deepface import DeepFace
5+
from PIL import Image
6+
from collections import deque
7+
8+
# --- Runtime setup -----------------------------------------------------------

# Rolling buffer of the most recent predictions; used to smooth out
# frame-to-frame flicker in the detected emotion.
emotion_queue = deque(maxlen=5)

# Dlib's HOG-based frontal face detector (more accurate than the
# OpenCV Haar cascade for frontal faces).
detector = dlib.get_frontal_face_detector()

# Default webcam (device index 0).
cap = cv2.VideoCapture(0)
16+
# Map each recognizable emotion label to its emoji image file
# (all files are expected to live in the working directory).
emoji_dict = dict(
    happy="happy_emoji.jpeg",
    sad="sad_emoji.webp",
    angry="angry_emoji.jpg",
    surprise="surprise_emoji.png",
    neutral="neutral_emoji.webp",
)
25+
# Main capture loop: read webcam frames, classify the dominant emotion,
# overlay the matching emoji, and display the annotated frame.
while True:
    ret, frame = cap.read()
    if not ret:  # camera unplugged or stream ended
        break

    # Dlib's detector expects a grayscale image.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(gray)

    if faces:
        try:
            analysis = DeepFace.analyze(
                frame,
                actions=['emotion'],
                enforce_detection=False,  # dlib already confirmed a face
            )
            # DeepFace returns a list of per-face dicts in recent versions
            # and a bare dict in older ones; normalize to one dict.
            result = analysis[0] if isinstance(analysis, list) else analysis
            detected_emotion = result['dominant_emotion']

            # Smooth over the last few frames to reduce label flicker.
            emotion_queue.append(detected_emotion)
            if len(emotion_queue) > 2:
                detected_emotion = max(set(emotion_queue), key=emotion_queue.count)

            # FIX: the fallback previously pointed at "neutral_emoji.png",
            # which is not among the configured files; reuse the dict's
            # neutral entry for emotions without an emoji (e.g. fear/disgust).
            emoji_path = emoji_dict.get(detected_emotion, emoji_dict["neutral"])
            # FIX: convert to RGBA so the image can serve as its own paste
            # mask — JPEG/WebP files may load as RGB, which PIL rejects as
            # a transparency mask ("bad transparency mask").
            emoji = Image.open(emoji_path).convert("RGBA").resize((100, 100))

            # Overlay via PIL (OpenCV has no alpha-aware paste), then
            # convert back to BGR for display.
            frame_pil = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            frame_pil.paste(emoji, (50, 50), emoji)
            frame = cv2.cvtColor(np.array(frame_pil), cv2.COLOR_RGB2BGR)

            # Label the frame with the (smoothed) emotion.
            cv2.putText(frame, detected_emotion.capitalize(), (50, 180),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        except Exception as e:
            # Best-effort: keep streaming even if one frame fails to analyze.
            print("Error:", e)

    cv2.imshow("Live Emoji Detection", frame)

    # Press 'q' to exit.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
9.63 KB
Loading
22 KB
Loading
Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
# Emoji Emotion Tracker
2+
3+
A real-time **emotion detection project** using DeepFace and OpenCV, with emoji overlays!
4+
5+
## Features
6+
- Detects emotions like happy, sad, angry, surprise, neutral
7+
- Displays emoji corresponding to the detected emotion
8+
- Smooths detection using last few frames
9+
- Beginner-friendly & Hacktoberfest-ready
10+
11+
## Usage
12+
```bash
pip install -r requirements.txt
python scripts/live_emoji_detection.py
```
20.1 KB
Loading

0 commit comments

Comments
 (0)