|
| 1 | +import cv2 |
| 2 | +import numpy as np |
| 3 | + |
| 4 | +""" |
| 5 | +Basic Motion Detection using frame differencing and background subtraction (MOG2). |
| 6 | +
|
| 7 | +Usage: |
| 8 | + - Set VIDEO_SOURCE to a video file path or an integer (e.g., 0) for webcam. |
| 9 | + - The script shows two windows: motion mask and annotated frame. |
| 10 | + - Press 'q' to quit. |
| 11 | +
|
| 12 | +Notes: |
| 13 | + - Requires OpenCV (opencv-python) and NumPy. |
| 14 | + - This example focuses on clarity and educational value, not production tuning. |
| 15 | +""" |
| 16 | + |
| 17 | + |
# --- Tunable parameters ---------------------------------------------------
VIDEO_SOURCE = 0           # int for a webcam index (e.g., 0) or str path to a video file
MIN_CONTOUR_AREA = 500     # contours smaller than this many pixels are ignored as noise
MORPH_KERNEL_SIZE = (5, 5) # structuring-element size used for opening/closing
DISPLAY_SCALE = 1.0        # uniform resize factor applied to frames before processing
| 24 | + |
def create_background_subtractor() -> cv2.BackgroundSubtractor:
    """
    Build and return a MOG2 background subtractor.

    history=500 and varThreshold=16 are common defaults; detectShadows=True
    makes MOG2 label shadow pixels so they can be filtered out downstream.
    """
    return cv2.createBackgroundSubtractorMOG2(
        history=500,
        varThreshold=16,
        detectShadows=True,
    )
| 31 | + |
| 32 | + |
def preprocess_frame(frame: cv2.Mat) -> cv2.Mat:
    """
    Reduce a BGR frame to a denoised grayscale image.

    Grayscale conversion first, then a 5x5 Gaussian blur to damp pixel-level
    noise that would otherwise register as spurious motion in the differencer.
    """
    return cv2.GaussianBlur(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (5, 5), 0)
| 40 | + |
| 41 | + |
def frame_difference(prev_gray: cv2.Mat, curr_gray: cv2.Mat, threshold: int = 25) -> cv2.Mat:
    """
    Compute a binary motion mask from two consecutive grayscale frames.

    Parameters:
        prev_gray: previous frame, grayscale (same shape as curr_gray).
        curr_gray: current frame, grayscale.
        threshold: minimum absolute per-pixel difference (0-255) that counts
            as motion. Defaults to 25, the value previously hard-coded here.

    Returns:
        An 8-bit binary mask (0 or 255) after thresholding and morphological
        cleanup (opening removes speckle noise, closing fills small holes).
    """
    diff = cv2.absdiff(prev_gray, curr_gray)
    _, mask = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, MORPH_KERNEL_SIZE)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=1)
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel, iterations=1)
    return mask
| 53 | + |
| 54 | + |
def background_subtraction_mask(subtractor: cv2.BackgroundSubtractor, frame: cv2.Mat) -> cv2.Mat:
    """
    Run the background subtractor on *frame* and return a cleaned binary mask.
    """
    raw = subtractor.apply(frame)
    # MOG2 marks shadow pixels as 127; thresholding at 200 keeps only
    # confident foreground (255) and drops shadows.
    _, mask = cv2.threshold(raw, 200, 255, cv2.THRESH_BINARY)
    ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, MORPH_KERNEL_SIZE)
    # Opening erases isolated noise pixels; closing fills gaps inside blobs.
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, ellipse, iterations=1)
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, ellipse, iterations=1)
    return mask
| 66 | + |
| 67 | + |
def annotate_motion(frame: cv2.Mat, motion_mask: cv2.Mat, min_area: int = MIN_CONTOUR_AREA) -> cv2.Mat:
    """
    Draw bounding boxes around motion regions found in *motion_mask*.

    Parameters:
        frame: the BGR frame to annotate (left unmodified; a copy is drawn on).
        motion_mask: 8-bit binary mask whose white blobs mark motion.
        min_area: contours with area (in pixels) below this are skipped as
            noise. Defaults to MIN_CONTOUR_AREA, the value previously
            hard-coded in the loop.

    Returns:
        A copy of *frame* with a green rectangle around each kept contour.
    """
    contours, _ = cv2.findContours(motion_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    annotated = frame.copy()
    for contour in contours:
        if cv2.contourArea(contour) < min_area:
            continue
        x, y, w, h = cv2.boundingRect(contour)
        cv2.rectangle(annotated, (x, y), (x + w, y + h), (0, 255, 0), 2)
    return annotated
| 80 | + |
| 81 | + |
def main() -> None:
    """
    Run the motion-detection loop until the stream ends or 'q' is pressed.

    Opens VIDEO_SOURCE, combines frame-differencing and MOG2 background
    subtraction into a single motion mask, and shows the mask plus an
    annotated frame in two windows.

    Raises:
        RuntimeError: if the source cannot be opened or yields no frames.
    """
    cap = cv2.VideoCapture(VIDEO_SOURCE)
    if not cap.isOpened():
        raise RuntimeError("Unable to open video source. Set VIDEO_SOURCE correctly.")

    subtractor = create_background_subtractor()

    ret, prev_frame = cap.read()
    if not ret:
        cap.release()
        raise RuntimeError("Failed to read initial frame from source.")

    # BUG FIX: the first frame must get the same resize as the loop frames,
    # otherwise cv2.absdiff() receives mismatched shapes on the very first
    # iteration whenever DISPLAY_SCALE != 1.0.
    if DISPLAY_SCALE != 1.0:
        prev_frame = cv2.resize(prev_frame, None, fx=DISPLAY_SCALE, fy=DISPLAY_SCALE)
    prev_gray = preprocess_frame(prev_frame)

    # try/finally guarantees the capture and windows are released even if an
    # exception escapes the loop body.
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break

            # Optionally resize for display/performance.
            if DISPLAY_SCALE != 1.0:
                frame = cv2.resize(frame, None, fx=DISPLAY_SCALE, fy=DISPLAY_SCALE)

            curr_gray = preprocess_frame(frame)

            # Frame-differencing motion mask.
            diff_mask = frame_difference(prev_gray, curr_gray)

            # Background-subtraction motion mask.
            bs_mask = background_subtraction_mask(subtractor, frame)

            # Logical OR of both masks is more robust than either alone.
            combined_mask = cv2.bitwise_or(diff_mask, bs_mask)

            annotated = annotate_motion(frame, combined_mask)

            cv2.imshow("Motion Mask", combined_mask)
            cv2.imshow("Motion Detection", annotated)

            prev_gray = curr_gray

            # & 0xFF normalizes waitKey's return across platforms.
            key = cv2.waitKey(1) & 0xFF
            if key == ord("q"):
                break
    finally:
        cap.release()
        cv2.destroyAllWindows()
| 129 | + |
| 130 | + |
if __name__ == "__main__":
    # The completion marker is printed only when main() exits cleanly.
    main()
    print("DONE ✅")
| 134 | + |
| 135 | + |
0 commit comments