# RaspberryPi-Security-Camera / SecurityCamera / camera.py
import time
from datetime import datetime

import cv2
import numpy as np
from imutils.video.pivideostream import PiVideoStream


class VideoCamera(object):
    """Threaded Pi camera wrapper that returns annotated JPEG frames."""

    def __init__(self, flip=False):
        # NOTE: this unpacking assumes a PiVideoStream whose start() returns the
        # stream, the PiCamera object and the raw capture buffer; the stock
        # imutils PiVideoStream.start() returns only the stream object.
        self.vs, self.CamObj, self.rawCapture = PiVideoStream().start()
        self.flip = flip
        time.sleep(2.0)  # give the camera sensor time to warm up

    def __del__(self):
        # stop the background capture thread when the object is destroyed
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read()).copy()

        # draw the text and timestamp on the frame
        text = 'Live'
        timestamp = datetime.now()
        ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
        cv2.putText(frame, "You are watching: {} feed".format(text), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (0, 0, 255), 2)
        cv2.putText(frame, ts, (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

        # encode the annotated frame as JPEG for streaming
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def get_object(self, classifier):
        timestamp = datetime.now()
        found_objects = False
        # grab three copies of the current frame: one for the annotated live
        # view, one for the time-lapse output and one for the frame buffer
        frame = self.flip_if_needed(self.vs.read()).copy()
        frameTimeLapse = self.flip_if_needed(self.vs.read()).copy()
        bufferFrame = self.flip_if_needed(self.vs.read()).copy()

        # convert to grayscale for the Haar-cascade detector
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        objects = classifier.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE
        )

        if len(objects) > 0:
            found_objects = True

        # Draw a rectangle around the objects
        for (x, y, w, h) in objects:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        # draw the status text and timestamp on the frame
        text = 'Movement detected in the room!'
        ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")

        cv2.putText(frame, "Room Status: {}".format(text), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(frame, ts, (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 1)

        cv2.putText(frameTimeLapse, "Time-lapse", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(frameTimeLapse, ts, (10, frameTimeLapse.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255),
                    1)

        cv2.putText(bufferFrame, "Buffer Frames", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(bufferFrame, ts, (10, bufferFrame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 1)

        ret, jpeg = cv2.imencode('.jpg', frame)

        return (jpeg.tobytes(), found_objects, frame, frameTimeLapse, bufferFrame)
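

# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal example of how this class is typically consumed: a Haar cascade is
# loaded and passed to get_object(), and get_frame() feeds an MJPEG stream.
# The Flask app, route names and cascade file below are assumptions made for
# illustration only; they are not defined in this file.
if __name__ == '__main__':
    from flask import Flask, Response

    app = Flask(__name__)
    camera = VideoCamera(flip=False)
    # an OpenCV-bundled cascade, used here purely as an example classifier
    object_classifier = cv2.CascadeClassifier(
        cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

    def gen(cam):
        # yield JPEG frames in multipart format for MJPEG streaming
        while True:
            frame = cam.get_frame()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')

    @app.route('/video_feed')
    def video_feed():
        return Response(gen(camera),
                        mimetype='multipart/x-mixed-replace; boundary=frame')

    @app.route('/detect')
    def detect():
        jpeg, found, _, _, _ = camera.get_object(object_classifier)
        return Response(jpeg, mimetype='image/jpeg')

    app.run(host='0.0.0.0', port=5000)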