#! /usr/bin/python3
from datetime import datetime
import math  # for the Euclidean distance formula
import sys
import time

import cv2
import imutils
import numpy as np

from picamera import PiCamera
from picamera.array import PiRGBArray
# Create the haar cascades: one trained on frontal faces, one on profile faces.
frontalfaceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
profilefaceCascade = cv2.CascadeClassifier("haarcascade_profileface.xml")
face = [0,0,0,0] # This will hold the array that OpenCV returns when it finds a face: (makes a rectangle)
center = [0,0] # Center of the face: a point calculated from the above variable
lastface = 0 # 1-3: which cascade matched most recently (1=right profile, 2=left profile, 3=frontal).
             # Rather than searching for all three orientations every frame, the scan loop tries the
             # most recently successful cascade first, so in the common case only one cascade runs.
             # This roughly triples detection speed so long as the face hasn't changed orientation.
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
#camera.resolution = (160, 120)
#camera.resolution = (640,480)
camera.resolution = (1024,768)
cameracenter = (camera.resolution[0]/2, camera.resolution[1]/2)  # pixel coordinates of the frame center
camera.framerate = 32
rawCapture = PiRGBArray(camera, camera.resolution)
# Points to the last place we saw a face
target = ( camera.resolution[0]/2, camera.resolution[1]/2 )
# Fisheye corrections. See https://medium.com/@kennethjiang/calibrate-fisheye-lens-using-opencv-333b05afa0b0
# Calibration for 640x480:
#correct_fisheye = False
#DIM=(640, 480)
#K=np.array([[363.787052141742, 0.0, 332.09761373599576], [0.0, 362.23769923959975, 238.35982850966641], [0.0, 0.0, 1.0]])
#D=np.array([[-0.019982864934848042], [-0.10107557279423625], [0.20401597940960342], [-0.1406464201639892]])
# Calibration for 1024x768:
correct_fisheye = True
DIM=(1024, 768)  # dimensions of the images used during calibration
K=np.array([[583.6639649321671, 0.0, 518.0139106134624], [0.0, 580.8039721094127, 384.32095600935503], [0.0, 0.0, 1.0]])  # camera matrix
D=np.array([[0.0028045742945672475], [-0.14423839478882694], [0.23715105072799644], [-0.1400677375634837]])  # distortion coefficients
def distance(p0, p1):
    """Return the Euclidean distance between 2-D points *p0* and *p1*.

    Each point is any indexable pair (x, y). Uses math.hypot, which is
    the standard-library form of sqrt(dx**2 + dy**2) and is more robust
    against overflow/underflow than squaring by hand.
    """
    return math.hypot(p0[0] - p1[0], p0[1] - p1[1])
def search_rightprofile(i):
    # Search image *i* for a right-profile face; returns detectMultiScale's
    # result (empty tuple when nothing found, otherwise an array of (x, y, w, h) boxes).
    # Older OpenCV call with CV_HAAR_* tuning flags kept below for reference:
    # return profilefaceCascade.detectMultiScale(i,1.3,4,(cv2.CV_HAAR_DO_CANNY_PRUNING + cv2.CV_HAAR_FIND_BIGGEST_OBJECT + cv2.CV_HAAR_DO_ROUGH_SEARCH),(30,30))
    return profilefaceCascade.detectMultiScale(i)
def search_leftprofile(i):
    """Search image *i* for a left-profile face.

    The profile cascade detects one orientation only, so the frame is
    mirrored horizontally before scanning. Any hits are mapped back into
    the original (un-mirrored) coordinate frame so callers can draw and
    track them directly.

    Returns an empty tuple when nothing is found, otherwise a list of
    (x, y, w, h) boxes in original-image coordinates.
    """
    revimage = cv2.flip(i, 1)  # mirror horizontally
    # FIX: the original scanned the un-flipped image `i`, making the flip a no-op.
    faces = profilefaceCascade.detectMultiScale(revimage)
    if len(faces) == 0:
        return faces
    width = i.shape[1]
    # Mirror each box back: a box at x in the flipped frame sits at width - x - w.
    return [(width - x - w, y, w, h) for (x, y, w, h) in faces]
def search_frontface(i):
    # Search image *i* for a frontal face; returns detectMultiScale's
    # result (empty tuple when nothing found, otherwise an array of (x, y, w, h) boxes).
    # Older OpenCV call with CV_HAAR_* tuning flags kept below for reference:
    # return frontalfaceCascade.detectMultiScale(i,1.3,4,(cv2.CV_HAAR_DO_CANNY_PRUNING + cv2.CV_HAAR_FIND_BIGGEST_OBJECT + cv2.CV_HAAR_DO_ROUGH_SEARCH),(30,30))
    return frontalfaceCascade.detectMultiScale(i)
# Cache of remap tables keyed by (input dims, dim2, dim3, balance). The tables
# depend only on geometry, not on pixel data, so computing them once per
# configuration instead of once per frame removes the dominant per-frame cost.
_undistort_maps = {}


def undistort(i, balance=0.0, dim2=None, dim3=None):
    """Undo fisheye distortion on image *i* using the calibration K/D/DIM.

    balance: 0.0 crops to the fully-valid region, 1.0 keeps the whole frame.
    dim2, dim3: optional intermediate/output dimensions; both default to the
    input image's dimensions.
    Raises ValueError if the input's aspect ratio differs from the
    calibration images' (was an assert, which vanishes under ``python -O``).
    """
    dim1 = i.shape[:2][::-1]  # (width, height) of the input image
    # Compare aspect ratios by cross-multiplication to avoid float-division error.
    if dim1[0] * DIM[1] != dim1[1] * DIM[0]:
        raise ValueError("Image to undistort needs to have same aspect ratio as the ones used in calibration")
    if not dim2:
        dim2 = dim1
    if not dim3:
        dim3 = dim1
    key = (dim1, tuple(dim2), tuple(dim3), balance)
    if key not in _undistort_maps:
        scaled_K = K * dim1[0] / DIM[0]  # The values of K scale with image dimension,
        scaled_K[2][2] = 1.0             # except K[2][2], which is always 1.0.
        # scaled_K, dim2 and balance determine the final K used to un-distort the image.
        new_K = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(scaled_K, D, dim2, np.eye(3), balance=balance)
        _undistort_maps[key] = cv2.fisheye.initUndistortRectifyMap(scaled_K, D, np.eye(3), new_K, dim3, cv2.CV_16SC2)
    map1, map2 = _undistort_maps[key]
    return cv2.remap(i, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
# allow the camera to warmup
time.sleep(0.1)
lastTime = time.time() * 1000.0


def _search_order(last):
    """Return cascade ids to try (1=right profile, 2=left profile, 3=frontal),
    starting with the orientation that matched on the previous frame so the
    common case runs only one cascade."""
    if last == 1:
        return (1, 3, 2)
    if last == 2:
        return (2, 1, 3)
    return (3, 2, 1)


def _run_search(image, which):
    """Dispatch to the cascade search identified by *which* (1/2/3)."""
    if which == 1:
        return search_rightprofile(image)
    if which == 2:
        return search_leftprofile(image)
    return search_frontface(image)


def _detect_faces(image, last):
    """Try each cascade in priority order; return (faces, orientation).

    *orientation* is the id of the cascade that matched, or *last* unchanged
    when nothing was found, so the next frame keeps the same priority —
    matching the original scan-pyramid behavior exactly."""
    for which in _search_order(last):
        faces = _run_search(image, which)
        # len() works for both the empty-tuple and ndarray results of
        # detectMultiScale; `faces != ()` is ambiguous for numpy arrays.
        if len(faces) > 0:
            return faces, which
    return (), last


# capture frames from the camera
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    # grab the raw NumPy array representing the image
    image = frame.array
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # convert to greyscale
    if correct_fisheye:
        image = undistort(image, 0.8)

    faces, lastface = _detect_faces(image, lastface)

    if len(faces) > 0:
        print("{}: {} faces found. Type: {}".format(time.time() * 1000.0 - lastTime, len(faces), lastface))
        # Draw a circle around each detected face
        for (x, y, w, h) in faces:
            cv2.circle(image, (int(x + w / 2), int(y + h / 2)), int((w + h) / 3), (255, 255, 255), 1)
        # Temporary, save the image
        cv2.imwrite("tmp/img.{}.facetype{}.png".format(datetime.now().strftime("%Y%m%d.%H%M%S.%f"), lastface), image)

        # Find the centermost face and make it the new tracking target.
        curdistance = float("inf")  # sentinel larger than any in-frame distance
        for (x, y, w, h) in faces:
            tmpcenter = [(w / 2 + x), (h / 2 + y)]  # center of the (x, y, w, h) box
            tmpdistance = distance(tmpcenter, cameracenter)
            if tmpdistance < curdistance:
                # FIX: curdistance was never updated, so the LAST face in the
                # list won instead of the one closest to the frame center.
                curdistance = tmpdistance
                print("Face closer to center detected. New target location: ({}, {}) - distance: {}".format(tmpcenter[0], tmpcenter[1], tmpdistance))
                center = tmpcenter
                target = center
    else:  # No face found
        print("{}: no faces found. Continuing with existing target ({}, {})".format(time.time() * 1000.0 - lastTime, target[0], target[1]))

    # clear the stream in preparation for the next frame
    rawCapture.truncate(0)
    lastTime = time.time() * 1000.0

    # Determine directions and distance as a fraction of the frame size
    travel = [(cameracenter[0] - target[0]) / camera.resolution[0], (cameracenter[1] - target[1]) / camera.resolution[1]]
    print("To move horizontal: {}, vertical: {}".format(travel[0], travel[1]))
|