|
@@ -5,41 +5,68 @@ import time
|
|
|
import cv2
|
|
|
import sys
|
|
|
import imutils
|
|
|
+import numpy as np
|
|
|
import math # for sqrt distance formula
|
|
|
|
|
|
# Create the haar cascade
|
|
|
frontalfaceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
|
|
|
profilefaceCascade = cv2.CascadeClassifier("haarcascade_profileface.xml")
|
|
|
|
|
|
-face = [0,0,0,0] # This will hold the array that OpenCV returns when it finds a face: (makes a rectangle)
|
|
|
-center = [0,0] # Center of the face: a point calculated from the above variable
|
|
|
-lastface = 0 # int 1-3 used to speed up detection. The script is looking for a right profile face,-
|
|
|
- # a left profile face, or a frontal face; rather than searching for all three every time,-
|
|
|
- # it uses this variable to remember which is last saw: and looks for that again. If it-
|
|
|
- # doesn't find it, it's set back to zero and on the next loop it will search for all three.-
|
|
|
- # This basically tripples the detect time so long as the face hasn't moved much.
|
|
|
+face = [0,0,0,0] # This will hold the array that OpenCV returns when it finds a face: (makes a rectangle)
|
|
|
+center = [0,0] # Center of the face: a point calculated from the above variable
|
|
|
+lastface = 0 # int 1-3 used to speed up detection. The script is looking for a right profile face,-
|
|
|
+ # a left profile face, or a frontal face; rather than searching for all three every time,-
|
|
|
+ # it uses this variable to remember which it last saw: and looks for that again. If it-
|
|
|
+ # doesn't find it, it's set back to zero and on the next loop it will search for all three.-
|
|
|
+ # This basically triples the detect time so long as the face hasn't moved much.
|
|
|
|
|
|
|
|
|
# initialize the camera and grab a reference to the raw camera capture
|
|
|
camera = PiCamera()
|
|
|
#camera.resolution = (160, 120)
|
|
|
-camera.resolution = (320,240)
|
|
|
-cameracenter = (320/2, 240/2)
|
|
|
+camera.resolution = (640,480)
|
|
|
+cameracenter = (camera.resolution[0]/2, camera.resolution[1]/2)
|
|
|
camera.framerate = 32
|
|
|
-rawCapture = PiRGBArray(camera, size=(320, 240))
|
|
|
+rawCapture = PiRGBArray(camera, camera.resolution)
|
|
|
+
|
|
|
+# Fisheye corrections. See https://medium.com/@kennethjiang/calibrate-fisheye-lens-using-opencv-333b05afa0b0
|
|
|
+DIM=(640, 480)
|
|
|
+K=np.array([[363.787052141742, 0.0, 332.09761373599576], [0.0, 362.23769923959975, 238.35982850966641], [0.0, 0.0, 1.0]])
|
|
|
+D=np.array([[-0.019982864934848042], [-0.10107557279423625], [0.20401597940960342], [-0.1406464201639892]])
|
|
|
|
|
|
def distance(p0, p1):
|
|
|
return math.sqrt((p0[0] - p1[0])**2 + (p0[1] - p1[1])**2)
|
|
|
|
|
|
def search_rightprofile(i):
|
|
|
- return profilefaceCascade.detectMultiScale(i,1.3,4,(cv2.cv.CV_HAAR_DO_CANNY_PRUNING + cv2.cv.CV_HAAR_FIND_BIGGEST_OBJECT + cv2.cv.CV_HAAR_DO_ROUGH_SEARCH),(30,30))
|
|
|
+# return profilefaceCascade.detectMultiScale(i,1.3,4,(cv2.CV_HAAR_DO_CANNY_PRUNING + cv2.CV_HAAR_FIND_BIGGEST_OBJECT + cv2.CV_HAAR_DO_ROUGH_SEARCH),(30,30))
|
|
|
+ return profilefaceCascade.detectMultiScale(i)
|
|
|
|
|
|
def search_leftprofile(i):
|
|
|
revimage = cv2.flip(i, 1) # Flip the image
|
|
|
- return profilefaceCascade.detectMultiScale(i,1.3,4,(cv2.cv.CV_HAAR_DO_CANNY_PRUNING + cv2.cv.CV_HAAR_FIND_BIGGEST_OBJECT + cv2.cv.CV_HAAR_DO_ROUGH_SEARCH),(30,30))
|
|
|
+# return profilefaceCascade.detectMultiScale(i,1.3,4,(cv2.CV_HAAR_DO_CANNY_PRUNING + cv2.CV_HAAR_FIND_BIGGEST_OBJECT + cv2.CV_HAAR_DO_ROUGH_SEARCH),(30,30))
|
|
|
+ return profilefaceCascade.detectMultiScale(revimage)
|
|
|
|
|
|
def search_frontface(i):
|
|
|
- return frontalfaceCascade.detectMultiScale(i,1.3,4,(cv2.cv.CV_HAAR_DO_CANNY_PRUNING + cv2.cv.CV_HAAR_FIND_BIGGEST_OBJECT + cv2.cv.CV_HAAR_DO_ROUGH_SEARCH),(30,30))
|
|
|
+# return frontalfaceCascade.detectMultiScale(i,1.3,4,(cv2.CV_HAAR_DO_CANNY_PRUNING + cv2.CV_HAAR_FIND_BIGGEST_OBJECT + cv2.CV_HAAR_DO_ROUGH_SEARCH),(30,30))
|
|
|
+ return frontalfaceCascade.detectMultiScale(i)
|
|
|
+
|
|
|
+def undistort(i, balance=0.0, dim2=None, dim3=None):
|
|
|
+ # Sanity Check the source dimensions
|
|
|
+ dim1 = i.shape[:2][::-1] #dim1 is the dimension of input image to un-distort
|
|
|
+ assert dim1[0]/dim1[1] == DIM[0]/DIM[1], "Image to undistort needs to have same aspect ratio as the ones used in calibration"
|
|
|
+
|
|
|
+ if not dim2:
|
|
|
+ dim2 = dim1
|
|
|
+ if not dim3:
|
|
|
+ dim3 = dim1
|
|
|
+
|
|
|
+ scaled_K = K * dim1[0] / DIM[0] # The values of K is to scale with image dimension.
|
|
|
+ scaled_K[2][2] = 1.0 # Except that K[2][2] is always 1.0
|
|
|
+
|
|
|
+ # This is how scaled_K, dim2 and balance are used to determine the final K used to un-distort image. OpenCV document failed to make this clear!
|
|
|
+ new_K = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(scaled_K, D, dim2, np.eye(3), balance=balance)
|
|
|
+ map1, map2 = cv2.fisheye.initUndistortRectifyMap(scaled_K, D, np.eye(3), new_K, dim3, cv2.CV_16SC2)
|
|
|
+ return cv2.remap(i, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
|
|
|
|
|
|
# allow the camera to warmup
|
|
|
time.sleep(0.1)
|
|
@@ -47,83 +74,87 @@ lastTime = time.time()*1000.0
|
|
|
|
|
|
# capture frames from the camera
|
|
|
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
|
|
|
- # grab the raw NumPy array representing the image, then initialize the timestamp
|
|
|
- # and occupied/unoccupied text
|
|
|
+ # grab the raw NumPy array representing the image, then initialize the timestamp
|
|
|
+ # and occupied/unoccupied text
|
|
|
image = frame.array
|
|
|
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # convert to greyscale
|
|
|
+ image = undistort(image, 0.8)
|
|
|
+ faces = ()
|
|
|
|
|
|
- faceFound = False # This variable is set to true if, on THIS loop a face has already been found
|
|
|
- # We search for a face three diffrent ways, and if we have found one already-
|
|
|
- # there is no reason to keep looking.
|
|
|
- faces = ()
|
|
|
-
|
|
|
+ faceFound = False # This variable is set to true if, on THIS loop a face has already been found
|
|
|
+ # We search for a face three different ways, and if we have found one already-
|
|
|
+ # there is no reason to keep looking.
|
|
|
# First Scan
|
|
|
if lastface == 1:
|
|
|
- faces = search_rightprofile(image)
|
|
|
+ faces = search_rightprofile(image)
|
|
|
if faces != ():
|
|
|
faceFound=True
|
|
|
lastface = 1
|
|
|
elif lastface == 2:
|
|
|
faces = search_leftprofile(image)
|
|
|
if faces != ():
|
|
|
- faceFound=True
|
|
|
- lastface = 2
|
|
|
+ faceFound=True
|
|
|
+ lastface = 2
|
|
|
else:
|
|
|
- faces = search_frontface(image)
|
|
|
+ faces = search_frontface(image)
|
|
|
if faces != ():
|
|
|
lastface = 3
|
|
|
faceFound=True
|
|
|
|
|
|
# Second scan
|
|
|
if not faceFound:
|
|
|
- faces = search_frontface(image)
|
|
|
- if faces != ():
|
|
|
- lastface = 3
|
|
|
- faceFound=True
|
|
|
- elif lastface == 2:
|
|
|
- faces = search_rightprofile(image)
|
|
|
- if faces != ():
|
|
|
- faceFound=True
|
|
|
- lastface = 1
|
|
|
- else:
|
|
|
- faces = search_leftprofile(image)
|
|
|
- if faces != ():
|
|
|
- faceFound=True
|
|
|
- lastface = 2
|
|
|
+ if lastface == 1:
|
|
|
+ faces = search_frontface(image)
|
|
|
+ if faces != ():
|
|
|
+ lastface = 3
|
|
|
+ faceFound=True
|
|
|
+ elif lastface == 2:
|
|
|
+ faces = search_rightprofile(image)
|
|
|
+ if faces != ():
|
|
|
+ faceFound=True
|
|
|
+ lastface = 1
|
|
|
+ else:
|
|
|
+ faces = search_leftprofile(image)
|
|
|
+ if faces != ():
|
|
|
+ faceFound=True
|
|
|
+ lastface = 2
|
|
|
|
|
|
# Third scan
|
|
|
if not faceFound:
|
|
|
- faces = search_leftprofile(image)
|
|
|
- if faces != ():
|
|
|
- faceFound=True
|
|
|
- lastface = 2
|
|
|
- elif lastface == 2:
|
|
|
- faces = search_frontface(image)
|
|
|
- if faces != ():
|
|
|
- lastface = 3
|
|
|
- faceFound=True
|
|
|
- else:
|
|
|
- faces = search_rightprofile(image)
|
|
|
- if faces != ():
|
|
|
- faceFound=True
|
|
|
- lastface = 1
|
|
|
+ if lastface == 1:
|
|
|
+ faces = search_leftprofile(image)
|
|
|
+ if faces != ():
|
|
|
+ faceFound=True
|
|
|
+ lastface = 2
|
|
|
+ elif lastface == 2:
|
|
|
+ faces = search_frontface(image)
|
|
|
+ if faces != ():
|
|
|
+ lastface = 3
|
|
|
+ faceFound=True
|
|
|
+ else:
|
|
|
+ faces = search_rightprofile(image)
|
|
|
+ if faces != ():
|
|
|
+ faceFound=True
|
|
|
+ lastface = 1
|
|
|
+
|
|
|
|
|
|
if not faceFound:
|
|
|
- print time.time()*1000.0-lastTime," no faces found."
|
|
|
+ print("{}: no faces found.".format(time.time()*1000.0-lastTime))
|
|
|
lastTime = time.time()*1000.0
|
|
|
# clear the stream in preparation for the next frame
|
|
|
rawCapture.truncate(0)
|
|
|
continue;
|
|
|
|
|
|
- print time.time()*1000.0-lastTime," {} faces found.".format(len(faces))
|
|
|
+ print("{}: {} faces found. Type: {}".format(time.time()*1000.0-lastTime, len(faces), lastface))
|
|
|
|
|
|
lastTime = time.time()*1000.0
|
|
|
|
|
|
# Draw a rectangle around the faces
|
|
|
for (x, y, w, h) in faces:
|
|
|
- cv2.circle(image, (x+w/2, y+h/2), int((w+h)/3), (255, 255, 255), 1)
|
|
|
+ cv2.circle(image, (int(x+w/2), int(y+h/2)), int((w+h)/3), (255, 255, 255), 1)
|
|
|
|
|
|
# Temporary, save the image
|
|
|
- cv2.imwrite("tmp/img.{}.png".format(datetime.now().strftime("%Y%m%%d.%H%M%S.%f")), image)
|
|
|
+ cv2.imwrite("tmp/img.{}.png".format(datetime.now().strftime("%Y%m%d.%H%M%S.%f")), image)
|
|
|
|
|
|
# clear the stream in preparation for the next frame
|
|
|
rawCapture.truncate(0)
|
|
@@ -132,10 +163,10 @@ for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=
|
|
|
curdistance = 1000000 # Outside the dimensions of the picture
|
|
|
for f in faces:
|
|
|
x,y,w,h = f
|
|
|
- tmpcenter = [(w/2+x),(h/2+y)] # we are given an x,y corner point and a width and height, we need the center
|
|
|
+ tmpcenter = [(w/2+x),(h/2+y)] # we are given an x,y corner point and a width and height, we need the center
|
|
|
tmpdistance = distance(tmpcenter, cameracenter)
|
|
|
if(tmpdistance < curdistance):
|
|
|
- print "Face closer to center detected. New target location: ({}, {}) - distance: {}".format(tmpcenter[0],tmpcenter[1],tmpdistance)
|
|
|
+ print("Face closer to center detected. New target location: ({}, {}) - distance: {}".format(tmpcenter[0],tmpcenter[1],tmpdistance))
|
|
|
center = tmpcenter;
|
|
|
|
|
|
|