facetracker.py 8.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196
  1. #! /usr/bin/python3
  2. from picamera.array import PiRGBArray
  3. from picamera import PiCamera
  4. from datetime import datetime
  5. import time
  6. import cv2
  7. import sys
  8. import imutils
  9. import np
  10. import math # for sqrt distance formula
  11. # Create the haar cascade
  12. frontalfaceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
  13. profilefaceCascade = cv2.CascadeClassifier("haarcascade_profileface.xml")
  14. face = [0,0,0,0] # This will hold the array that OpenCV returns when it finds a face: (makes a rectangle)
  15. center = [0,0] # Center of the face: a point calculated from the above variable
  16. lastface = 0 # int 1-3 used to speed up detection. The script is looking for a right profile face,-
  17. # a left profile face, or a frontal face; rather than searching for all three every time,-
  18. # it uses this variable to remember which is last saw: and looks for that again. If it-
  19. # doesn't find it, it's set back to zero and on the next loop it will search for all three.-
  20. # This basically tripples the detect time so long as the face hasn't moved much.
  21. scanleft = True # Should we scan for left profiles?
  22. scanright = True # should we scan for right profiles?
  23. # initialize the camera and grab a reference to the raw camera capture
  24. camera = PiCamera()
  25. #camera.resolution = (160, 120)
  26. #camera.resolution = (640,480)
  27. camera.resolution = (1024,768)
  28. cameracenter = (camera.resolution[0]/2, camera.resolution[1]/2)
  29. camera.framerate = 32
  30. rawCapture = PiRGBArray(camera, camera.resolution)
  31. # Points to the last place we sawa a face
  32. target = ( camera.resolution[0]/2, camera.resolution[1]/2 )
  33. # Fisheye corrections. See https://medium.com/@kennethjiang/calibrate-fisheye-lens-using-opencv-333b05afa0b0
  34. # 640x480:
  35. #correct_fisheye = False
  36. #DIM=(640, 480)
  37. #K=np.array([[363.787052141742, 0.0, 332.09761373599576], [0.0, 362.23769923959975, 238.35982850966641], [0.0, 0.0, 1.0]])
  38. #D=np.array([[-0.019982864934848042], [-0.10107557279423625], [0.20401597940960342], [-0.1406464201639892]])
  39. # 1024x768:
  40. correct_fisheye = True
  41. DIM=(1024, 768)
  42. K=np.array([[583.6639649321671, 0.0, 518.0139106134624], [0.0, 580.8039721094127, 384.32095600935503], [0.0, 0.0, 1.0]])
  43. D=np.array([[0.0028045742945672475], [-0.14423839478882694], [0.23715105072799644], [-0.1400677375634837]])
  44. def distance(p0, p1):
  45. return math.sqrt((p0[0] - p1[0])**2 + (p0[1] - p1[1])**2)
  46. def search_rightprofile(i):
  47. # return profilefaceCascade.detectMultiScale(i,1.3,4,(cv2.CV_HAAR_DO_CANNY_PRUNING + cv2.CV_HAAR_FIND_BIGGEST_OBJECT + cv2.CV_HAAR_DO_ROUGH_SEARCH),(30,30))
  48. if scanright:
  49. return profilefaceCascade.detectMultiScale(i)
  50. else:
  51. return ()
  52. def search_leftprofile(i):
  53. if scanleft:
  54. revimage = cv2.flip(i, 1) # Flip the image
  55. # return profilefaceCascade.detectMultiScale(i,1.3,4,(cv2.CV_HAAR_DO_CANNY_PRUNING + cv2.CV_HAAR_FIND_BIGGEST_OBJECT + cv2.CV_HAAR_DO_ROUGH_SEARCH),(30,30))
  56. return profilefaceCascade.detectMultiScale(i)
  57. else:
  58. return ()
  59. def search_frontface(i):
  60. # return frontalfaceCascade.detectMultiScale(i,1.3,4,(cv2.CV_HAAR_DO_CANNY_PRUNING + cv2.CV_HAAR_FIND_BIGGEST_OBJECT + cv2.CV_HAAR_DO_ROUGH_SEARCH),(30,30))
  61. return frontalfaceCascade.detectMultiScale(i)
def undistort(i, balance=0.0, dim2=None, dim3=None):
    """Correct fisheye distortion in image `i` using the module-level K, D, DIM calibration.

    balance: passed to OpenCV's fisheye rectifier; 0.0 favours cropping to
        only-valid pixels, 1.0 preserves the full (more distorted) view.
    dim2: dimension used when estimating the new camera matrix (defaults to input size).
    dim3: dimension of the output image (defaults to input size).
    Returns the remapped (undistorted) image.
    See https://medium.com/@kennethjiang/calibrate-fisheye-lens-using-opencv-333b05afa0b0
    """
    # Sanity Check the source dimensions
    dim1 = i.shape[:2][::-1]  # dim1 is the (width, height) of the input image to un-distort
    assert dim1[0]/dim1[1] == DIM[0]/DIM[1], "Image to undistort needs to have same aspect ratio as the ones used in calibration"
    if not dim2:
        dim2 = dim1
    if not dim3:
        dim3 = dim1
    scaled_K = K * dim1[0] / DIM[0]  # The values of K scale with image dimension.
    scaled_K[2][2] = 1.0  # Except that K[2][2] is always 1.0
    # This is how scaled_K, dim2 and balance are used to determine the final K used
    # to un-distort the image. The OpenCV documentation fails to make this clear!
    new_K = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(scaled_K, D, dim2, np.eye(3), balance=balance)
    map1, map2 = cv2.fisheye.initUndistortRectifyMap(scaled_K, D, np.eye(3), new_K, dim3, cv2.CV_16SC2)
    return cv2.remap(i, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
  76. # allow the camera to warmup
  77. time.sleep(0.1)
  78. lastTime = time.time()*1000.0
  79. # capture frames from the camera
  80. for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
  81. # grab the raw NumPy array representing the image, then initialize the timestamp
  82. # and occupied/unoccupied text
  83. image = frame.array
  84. image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # convert to greyscale
  85. if correct_fisheye:
  86. image = undistort(image, 0.8)
  87. faces = ();
  88. faceFound = False # This variable is set to true if, on THIS loop a face has already been found
  89. # We search for a face three diffrent ways, and if we have found one already-
  90. # there is no reason to keep looking.
  91. # First Scan
  92. if lastface == 1:
  93. faces = search_rightprofile(image)
  94. if faces != ():
  95. faceFound=True
  96. lastface = 1
  97. elif lastface == 2:
  98. faces = search_leftprofile(image)
  99. if faces != ():
  100. faceFound=True
  101. lastface = 2
  102. else:
  103. faces = search_frontface(image)
  104. if faces != ():
  105. lastface = 3
  106. faceFound=True
  107. # Second scan
  108. if not faceFound:
  109. if lastface == 1:
  110. faces = search_frontface(image)
  111. if faces != ():
  112. lastface = 3
  113. faceFound=True
  114. elif lastface == 2:
  115. faces = search_rightprofile(image)
  116. if faces != ():
  117. faceFound=True
  118. lastface = 1
  119. else:
  120. faces = search_leftprofile(image)
  121. if faces != ():
  122. faceFound=True
  123. lastface = 2
  124. # Third scan
  125. if not faceFound:
  126. if lastface == 1:
  127. faces = search_leftprofile(image)
  128. if faces != ():
  129. faceFound=True
  130. lastface = 2
  131. elif lastface == 2:
  132. faces = search_frontface(image)
  133. if faces != ():
  134. lastface = 3
  135. faceFound=True
  136. else:
  137. faces = search_rightprofile(image)
  138. if faces != ():
  139. faceFound=True
  140. lastface = 1
  141. if faceFound:
  142. print("{}: {} faces found. Type: {}".format(time.time()*1000.0-lastTime, len(faces), lastface))
  143. # Draw a rectangle around the faces
  144. for (x, y, w, h) in faces:
  145. cv2.circle(image, (int(x+w/2), int(y+h/2)), int((w+h)/3), (255, 255, 255), 1)
  146. # Temporary, save the image
  147. cv2.imwrite("tmp/img.{}.facetype{}.png".format(datetime.now().strftime("%Y%m%d.%H%M%S.%f"), lastface), image)
  148. # Find the centermost face
  149. curdistance = 1000000 # Outside the dimensions of the picture
  150. for f in faces:
  151. x,y,w,h = f
  152. tmpcenter = [(w/2+x),(h/2+y)] # we are given an x,y corner point and a width and height, we need the center
  153. tmpdistance = distance(tmpcenter, cameracenter)
  154. if(tmpdistance < curdistance):
  155. print("Face closer to center detected. New target location: ({}, {}) - distance: {}".format(tmpcenter[0],tmpcenter[1],tmpdistance))
  156. center = tmpcenter;
  157. target = center
  158. else: # No face found
  159. print("{}: no faces found. Continuing with existing target ({}, {})".format(time.time()*1000.0-lastTime, target[0], target[1]))
  160. # clear the stream in preparation for the next frame
  161. rawCapture.truncate(0)
  162. lastTime = time.time()*1000.0
  163. # Determine directions and distance
  164. travel = [ (cameracenter[0] - target[0]) / camera.resolution[0], (cameracenter[1] - target[1]) /camera.resolution[1] ]
  165. print("To move horizontal: {}, vertical: {}".format(travel[0], travel[1]))