#! /usr/bin/python3
# Face-tracking turret: grabs frames from a PiCamera, finds faces with Haar
# cascades, and sends single-character motion commands to a slave MCU over I2C.
from picamera.array import PiRGBArray
from picamera import PiCamera
from datetime import datetime
import time
import cv2
import sys
import imutils
import numpy as np  # BUG FIX: was "import np", which fails without a numpy-proxy package
import math # for sqrt distance formula

# i2c stuff
import smbus
bus = smbus.SMBus(1)
SLAVE_ADDRESS = 0x04

# Scan-mode timing windows, in milliseconds since the last face detection.
time_before_scan = 3*60*1000.0       # start scanning 3 minutes after losing the face
time_after_scan = (3*60+15)*1000.0   # give up scanning 15 seconds after that

# Create the haar cascade
#frontalfaceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
frontalfaceCascade = cv2.CascadeClassifier("frontalface_fromweb.xml")
profilefaceCascade = cv2.CascadeClassifier("haarcascade_profileface.xml")

face = [0,0,0,0] # This will hold the array that OpenCV returns when it finds a face: (makes a rectangle)
center = [0,0]   # Center of the face: a point calculated from the above variable
lastface = 0     # int 1-3 used to speed up detection. The script is looking for a right profile face,
                 # a left profile face, or a frontal face; rather than searching for all three every
                 # time, it uses this variable to remember which it last saw and looks for that again.
                 # If it doesn't find it, it's set back to zero and on the next loop it will search
                 # for all three. This basically tripples the detect time so long as the face hasn't
                 # moved much.
scanleft = True  # Should we scan for left profiles?
scanright = True # Should we scan for right profiles?

# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
#camera.resolution = (160, 120)
#camera.resolution = (640,480)
camera.resolution = (1024,768)
cameracenter = (camera.resolution[0]/2, camera.resolution[1]/2)
camera.framerate = 32
rawCapture = PiRGBArray(camera, camera.resolution)

# Points to the last place we saw a face
target = ( camera.resolution[0]/2, camera.resolution[1]/2 )

# Fisheye corrections. See https://medium.com/@kennethjiang/calibrate-fisheye-lens-using-opencv-333b05afa0b0
# 640x480:
#correct_fisheye = False
#DIM=(640, 480)
#K=np.array([[363.787052141742, 0.0, 332.09761373599576], [0.0, 362.23769923959975, 238.35982850966641], [0.0, 0.0, 1.0]])
#D=np.array([[-0.019982864934848042], [-0.10107557279423625], [0.20401597940960342], [-0.1406464201639892]])
# 1024x768:
correct_fisheye = True
DIM=(1024, 768)
K=np.array([[583.6639649321671, 0.0, 518.0139106134624], [0.0, 580.8039721094127, 384.32095600935503], [0.0, 0.0, 1.0]])
D=np.array([[0.0028045742945672475], [-0.14423839478882694], [0.23715105072799644], [-0.1400677375634837]])
- #def distance(p0, p1):
- # return math.sqrt((p0[0] - p1[0])**2 + (p0[1] - p1[1])**2)
def search_rightprofile(i):
    """Search image i for right-profile faces.

    Currently hard-disabled: the unconditional return below was left in place
    (presumably to suppress profile false positives — TODO confirm) and makes
    the rest of the function unreachable, so this always returns ().
    """
    return ()
    # --- unreachable while the early return above remains ---
    if scanright:
        return profilefaceCascade.detectMultiScale(i)
    return ()
def search_leftprofile(i):
    """Search image i for left-profile faces by mirroring the frame so the
    right-profile cascade can match it.

    Currently hard-disabled: the unconditional return below makes the rest
    unreachable, so this always returns ().
    NOTE(review): the original (dead) code flipped the image into revimage but
    then ran detectMultiScale on the un-flipped i — fixed below for whenever
    this is re-enabled; confirm intent before removing the early return.
    """
    return ()
    # --- unreachable while the early return above remains ---
    if scanleft:
        revimage = cv2.flip(i, 1)  # mirror the image
        return profilefaceCascade.detectMultiScale(revimage)
    return ()
def search_frontface(i):
    """Search image i for frontal faces.

    Returns the (x, y, w, h) rectangles from the frontal Haar cascade, or an
    empty tuple when nothing is found. (An unreachable duplicate return with
    maxSize=(30,30) followed the first return in the original; removed.)
    """
    return frontalfaceCascade.detectMultiScale(i)
def undistort(i, balance=0.0, dim2=None, dim3=None):
    """Undo fisheye distortion on image i using the module-level calibration
    constants K, D and DIM.

    balance trades cropped field-of-view against residual black border;
    dim2/dim3 override the intermediate/output dimensions (both default to
    the input image's own size).
    """
    # Dimensions (width, height) of the image we were actually handed.
    input_dim = i.shape[:2][::-1]
    assert input_dim[0]/input_dim[1] == DIM[0]/DIM[1], "Image to undistort needs to have same aspect ratio as the ones used in calibration"
    dim2 = dim2 or input_dim
    dim3 = dim3 or input_dim
    # Scale the calibration matrix to this image size; K[2][2] must stay 1.0.
    scaled_K = K * input_dim[0] / DIM[0]
    scaled_K[2][2] = 1.0
    # Derive the camera matrix used for the actual undistortion — the OpenCV
    # docs are unclear on how scaled_K, dim2 and balance interact, hence this
    # explicit two-step recipe.
    new_K = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(
        scaled_K, D, dim2, np.eye(3), balance=balance)
    map1, map2 = cv2.fisheye.initUndistortRectifyMap(
        scaled_K, D, np.eye(3), new_K, dim3, cv2.CV_16SC2)
    return cv2.remap(i, map1, map2,
                     interpolation=cv2.INTER_LINEAR,
                     borderMode=cv2.BORDER_CONSTANT)
def findface(image):
    """Run the three cascade searches against image, most-likely-first.

    Tries the detector that matched last time (module global lastface:
    1 = right profile, 2 = left profile, 3 = frontal) before falling back to
    the other two, so a face that hasn't moved much is re-found on the first
    attempt. Records the matching detector back into lastface and returns its
    rectangles, or () when no detector matches.

    BUG FIX: the original set an unused faceFound flag on a first-scan frontal
    hit instead of recording lastface = 3; frontal hits now update lastface
    like the profile hits always did.
    """
    global lastface
    # Search order keyed by the last successful detector. Each entry is
    # (search function, lastface code to record on a hit). The orders below
    # reproduce the original three-scan cascade exactly.
    if lastface == 1:
        order = [(search_rightprofile, 1), (search_frontface, 3), (search_leftprofile, 2)]
    elif lastface == 2:
        order = [(search_leftprofile, 2), (search_rightprofile, 1), (search_frontface, 3)]
    else:
        order = [(search_frontface, 3), (search_leftprofile, 2), (search_rightprofile, 1)]
    for search, facecode in order:
        faces = search(image)
        # detectMultiScale returns () on no match and a numpy array on a hit,
        # so len() works for both.
        if len(faces) > 0:
            lastface = facecode
            return faces
    return ()
def circlefaces(image, faces):
    """Draw a cyan circle around each detected face rectangle, then dump the
    annotated frame to tmp/ (filename carries a timestamp and the lastface
    detector code) for offline debugging.

    NOTE(review): the paste this was recovered from had lost its indentation;
    the imwrite is placed after the loop (one file per call) — confirm against
    the original if multiple faces per frame matter.
    """
    global lastface
    for (x, y, w, h) in faces:
        center_x = int(x + w/2)
        center_y = int(y + h/2)
        radius = int((w + h) / 3)
        cv2.circle(image, (center_x, center_y), radius, (255, 255, 0), 1)
    # Temporary, save the image
    cv2.imwrite("tmp/img.{}.facetype{}.png".format(datetime.now().strftime("%Y%m%d.%H%M%S.%f"), lastface), image)
def distance_to_closest(faces, center_x=None):
    """Return the signed horizontal distance (pixels) from the screen center
    to the face nearest that center.

    Negative values mean the face is left of center. faces is an iterable of
    (x, y, w, h) rectangles; only the horizontal axis is considered. center_x
    defaults to the module-level camera center (backward compatible); passing
    it explicitly makes the function reusable and testable.

    Returns None when faces is empty.
    """
    if center_x is None:
        center_x = cameracenter[0]
    closestdistance = None
    for x, y, w, h in faces:
        print("Face found at {},{} with width {} and height {}.".format(x, y, w, h))
        centerpoint = (w/2) + x
        distance = centerpoint - center_x
        # BUG FIX: the original compared abs(distance) against the *signed*
        # stored value, so once a face left of center was stored no closer
        # face could ever replace it. Compare magnitudes on both sides.
        if closestdistance is None or abs(distance) < abs(closestdistance):
            print("Face closer to center detected. New target location: {} (ctr: {}) - distance: {}".format(centerpoint, center_x, distance))
            closestdistance = distance
    return closestdistance
def send_char(tc):
    """Send the single character tc to the I2C slave as one byte.

    Deliberately best-effort: a flaky I2C bus should not kill the tracking
    loop, so any bus error is logged and swallowed.
    """
    try:
        bus.write_byte(SLAVE_ADDRESS, ord(tc))
    except Exception as err:
        print("Bus Error while sending {}: {}".format(tc, str(err)))
def stop():
    """Halt all turret movement by sending the space command to the slave."""
    print("STOPPING")
    send_char(' ')
def left(distance):
    """Rotate the turret left toward a face abs(distance) pixels off-center.

    Sends 'a' to start the turn. Far targets (>300 px) keep turning until the
    next frame re-evaluates; nearer targets turn for a time proportional to
    the distance and then stop. The 175 and 100 divisors are empirically
    tuned pixels-per-second rates — TODO confirm against the slave firmware.
    """
    send_char('a')
    if abs(distance) > 300:
        # Too far to time accurately: keep moving, no stop() here.
        print('GO LEFT')
        return
    elif abs(distance) > 50:
        # Typo "divisior" in the original log message fixed.
        print('GO LEFT FOR {}s (divisor 175)'.format(1.0*abs(distance)/175.0))
        time.sleep(1.0*abs(distance)/175.0)
        stop()
    else:
        print('GO LEFT FOR {}s (divisor 100)'.format(1.0*abs(distance)/100.0))
        time.sleep(1.0*abs(distance)/100.0)
        stop()
def right(distance=None):
    """Rotate the turret right toward a face abs(distance) pixels off-center.

    Mirror of left(): sends 'd', then either keeps turning (far targets) or
    turns for a distance-proportional time and stops.
    """
    if distance is None:
        # BUG FIX: the original default of None crashed on abs(None) when
        # called with no argument. Treat "no distance given" as an
        # open-ended turn, matching the scan-mode call right(1000.0).
        distance = 1000.0
    send_char('d')
    if abs(distance) > 300:
        # Too far to time accurately: keep moving, no stop() here.
        print("GO RIGHT")
        return
    elif abs(distance) > 50:
        # Typo "divisior" in the original log message fixed.
        print('GO RIGHT FOR {}s (divisor 175)'.format(1.0*abs(distance)/175.0))
        time.sleep(1.0*abs(distance)/175.0)
        stop()
    else:
        print('GO RIGHT FOR {}s (divisor 100)'.format(1.0*abs(distance)/100.0))
        time.sleep(1.0*abs(distance)/100.0)
        stop()
def fire():
    """Trigger the firing mechanism by sending 'F' to the slave controller."""
    print("FIRING!")
    send_char('F')
if __name__ == "__main__":
    # Main tracking loop: capture a frame, find the face nearest the screen
    # center, and steer the turret toward it; fire when it is centered.
    # After 3 minutes without a face, sweep right ("scan") for 15 seconds.

    # allow the camera to warmup
    time.sleep(0.1)
    # All timestamps below are in milliseconds (time.time() * 1000).
    lastTime = time.time()*1000.0
    lastDetected = lastTime
    # lastAction is a small state machine: None/"Stop"/"Scan"/"Fire"/"Left"/"Right".
    lastAction = None
    # capture frames from the camera
    for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
        # grab the raw NumPy array representing the image, then initialize the timestamp
        # and occupied/unoccupied text
        timeSinceDetected = 1000.0*time.time() - lastDetected
        print('Time: {}; Time since detected: {} ({}/{})'.format(time.time()*1000.0 - lastTime, timeSinceDetected, time_before_scan, time_after_scan))
        lastTime = time.time()*1000.0
        image = frame.array
        # image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # convert to greyscale
        if correct_fisheye:
            # 0.8 balance keeps most of the field of view after undistortion.
            image = undistort(image, 0.8)
        faces = findface(image)
        if faces == ():
            # No face this frame: decide between scanning, stopping the scan,
            # or simply holding still, based on how long the face has been lost.
            print("No face found.")
            rawCapture.truncate(0)
            if timeSinceDetected > time_before_scan and timeSinceDetected < time_after_scan:
                # Inside the scan window: start a sweep right (once).
                if lastAction != "Scan":
                    lastAction = "Scan"
                    print("Beginning Scanning...")
                    right(1000.0) # 1000 is arbitrary
                # Otherwise just keep doing what youre doing
            elif timeSinceDetected > time_after_scan:
                # Scan window expired: stop and reset the detection clock so
                # another scan will start after another full wait period.
                lastAction = "Stop"
                print("Stopping scanning...")
                lastDetected = time.time()*1000.0
                stop()
            else:
                # Face only recently lost: hold position.
                lastAction = "Stop"
                stop()
            continue
        # Face found: remember when, annotate/save the frame, then act on the
        # signed horizontal distance to the face closest to center.
        lastDetected = time.time() * 1000.0
        circlefaces(image, faces)
        distance = distance_to_closest(faces)
        if lastAction == "Scan":
            # Scanning, but detected a face. Stop and continue
            print("Face detected. Aborted scanning.")
            stop()
            lastAction = "Stop"
        elif abs(distance) < 15:
            # Face is centered (within 15 px). Fire once, then idle.
            if lastAction == "Fire":
                # Do nothing
                time.sleep(1)
            elif lastAction == "Stop":
                lastAction = "Fire"
                fire()
                print("Sleeping for 15 seconds...")
                time.sleep(15)
                print("Resuming...")
            else:
                # Was moving: settle first; firing happens on a later frame.
                lastAction = "Stop"
                stop()
        elif distance < 0:
            # Negative distance means the face is left of center.
            lastAction = "Left"
            left(distance)
        elif distance > 0:
            lastAction = "Right"
            right(distance)
        else:
            # Unreachable given the branches above; kept as a diagnostic.
            print("Face found but no action taken. Distance = {}; Last Action = {}".format(distance, lastAction))

        # clear the stream in preparation for the next frame
        rawCapture.truncate(0)