import cv2
import dlib
import numpy as np


## Face detection
def face_detection(img, upsample_times=1):
    # Ask the detector to find the bounding boxes of each face. The second
    # argument is the number of times to upsample the image before detecting;
    # upsampling makes everything bigger and allows smaller faces to be found.
    detector = dlib.get_frontal_face_detector()
    faces = detector(img, upsample_times)
    return faces


PREDICTOR_PATH = 'models/shape_predictor_68_face_landmarks.dat'
predictor = dlib.shape_predictor(PREDICTOR_PATH)
## Face and points detection
def face_points_detection(img, bbox: dlib.rectangle):
    # Get the landmarks/parts for the face inside the bounding box.
    shape = predictor(img, bbox)

    # Loop over the 68 facial landmarks and convert each
    # to a 2-tuple of (x, y)-coordinates.
    coords = np.asarray([[p.x, p.y] for p in shape.parts()], dtype=int)

    # Return the array of (x, y)-coordinates.
    return coords
def select_face(im, r=10, choose=True):
    faces = face_detection(im)

    if len(faces) == 0:
        return None, None, None

    if len(faces) == 1 or not choose:
        # Keep the largest face by bounding-box area.
        idx = int(np.argmax([(face.right() - face.left()) * (face.bottom() - face.top())
                             for face in faces]))
        bbox = faces[idx]
    else:
        bbox = []

        def click_on_face(event, x, y, flags, params):
            if event != cv2.EVENT_LBUTTONDOWN:
                return
            for face in faces:
                if face.left() < x < face.right() and face.top() < y < face.bottom():
                    bbox.append(face)
                    break

        im_copy = im.copy()
        for face in faces:
            # Draw the face bounding box.
            cv2.rectangle(im_copy, (face.left(), face.top()),
                          (face.right(), face.bottom()), (0, 0, 255), 1)
        cv2.imshow('Click the Face:', im_copy)
        cv2.setMouseCallback('Click the Face:', click_on_face)
        while len(bbox) == 0:
            cv2.waitKey(1)
        cv2.destroyAllWindows()
        bbox = bbox[0]

    points = np.asarray(face_points_detection(im, bbox))

    im_h, im_w = im.shape[:2]
    left, top = np.min(points, 0)
    right, bottom = np.max(points, 0)

    # Expand the landmark bounding box by r pixels, clamped to the image borders.
    x, y = max(0, left - r), max(0, top - r)
    w, h = min(right + r, im_w) - x, min(bottom + r, im_h) - y

    # Return the landmarks relative to the crop, the crop rectangle, and the cropped face.
    return points - np.asarray([[x, y]]), (x, y, w, h), im[y:y + h, x:x + w]
def get_landmarks(im):
    """
    Return the 68 face landmarks for the single face in the image,
    or an empty list if no face (or more than one face) is found.
    """
    rects = face_detection(im, upsample_times=1)

    if len(rects) > 1:
        # print("Too Many Faces")  # raise TooManyFaces
        return []
    if len(rects) == 0:
        # print("No Faces")  # raise NoFaces
        return []

    return np.matrix([[p.x, p.y] for p in predictor(im, rects[0]).parts()])
SCALE_FACTOR = 1


def read_im_and_landmarks(fname):
    """
    Read an image from disk and return it together with its face landmarks.
    """
    im = cv2.imread(fname, cv2.IMREAD_COLOR)
    im = cv2.resize(im, (im.shape[1] * SCALE_FACTOR,
                         im.shape[0] * SCALE_FACTOR))
    s = get_landmarks(im)

    return im, s
def mask_mouth(source_img):
    img, marks = read_im_and_landmarks(source_img)

    tmp_img = img.copy()
    tmp_blank_img = np.zeros_like(img)

    # Bail out if landmark detection failed (no face, or more than one face).
    if len(marks) == 0:
        return tmp_img

    # Fill the mouth polygon (landmark rows 49-60) in white on the blank image
    # to build the mask; drawContours expects int32 points.
    mask_list = np.asarray(marks[49:61], dtype=np.int32)
    draw_img = cv2.drawContours(tmp_blank_img, [mask_list], -1, (255, 255, 255), -1)

    # Invert the mask and black out the mouth region in the source image.
    draw_img_re = cv2.bitwise_not(draw_img)
    tmp_img = cv2.bitwise_and(tmp_img, draw_img_re)

    return tmp_img
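The helpers above can be chained into a small driver. Below is a minimal usage sketch, assuming the dlib model file is present at models/shape_predictor_68_face_landmarks.dat and that 'face.jpg' is a placeholder input image (both are assumptions, not part of the original code):

## Example usage (a minimal sketch; 'face.jpg' is a hypothetical input file)
if __name__ == '__main__':
    im = cv2.imread('face.jpg', cv2.IMREAD_COLOR)

    # Crop the largest detected face and its landmarks from the image.
    points, rect, face_crop = select_face(im, choose=False)
    if face_crop is None:
        print('No face detected')
    else:
        cv2.imshow('Selected face', face_crop)

        # Black out the mouth region of the same image.
        masked = mask_mouth('face.jpg')
        cv2.imshow('Mouth masked', masked)
        cv2.waitKey(0)
        cv2.destroyAllWindows()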