import cv2
import os
import sqlite3
import numpy as np
from PIL import Image
from djangoproject.settings import BASE_DIR

# Earlier module-level setup, kept for reference:
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# detector = cv2.CascadeClassifier(BASE_DIR + '/haarcascade_frontalface_default.xml')
# recognizer = cv2.face.LBPHFaceRecognizer_create()
#
# # Create a connection with the database
# conn = sqlite3.connect('db.sqlite3')
# if conn != 0:
#     print("Connection Successful")
# else:
#     print('Connection Failed')
#     exit()
#
# # Create the table if it doesn't already exist
# conn.execute('''create table if not exists facedata (
#     id int primary key,
#     name char(20) not null)''')


class FaceRecognition:
    """Face registration and recognition using OpenCV Haar cascades and LBPH."""

    def __init__(self):
        self.detector = cv2.CascadeClassifier(BASE_DIR + '/haarcascade_frontalface_default.xml')
        self.__recognizer = cv2.face.LBPHFaceRecognizer_create()

    def faceDetect(self, Entry1):
        """Capture face samples for the given cart id, train the recognizer, then verify live."""
        cart_id = Entry1
        cam = cv2.VideoCapture(0)
        count = 0

        # Make sure the dataset folder exists before writing samples into it.
        os.makedirs(BASE_DIR + '/Webcashier/dataset', exist_ok=True)

        # Step 1: capture up to 15 face samples from the webcam.
        while True:
            ret, img = cam.read()
            if not ret:
                break
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = self.detector.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
                count += 1
                # Save the captured face crop into the dataset folder.
                cv2.imwrite(BASE_DIR + '/Webcashier/dataset/Incustumer.' + str(cart_id) + '.' + str(count) + '.jpg',
                            gray[y:y + h, x:x + w])
                cv2.imshow('Register Face', img)
            k = cv2.waitKey(100) & 0xff
            if k == 27:  # 'ESC' stops the capture early
                break
            elif count >= 15:
                break

        path = BASE_DIR + '/Webcashier/dataset'

        def getImagesAndLabels(path):
            """Load every saved sample and return the face crops with their ids."""
            imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
            faceSamples = []
            ids = []
            for imagePath in imagePaths:
                PIL_img = Image.open(imagePath).convert('L')  # convert it to grayscale
                img_numpy = np.array(PIL_img, 'uint8')
                # The id is the second dot-separated token of the file name.
                image_id = int(os.path.split(imagePath)[-1].split(".")[1])
                faces = self.detector.detectMultiScale(img_numpy)
                for (x, y, w, h) in faces:
                    faceSamples.append(img_numpy[y:y + h, x:x + w])
                    ids.append(image_id)
            return faceSamples, ids

        # Step 2: train the LBPH recognizer on the captured samples and save the model.
        faces, ids = getImagesAndLabels(path)
        self.__recognizer.train(faces, np.array(ids))
        os.makedirs(BASE_DIR + '/trainer', exist_ok=True)
        self.__recognizer.save(BASE_DIR + '/trainer/trainer.yml')

        # Step 3: verify the newly trained model against the live camera.
        self.__recognizer.read(BASE_DIR + '/trainer/trainer.yml')
        cascadePath = BASE_DIR + '/haarcascade_frontalface_default.xml'
        faceCascade = cv2.CascadeClassifier(cascadePath)
        font = cv2.FONT_HERSHEY_SIMPLEX
        confidence = 0
        face_id = 0
        # Define the minimum window size to be recognized as a face.
        minW = 0.1 * cam.get(cv2.CAP_PROP_FRAME_WIDTH)
        minH = 0.1 * cam.get(cv2.CAP_PROP_FRAME_HEIGHT)
        while True:
            ret, img = cam.read()
            if not ret:
                break
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = faceCascade.detectMultiScale(
                gray,
                scaleFactor=1.2,
                minNeighbors=5,
                minSize=(int(minW), int(minH)),
            )
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
                face_id, confidence = self.__recognizer.predict(gray[y:y + h, x:x + w])
                # LBPH confidence is a distance: lower means a better match, 0 is a perfect match.
                if confidence < 100:
                    name = 'Detected'
                else:
                    name = 'Unknown'
                cv2.putText(img, str(name), (x + 5, y - 5), font, 1, (255, 255, 255), 2)
                cv2.putText(img, str(confidence), (x + 5, y + h - 5), font, 1, (255, 255, 0), 1)
            cv2.imshow('Detect Face', img)
            k = cv2.waitKey(10) & 0xff  # Press 'ESC' to exit the video
            if k == 27:
                break
            if confidence > 50:
                break
        cam.release()
        cv2.destroyAllWindows()
        return np.abs(face_id)

    def recognizeFace(self):
        """Recognize a previously registered face and return its id."""
        self.__recognizer.read(BASE_DIR + '/trainer/trainer.yml')
        cascadePath = BASE_DIR + '/haarcascade_frontalface_default.xml'
        faceCascade = cv2.CascadeClassifier(cascadePath)
        font = cv2.FONT_HERSHEY_SIMPLEX
        confidence = 0
        face_id = 0

        # Retrieving names from the database, kept for reference:
        # data = conn.execute('''select * from facedata''')
        # for x in data:
        #     names.append(x[1])

        # Initialize and start real-time video capture.
        cam = cv2.VideoCapture(0)
        # Define the minimum window size to be recognized as a face.
        minW = 0.1 * cam.get(cv2.CAP_PROP_FRAME_WIDTH)
        minH = 0.1 * cam.get(cv2.CAP_PROP_FRAME_HEIGHT)
        while True:
            ret, img = cam.read()
            if not ret:
                break
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = faceCascade.detectMultiScale(
                gray,
                scaleFactor=1.2,
                minNeighbors=5,
                minSize=(int(minW), int(minH)),
            )
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
                face_id, confidence = self.__recognizer.predict(gray[y:y + h, x:x + w])
                # LBPH confidence is a distance: lower means a better match, 0 is a perfect match.
                if confidence < 100:
                    name = 'Detected'
                else:
                    name = 'Unknown'
                cv2.putText(img, str(name), (x + 5, y - 5), font, 1, (255, 255, 255), 2)
                cv2.putText(img, str(confidence), (x + 5, y + h - 5), font, 1, (255, 255, 0), 1)
            cv2.imshow('Detect Face', img)
            k = cv2.waitKey(10) & 0xff  # Press 'ESC' to exit the video
            if k == 27:
                break
            if confidence > 50:
                break
        print("\n Exiting Program")
        cam.release()
        cv2.destroyAllWindows()
        print(face_id)
        return np.abs(face_id)
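

# --- Usage sketch (illustrative, not part of the original module) -------------
# A minimal example of how this class might be exercised from the command line,
# assuming a webcam is attached and haarcascade_frontalface_default.xml exists
# at BASE_DIR. The cart id '1' below is a placeholder value.
if __name__ == '__main__':
    recognition = FaceRecognition()
    # Capture and enrol face samples for a hypothetical cart id, then train.
    enrolled_id = recognition.faceDetect(1)
    print('Enrolled and verified cart id:', enrolled_id)
    # Recognize an already-enrolled face using the saved trainer.yml model.
    recognized_id = recognition.recognizeFace()
    print('Recognized cart id:', recognized_id)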