Automating Messages, Mails and Creating Instances in AWS using Face Detection.

Sri Raviteja
3 min readAug 18, 2021

Team : Summer_6_14
-Mohammed Adnan
-Prattipati Sri Raviteja
-Mohammed Awais Ahmed
-Saami Abdul Samad

SUMMER 2021 — Task 06

Task Description 📄

❄️ Create a program that performs the below-mentioned tasks upon recognizing a particular face.

When it recognizes your face —
👉 It sends a mail to your mail ID saying "This is the face of your_name."
👉 Second, it sends a WhatsApp message to your friend; it can be anything.

📌 When it recognizes a second face — it can be your friend's or a family member's face —
👉 It creates an EC2 instance in AWS using the CLI.
👉 It creates a 5 GB EBS volume and attaches it to the instance.

AWS CLI setup

Refer to the following links for setting up the AWS

https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html

https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html

Python library: PyWhatKit provides the following functions used here:

  • send_mail(email_sender: str, password: str, subject: str, message: str, email_receiver: str)
  • sendwhatmsg(phone_no: str, message: str, time_hour: int, time_min: int, wait_time: int = 20, print_wait_time: bool = True, browser: str = None)

Making a Face Detection Model and Training the Model.

Import the Libraries required

# importing libraries
import datetime
import os
import subprocess as sp
import sys
import time
from os import listdir
from os.path import isfile, join

import cv2
import numpy as np
import pywhatkit  # used for WhatsApp messages and email
# Load the Haar cascade face classifier (the XML file must be in the working directory)
face_classifier = cv2.CascadeClassifier(r'haarcascade_frontalface_default.xml')

Collecting the images

def face_extractor(img):
    """Return the first detected face in *img* cropped to its bounding box.

    Args:
        img: BGR frame as returned by ``cv2.VideoCapture.read``.

    Returns:
        The cropped BGR face region, or ``None`` when no face is detected.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)
    # BUG FIX: the original `if faces is ()` is an identity comparison that
    # is never True for the array detectMultiScale returns (and is a
    # SyntaxWarning on Python 3.8+); test emptiness explicitly instead.
    if len(faces) == 0:
        return None
    # Only the first detected face is used (same as the original, which
    # returned inside the loop on the first iteration).
    x, y, w, h = faces[0]
    return img[y:y + h, x:x + w]
# Collect 200x200 grayscale face crops from the webcam until Enter is pressed.
cap = cv2.VideoCapture(0)  # default webcam
count = 0
while True:
    ret, frame = cap.read()
    if not ret:
        # Camera returned no frame; skip instead of passing None downstream.
        continue
    # BUG FIX: call face_extractor once per frame — the original ran the
    # (expensive) detection twice, once for the None-check and once to resize.
    face = face_extractor(frame)
    if face is not None:
        count += 1
        face = cv2.resize(face, (200, 200))
        face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
        # Save files in directory
        file_name_path = 'D:/SummerTraining2021/Task6/faces/' + str(count) + '.jpg'
        cv2.imwrite(file_name_path, face)
        # Display live count of images
        cv2.putText(face, str(count), (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
        cv2.imshow('Cropped Image', face)
    else:
        print("No face found")
    if cv2.waitKey(1) == 13:  # 13 is the Enter key
        break
# BUG FIX: release the camera exactly once (the original called
# cap.release() a second time after destroyAllWindows).
cap.release()
cv2.destroyAllWindows()
print("Images Collected!")

Training the Model

# Train an LBPH face recognizer on the collected crops; each image gets its
# own integer label (index in the file listing).
model = cv2.face.LBPHFaceRecognizer_create()
data_path = 'D:/SummerTraining2021/Task6/faces/'
onlyfiles = [f for f in listdir(data_path) if isfile(join(data_path, f))]
Training_Data, Labels = [], []
for i, file_name in enumerate(onlyfiles):
    # Idiom fix: use the loop variable instead of re-indexing onlyfiles[i];
    # join() instead of string concatenation.
    image = cv2.imread(join(data_path, file_name), cv2.IMREAD_GRAYSCALE)
    if image is None:
        # ROBUSTNESS: cv2.imread returns None for unreadable / non-image
        # files; the original would then crash inside model.train.
        continue
    Training_Data.append(np.asarray(image, dtype=np.uint8))
    Labels.append(i)
Labels = np.asarray(Labels, dtype=np.int32)
model.train(np.asarray(Training_Data), Labels)
print("Model trained successfully")

Implementing the face recognition model and creating AWS Instances

face_classifier = cv2.CascadeClassifier(r'haarcascade_frontalface_default.xml')

def face_detector(img, size=0.5):
    """Annotate *img* with a rectangle per detected face and return a crop.

    Args:
        img: BGR frame from the webcam.
        size: unused; kept for interface compatibility.

    Returns:
        ``(img, roi)`` where roi is the last detected face resized to
        200x200, or ``(img, [])`` when no face is found.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)

    # BUG FIX: `faces is ()` is an identity test that never matches the
    # ndarray detectMultiScale returns; check emptiness with len().
    if len(faces) == 0:
        return img, []

    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 255), 2)
        roi = img[y:y + h, x:x + w]
        roi = cv2.resize(roi, (200, 200))
    return img, roi
# Opening the camera
# Main loop: recognize the trained face (-> WhatsApp + mail) or, on a low
# confidence match (second face), provision an EC2 instance with an EBS volume.
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    image, face = face_detector(frame)
    try:
        face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
        results = model.predict(face)
        if results[1] < 500:
            confidence = int(100 * (1 - (results[1]) / 400))
            display = str(confidence) + '% Confident'
            cv2.putText(image, display, (220, 120), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255))
            if confidence > 90:
                # BUG FIX: cv2.show does not exist — the display call is imshow.
                cv2.imshow('Face recognized', image)
                # BUG FIX: the pywhatkit function is sendwhatmsg_instantly;
                # the original called the non-existent sendwhatmasq_instantly.
                pywhatkit.sendwhatmsg_instantly(phone_no="<phone_number>", message="Face recognized successfully!")
                pywhatkit.send_mail("sender_email", "password", "Face Recognition", "User Recognized", "receiver_email")
                break
            else:
                # BUG FIX: the original shell strings were broken across source
                # lines (a SyntaxError); each command is now a single string.
                # --query/--output text write the bare IDs to the files,
                # replacing the original's fragile split('""') JSON parsing.
                os.system(
                    "aws ec2 run-instances --image-id ami-010aff33ed5991201"
                    " --instance-type t2.micro --subnet-id subnet-5793863f"
                    " --count 1 --security-group-ids sg-00b5099795b410916"
                    " --key-name key"
                    " --query Instances[0].InstanceId --output text > ec2.txt"
                )
                print("Instance launched !")
                # BUG FIX: availability zone is ap-south-1a (digit one, not
                # letter l); "group1" is not an EBS volume type — gp2 is; the
                # flag is --tag-specifications with ResourceType casing.
                os.system(
                    "aws ec2 create-volume --availability-zone ap-south-1a"
                    " --size 5 --volume-type gp2"
                    " --tag-specifications"
                    " \"ResourceType=volume,Tags=[{Key=face,Value=volume}]\""
                    " --query VolumeId --output text > ebs.txt"
                )
                print("Volume created, instance initializing......")
                time.sleep(120)  # give the instance time to reach "running"
                with open("ec2.txt") as f:
                    ec2_id = f.read().strip()
                with open("ebs.txt") as f:
                    ebs_id = f.read().strip()
                # BUG FIX: the original omitted the space after --volume-id,
                # producing "--volume-idvol-xxxx".
                os.system("aws ec2 attach-volume --instance-id " + ec2_id +
                          " --volume-id " + ebs_id + " --device /dev/xvdf")
                print("Volume Attached")
                break

    except cv2.error:
        # No face crop this frame (face_detector returned []), so cvtColor/
        # predict raised; keep showing the live feed. Narrowed from the
        # original bare `except:` so real bugs are no longer swallowed.
        cv2.putText(image, "No Face Found", (220, 120), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
        cv2.imshow('Face Recognition', image)

    if cv2.waitKey(1) == 13:  # 13 is the Enter key
        break

cap.release()
cv2.destroyAllWindows()

Hence, the required task is achieved!!

--

--