Last active
November 12, 2018 15:44
-
-
Save adekunleba/7c92b3d1adb4bff095fdc2208c27c43c to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Marshalling schema for the /imagepred response: a human-readable message,
# the two embeddings (serialized as lists of strings), a confidence value,
# and the individual timing measurements collected during the request.
_string_keys = (
    "Message",
    "Confidence",
    "Round Trip Time",
    "Process First Image Time",
    "Process Second Image Time",
    "Generate and Compare Time",
)
resource_field = {key: fields.String for key in _string_keys}
resource_field["Image_Embedding"] = fields.List(fields.String)
resource_field["ID_Embedding"] = fields.List(fields.String)
@ns.route('/imagepred')
class FaceNetPred(Resource):
    """Compares a face photo against an ID image using FaceNet embeddings."""

    @ns.doc(description="Generating Embeddings for face using FaceNet",
            responses={200: "Success",
                       400: "Bad Request",
                       500: "Internal Server Error"
                       })
    @ns.expect(image)
    def post(self):
        """Decode the two submitted images, compare their embeddings, and
        return the match result plus per-stage timing information.

        Expects a JSON body with keys 'image1' (original face photo) and
        'image2' (ID image). Returns the marshalled result with HTTP 200,
        or a 400 with the exception message if anything fails.
        """
        try:
            start_time = time.time()
            print("Start time is {}".format(start_time), flush=True)
            image_data = request.json
            pred_time = time.time()
            image_orig = image_data['image1']
            image_id = image_data['image2']

            # Decode both image byte arrays, timing each stage separately.
            # NOTE(fix): the original code decoded image_orig twice (copy-paste
            # duplicate) and overwrote the first-image timestamp; the duplicate
            # decode has been removed and the timing baseline corrected.
            start_decoding_image = time.time()
            or_image = np.array(_read_image(image_orig))
            start_decoding_second_image = time.time()
            id_image = np.array(_read_image(image_id))
            end_decoding_images = time.time()

            # Process the image embedding in two approaches:
            # one on the original image using MTCNN face cropping, and the
            # other on the already-cropped image sent by the client.
            # For the cropped image, only prewhitening remains to be done.
            message, img_emb, id_emb, confidence = id_face_pred(or_image, id_image)
            end_comparing_signatures = time.time()

            round_time_interval = "{}".format(time.time() - start_time)
            process_first_image_time = "{}".format(start_decoding_second_image - start_decoding_image)
            process_second_image_time = "{}".format(end_decoding_images - start_decoding_second_image)
            gen_compare_signature_time = "{}".format(end_comparing_signatures - end_decoding_images)
            print("Time interval is {}".format(round_time_interval), flush=True)
            print("After Uploads time for prediction is {}".format(time.time() - pred_time), flush=True)

            data = {"Message": message, "Image_Embedding": list(img_emb), "ID_Embedding": list(id_emb),
                    "Confidence": confidence, "Round Trip Time": round_time_interval,
                    "Process First Image Time": process_first_image_time,
                    "Process Second Image Time": process_second_image_time,
                    "Generate and Compare Time": gen_compare_signature_time}
            # NOTE(fix): the original computed the marshalled response but never
            # returned it, so the endpoint produced an empty body on success.
            return marshal(data, resource_field), 200
        except Exception as inst:
            # NOTE(fix): added the missing space between the two concatenated
            # string literals ("request.Original" -> "request. Original").
            return {"message": "Something wrong with incoming request. "
                               "Original Message:; {}".format(inst)}, 400
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment