-
-
Save haruiz/7dfc7bd182355158302df7a3f12ee0df to your computer and use it in GitHub Desktop.
server.js — code for the post: Microsoft Cognitive Services Notes: Face API (Node.js)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
//1. Define the project dependencies.
const Hapi = require("hapi");
const Util = require("util");
const Fs = require("fs");
const Http = require("http");
const Request = require("request");
const Path = require("path");
const Stream = require('stream');
// API configuration: the subscription key and the Face API "detect" endpoint.
// The query string selects which face attributes the service should return.
const config = {
    FACE_API_KEY: "<KEY>",
    FACE_API_ENDPOINT: "https://api.projectoxford.ai/face/v1.0/detect?returnFaceId=true&returnFaceLandmarks=true&returnFaceAttributes=age,gender,headPose,smile,facialHair,glasses"
};
//2. Instantiate the server object.
const server = new Hapi.Server();
//3. Register the plugins ("inert" provides static-file serving).
server.register(require("inert"), function (err) {
    // Original code called the undefined function `trow(...)`, which would
    // itself crash with a ReferenceError; throw a proper Error instead.
    if (err)
        throw new Error("failed to load the plugin " + err);
});
//5. Configure the port the server listens on (environment port or 3000).
server.connection({ port: process.env.port || 3000 });
//6. Application routes.
// GET / -> serve the landing page from the views folder.
server.route({
    method: "GET",
    path: "/",
    handler: {
        file: Path.join(__dirname, '/views') + "/index.html"
    }
});
//7. Static assets of the app (.css, .js, images, etc.) under /public,
//   with directory listing enabled.
server.route({
    method: "GET",
    path: "/public/{path*}",
    handler: {
        directory: { path: Path.join(__dirname, 'public'), listing: true }
    }
});
/* POST /detectfaces: receives an image sent from the client (front-end)
   in base64 format and forwards it to the facial-recognition API. */
server.route({
    path: "/detectfaces",
    method: "POST",
    config: {
        // Payload restrictions: accept parsed bodies up to 50 MB.
        payload: {
            maxBytes: 1048576 * 50, /*50MB*/
            parse: true,
        },
        /* Handler executed for every POST request to /detectfaces. */
        handler: function (request, reply) {
            // Decode the base64 payload straight into a raw binary buffer.
            // Buffer.from replaces the deprecated `new Buffer(...)` constructor,
            // and the old base64 -> "binary" string -> Buffer round trip was
            // redundant (it produced the same bytes).
            var binaryBuffer = Buffer.from(request.payload.image, "base64");
            // Build the request to the Face API.
            var req = Request(
                {
                    url: config.FACE_API_ENDPOINT, // API endpoint URL
                    method: 'POST',
                    headers: {
                        // The image is sent to the API as a raw octet stream.
                        'Content-Type': 'application/octet-stream',
                        // Size of the image buffer.
                        'Content-Length': binaryBuffer.length,
                        // Subscription API key.
                        'Ocp-Apim-Subscription-Key': config.FACE_API_KEY,
                    }
                }, function (error, response, body) {
                    if (error) {
                        // Return the error to the client and STOP: without this
                        // `return`, reply() was called a second time below.
                        return reply(error);
                    }
                    // On success, forward the API response to the client.
                    reply(body);
                });
            /* Create a stream from the image buffer and pipe it as the
               body of the outgoing request. */
            var bufferStream = new Stream.PassThrough();
            bufferStream.end(binaryBuffer);
            bufferStream.pipe(req);
        }
    }
});
// Start the server and log the bound URI (stray ` |` scrape artifact removed).
server.start(function (err) {
    if (err) { throw err; }
    console.log('Server running at:', server.info.uri);
});
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment.