+ This example will show your webcam stream. When you're ready, click "I'm smiling!"
+
+
+
For developers:
+
Once the service detects a smile, it will fire a browser event that you can listen for, called userHappy. You can listen for that event to do anything you want.
+
+
+
+
+
+
+
+
+
+
+
+
+
This text doesn't appear until you smile at the camera. See `main.js` for usage example.
+
+
+
-
+
+
diff --git a/js/main.js b/js/main.js
index dbd6a4f..2687fa1 100644
--- a/js/main.js
+++ b/js/main.js
@@ -79,6 +79,14 @@ function capitalizeFirstLetter(string) {
document.addEventListener("DOMContentLoaded", function() {
+
// When the Face API reports a smile, swap the hidden/revealed panels
// and mark the page as "happy" for CSS hooks.
window.addEventListener('userHappy', function () {
    var toHide = document.querySelector("#hide-after-smile");
    var toShow = document.querySelector("#smile-to-unlock");

    toHide.classList.add('hidden');
    document.body.classList.add("user-is-happy");
    toShow.classList.remove('hidden');
});
var david = new Student('David Kerr', 'Dork', 'david.m.kerr@uconn.edu', 'I\'m a DMD student at UConn working on a concentration in web design. I like sci-fi robots and spaceships, and enjoy building models of them.', 'https://vignette.wikia.nocookie.net/gundam/images/5/5e/Gundam_Exia_LOL.jpg/revision/latest?cb=20101019165531');
@@ -88,7 +96,7 @@ document.addEventListener("DOMContentLoaded", function() {
var amanda = new Student('Amanda Brickner', 'Crazy Cat Lady', 'amandausr@aol.com', 'Something something not since the accident', 'https://files.slack.com/files-pri/T040V7VBL-FACGH05DK/jpeg_20170905_214850.jpg');
var daniel = new Student('Daniel Bean', 'Designer', 'danny@collectivema.com', 'Hi I get really scared easily so pls don\'t talk to me', 'https://files.slack.com/files-pri/T040V7VBL-FAC5PD5HS/img_1680.jpg');
var emilyM = new Student('Emily McAndrew', 'Student', 'emily.mcandrew@uconn.edu', '21 year old vegan who plans to live in New York City after graduation', 'https://files.slack.com/files-pri/T040V7VBL-FAD9CJGA2/0.jpg');
-
+
classList = ['Amanda Brickner', 'Daniel Bean', 'David Kerr', 'Emily McAndrew', 'Max Nonken', 'Rachel Sarnie', 'Emily Ruffell'];
diff --git a/js/plugins.js b/js/plugins.js
index f887480..4a0af1c 100644
--- a/js/plugins.js
+++ b/js/plugins.js
@@ -22,3 +22,219 @@
}());
// Place any jQuery/helper plugins in here.
+
+
/**
 * Convert a data: URL into a Blob carrying the declared MIME type.
 * Handles both base64-encoded and plain percent-encoded payloads.
 *
 * @param {string} dataURL - e.g. "data:image/jpeg;base64,..." or "data:text/plain,..."
 * @returns {Blob} the decoded payload, typed with the URL's content type
 */
function makeblob(dataURL) {
    var BASE64_MARKER = ';base64,';

    if (dataURL.indexOf(BASE64_MARKER) === -1) {
        // Not base64: "data:<type>,<percent-encoded payload>"
        var plainParts = dataURL.split(',');
        var plainType = plainParts[0].split(':')[1];
        var decoded = decodeURIComponent(plainParts[1]);
        return new Blob([decoded], { type: plainType });
    }

    var parts = dataURL.split(BASE64_MARKER);
    var contentType = parts[0].split(':')[1];
    var raw = atob(parts[1]);

    // Copy the binary string into a typed array byte-for-byte so the Blob
    // holds raw bytes rather than UTF-8 re-encoded text.
    var bytes = new Uint8Array(raw.length);
    for (var i = 0; i < raw.length; i++) {
        bytes[i] = raw.charCodeAt(i);
    }

    return new Blob([bytes], { type: contentType });
}
+
+
+
/**
 * POST a captured image (as a data URI) to the Azure Face API detect
 * endpoint and forward the detected happiness score to evaluateHappiness.
 *
 * @param {string} sourceImageUrl - JPEG data URI from the capture canvas
 */
function az_processImage(sourceImageUrl) {
    // SECURITY: a live subscription key must not be committed to source
    // control — move it to server-side config or inject it at build time.
    var subscriptionKey = "cccbaa84b9b4408cb88b235d70d7ee0a";

    var uriBase = "https://westcentralus.api.cognitive.microsoft.com/face/v1.0/detect";

    // Only the emotion attributes are needed to detect a smile.
    var params = {
        "returnFaceId": "false",
        "returnFaceLandmarks": "false",
        "returnFaceAttributes": "emotion",
    };

    $.ajax({
        url: uriBase + "?" + $.param(params),

        beforeSend: function (xhrObj) {
            // The body is a raw image blob, authenticated by the key header.
            xhrObj.setRequestHeader("Content-Type", "application/octet-stream");
            xhrObj.setRequestHeader("Ocp-Apim-Subscription-Key", subscriptionKey);
        },

        type: "POST",

        // Send the Blob as-is; jQuery must not serialize it.
        processData: false,

        data: makeblob(sourceImageUrl)
    })

    .done(function (data) {
        // The API returns one array entry per detected face; an empty array
        // means no face was found, so guard before dereferencing data[0].
        if (data && data.length > 0) {
            evaluateHappiness(data[0].faceAttributes.emotion.happiness);
        }
    })

    .fail(function (jqXHR, textStatus, errorThrown) {
        // Build a readable error message; parse the response body once and
        // tolerate non-JSON bodies instead of throwing inside the handler.
        var errorString = (errorThrown === "") ? "Error. " : errorThrown + " (" + jqXHR.status + "): ";
        if (jqXHR.responseText !== "") {
            try {
                var body = JSON.parse(jqXHR.responseText);
                errorString += body.message ? body.message : body.error.message;
            } catch (parseErr) {
                errorString += jqXHR.responseText;
            }
        }
        alert(errorString);
    });
}
+
+
/**
 * Fire the global "userHappy" event when the detected happiness score
 * exceeds the 0.5 confidence threshold.
 *
 * @param {number} val - happiness score from the Face API (0..1)
 */
function evaluateHappiness(val) {
    // Negated comparison (rather than val <= 0.5) so a NaN score is
    // still treated as "not happy", exactly as before.
    if (!(val > 0.5)) {
        return;
    }
    window.dispatchEvent(new CustomEvent('userHappy'));
}
+
+
+
(function() {
    // The width and height of the captured photo. We will set the
    // width to the value defined here, but the height will be
    // calculated based on the aspect ratio of the input stream.
    var width = 320;    // We will scale the photo width to this
    var height = 0;     // This will be computed based on the input stream

    // |streaming| indicates whether or not we're currently streaming
    // video from the camera. Obviously, we start at false.
    var streaming = false;

    // The various HTML elements we need to configure or control. These
    // will be set by the startup() function.
    var video = null;
    var canvas = null;
    var photo = null;
    var startbutton = null;

    // Wire up the DOM elements, request camera access, and size the
    // video/canvas once the stream dimensions are known.
    function startup() {
        video = document.getElementById('video');
        canvas = document.getElementById('canvas');
        photo = document.getElementById('photo');
        startbutton = document.getElementById('startbutton');

        // Legacy vendor-prefixed getUserMedia lookup (kept for old browsers).
        navigator.getMedia = (navigator.getUserMedia ||
                              navigator.webkitGetUserMedia ||
                              navigator.mozGetUserMedia ||
                              navigator.msGetUserMedia);

        navigator.getMedia(
            {
                video: true,
                audio: false
            },
            function(stream) {
                // Modern browsers removed URL.createObjectURL(MediaStream);
                // prefer srcObject and fall back only on old engines.
                if ('srcObject' in video) {
                    video.srcObject = stream;
                } else if (navigator.mozGetUserMedia) {
                    video.mozSrcObject = stream;
                } else {
                    var vendorURL = window.URL || window.webkitURL;
                    video.src = vendorURL.createObjectURL(stream);
                }
                video.play();
            },
            function(err) {
                console.log("An error occurred! " + err);
            }
        );

        video.addEventListener('canplay', function(ev) {
            if (!streaming) {
                height = video.videoHeight / (video.videoWidth / width);

                // Firefox currently has a bug where the height can't be read
                // from the video, so we will make assumptions if this happens.
                if (isNaN(height)) {
                    height = width / (4 / 3);
                }

                video.setAttribute('width', width);
                video.setAttribute('height', height);
                canvas.setAttribute('width', width);
                canvas.setAttribute('height', height);
                streaming = true;
            }
        }, false);

        startbutton.addEventListener('click', function(ev) {
            takepicture();
            ev.preventDefault();
        }, false);

        clearphoto();
    }

    // Fill the photo with an indication that none has been captured.
    function clearphoto() {
        var context = canvas.getContext('2d');
        context.fillStyle = "#AAA";
        context.fillRect(0, 0, canvas.width, canvas.height);

        var data = canvas.toDataURL('image/jpeg');
        photo.setAttribute('src', data);
    }

    // Capture the current video frame into the canvas, then hand the
    // resulting JPEG data URL to the Azure Face API for smile detection.
    function takepicture() {
        var context = canvas.getContext('2d');
        if (width && height) {
            canvas.width = width;
            canvas.height = height;
            context.drawImage(video, 0, 0, width, height);

            var dataUri = canvas.toDataURL('image/jpeg');
            // az_processImage converts the data URI to a Blob itself, so no
            // manual base64-to-byte-array decoding is needed here.
            az_processImage(dataUri);
        } else {
            clearphoto();
        }
    }

    // Set up our event listener to run the startup process
    // once loading is complete.
    window.addEventListener('load', startup, false);

})();