
04.04 ml5.js

ml5.js is a machine learning JavaScript library that works well with p5.js.

From the ml5.js documentation: in order to use the library, it must be referenced in the <head> of the HTML page in which you wish to use it.

<!-- include ml5.js library from Content Delivery Network -->
<script src="https://unpkg.com/ml5@latest/dist/ml5.min.js"></script>

ml5.js Examples

ml5.js is a machine learning JavaScript library built on top of TensorFlow.js, a project by Google. ml5.js handles much of the busywork involved in using TensorFlow.js directly and is designed to work well with p5.js. Instead of requiring cloud processing of machine learning training data, ml5.js lets you get started quickly with machine learning directly in a web browser.

Handpose

Using a webcam or other camera input, it is possible to track the position and pose of a hand.

ml5.js Handpose Demo Sketch from ml5.js Documentation

When Handpose detects a hand, it returns results, an array of objects, each containing arrays of the coordinates of points on the hand in x, y, and z space. To see this array, use this code in the draw() function.

// show the predictions array in the console if it is not empty, then stop the program after one detection
if (predictions.length > 0) {
  console.log(predictions);
  noLoop();
}

Wait for a hand to be detected and then look at the predictions array in the console log.

ml5.js Handpose Results Array

Below is typical output of the results array stored in the predictions variable, as shown in the console of the p5.js web editor.

  • (1) [Object]
    • 0: Object
      • handInViewConfidence: 0.9440168142318726
    • boundingBox: Object
      • topLeft: Array(2)
      • bottomRight: Array(2)
    • landmarks: Array(21)
      • 0: Array(3)
      • 1: Array(3)
      • 2: Array(3)
      • 3: Array(3)
      • 4: Array(3)
      • 5: Array(3)
      • 6: Array(3)
      • 7: Array(3)
      • 8: Array(3)
      • 9: Array(3)
      • 10: Array(3)
      • 11: Array(3)
      • 12: Array(3)
      • 13: Array(3)
      • 14: Array(3)
      • 15: Array(3)
      • 16: Array(3)
      • 17: Array(3)
      • 18: Array(3)
      • 19: Array(3)
      • 20: Array(3)
    • annotations: Object
      • thumb: Array(4)
        • 0: Array(3)
        • 1: Array(3)
        • 2: Array(3)
        • 3: Array(3)
      • indexFinger: Array(4)
        • 0: Array(3)
        • 1: Array(3)
        • 2: Array(3)
        • 3: Array(3)
      • middleFinger: Array(4)
        • 0: Array(3)
        • 1: Array(3)
        • 2: Array(3)
        • 3: Array(3)
      • ringFinger: Array(4)
        • 0: Array(3)
        • 1: Array(3)
        • 2: Array(3)
        • 3: Array(3)
      • pinky: Array(4)
        • 0: Array(3)
        • 1: Array(3)
        • 2: Array(3)
        • 3: Array(3)
      • palmBase: Array(1)
        • 0: Array(3)

Inside each of these arrays are indexed coordinate values stored as floating-point numbers, for example:

  • indexFinger: Array(4)
    • 0: Array(3)
      • 0: 234.00797608287567
      • 1: 259.7274989915427
      • 2: -7.029378414154053
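
As a small sketch of how these values can be used alongside the example below, the annotations object lets you read a named point directly instead of counting through the 21 landmarks. The helper name drawIndexFingertip is an assumption for illustration; it relies on the indexFinger points being ordered from the base of the finger to the tip.

// a minimal sketch: mark the tip of the index finger
// assumes the global "predictions" array from the handpose example below
function drawIndexFingertip() {
  if (predictions.length > 0) {
    // the last of the four indexFinger points is the fingertip: [x, y, z]
    const tip = predictions[0].annotations.indexFinger[3];
    fill(255, 0, 255);
    noStroke();
    ellipse(tip[0], tip[1], 16, 16);
  }
}

Calling drawIndexFingertip() from draw() after the video frame is drawn places a marker on the fingertip.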

Handpose Example from ml5.js Documentation

// example from ml5.js documentation
// https://learn.ml5js.org/#/reference/handpose
// https://editor.p5js.org/ml5/sketches/Handpose_Webcam

let handpose;
let video;
let predictions = [];

function setup() {
  createCanvas(640, 480);
  video = createCapture(VIDEO);
  video.size(width, height);

  handpose = ml5.handpose(video, modelReady);

  // This sets up an event that fills the global variable "predictions"
  // with an array every time new hand poses are detected
  handpose.on("predict", (results) => {
    predictions = results;
  });

  // Hide the video element, and just show the canvas
  video.hide();
}

function modelReady() {
  console.log("Model ready!");
}

function draw() {
  image(video, 0, 0, width, height);

  // Call drawKeypoints() to draw all detected keypoints
  drawKeypoints();
}

// A function to draw ellipses over the detected keypoints
function drawKeypoints() {
  for (let i = 0; i < predictions.length; i += 1) {
    const prediction = predictions[i];
    for (let j = 0; j < prediction.landmarks.length; j += 1) {
      const keypoint = prediction.landmarks[j];
      fill(0, 255, 0);
      noStroke();
      ellipse(keypoint[0], keypoint[1], 10, 10);
    }
  }
}

Handpose Options

These are options that can be passed to the ml5.js handpose model when it is created in setup(). Instead of handpose = ml5.handpose(video, modelReady);, add an options argument: handpose = ml5.handpose(video, options, modelReady);

const options = {
  flipHorizontal: false, // boolean value for if the video should be flipped, defaults to false
  maxContinuousChecks: Infinity, // How many frames to go without running the bounding box detector. Defaults to infinity, but try a lower value if the detector is consistently producing bad predictions.
  detectionConfidence: 0.8, // Threshold for discarding a prediction. Defaults to 0.8.
  scoreThreshold: 0.75, // A threshold for removing multiple (likely duplicate) detections based on a "non-maximum suppression" algorithm. Defaults to 0.75
  iouThreshold: 0.3, // A float representing the threshold for deciding whether boxes overlap too much in non-maximum suppression. Must be between [0, 1]. Defaults to 0.3.
};
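
Put together, creating the model with options might look like the sketch below. It mirrors the handpose example above; the raised detectionConfidence value is just an illustration, and any option left out of the object falls back to its default.

// a minimal sketch of creating the handpose model with an options object
let handpose;
let video;
let predictions = [];

const options = {
  flipHorizontal: false,
  detectionConfidence: 0.9, // stricter than the 0.8 default, as an example
};

function setup() {
  createCanvas(640, 480);
  video = createCapture(VIDEO);
  video.size(width, height);

  // the options object goes between the video source and the ready callback
  handpose = ml5.handpose(video, options, modelReady);

  // fill the global "predictions" array whenever new hand poses are detected
  handpose.on("predict", (results) => {
    predictions = results;
  });

  video.hide();
}

function modelReady() {
  console.log("Model ready!");
}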

Facemesh

Facemesh p5.js Editor Demo from ml5.js Documentation

let facemesh;
let video;
let predictions = [];

function setup() {
  createCanvas(640, 480);
  video = createCapture(VIDEO);
  video.size(width, height);

  facemesh = ml5.facemesh(video, modelReady);

  // This sets up an event that fills the global variable "predictions"
  // with an array every time new predictions are made
  facemesh.on("predict", (results) => {
    predictions = results;
  });

  // Hide the video element, and just show the canvas
  video.hide();
}

function modelReady() {
  console.log("Model ready!");
}

function draw() {
  image(video, 0, 0, width, height);

  // Call drawKeypoints() to draw all detected keypoints
  drawKeypoints();
}

// A function to draw ellipses over the detected keypoints
function drawKeypoints() {
  for (let i = 0; i < predictions.length; i += 1) {
    const keypoints = predictions[i].scaledMesh;

    // Draw facial keypoints.
    for (let j = 0; j < keypoints.length; j += 1) {
      const [x, y] = keypoints[j];

      fill(0, 255, 0);
      ellipse(x, y, 5, 5);
    }
  }
}

ml5.js PoseNet

ml5.js PoseNet Reference Page

// Example Sketch from ml5.js
// https://learn.ml5js.org/#/reference/posenet
// https://editor.p5js.org/ml5/sketches/PoseNet_webcam

// Copyright (c) 2019 ml5
//
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT

/* ===
ml5 Example
PoseNet example using p5.js
=== */

let video;
let poseNet;
let poses = [];

function setup() {
  createCanvas(640, 480);
  video = createCapture(VIDEO);
  video.size(width, height);

  // Create a new poseNet method with a single detection
  poseNet = ml5.poseNet(video, modelReady);
  // This sets up an event that fills the global variable "poses"
  // with an array every time new poses are detected
  poseNet.on("pose", function (results) {
    poses = results;
  });
  // Hide the video element, and just show the canvas
  video.hide();
}

function modelReady() {
  // this assumes the HTML page includes an element with id "status";
  // if it does not, console.log("Model ready!") works just as well
  select("#status").html("Model Loaded");
}

function draw() {
  image(video, 0, 0, width, height);

  // We can call both functions to draw all keypoints and the skeletons
  drawKeypoints();
  drawSkeleton();
}

// A function to draw ellipses over the detected keypoints
function drawKeypoints() {
  // Loop through all the poses detected
  for (let i = 0; i < poses.length; i++) {
    // For each pose detected, loop through all the keypoints
    let pose = poses[i].pose;
    for (let j = 0; j < pose.keypoints.length; j++) {
      // A keypoint is an object describing a body part (like rightArm or leftShoulder)
      let keypoint = pose.keypoints[j];
      // Only draw an ellipse if the pose probability is bigger than 0.2
      if (keypoint.score > 0.2) {
        fill(255, 0, 0);
        noStroke();
        ellipse(keypoint.position.x, keypoint.position.y, 10, 10);
      }
    }
  }
}

// A function to draw the skeletons
function drawSkeleton() {
  // Loop through all the skeletons detected
  for (let i = 0; i < poses.length; i++) {
    let skeleton = poses[i].skeleton;
    // For every skeleton, loop through all body connections
    for (let j = 0; j < skeleton.length; j++) {
      let partA = skeleton[j][0];
      let partB = skeleton[j][1];
      stroke(255, 0, 0);
      line(
        partA.position.x,
        partA.position.y,
        partB.position.x,
        partB.position.y
      );
    }
  }
}
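
In addition to score and position, each PoseNet keypoint carries a part name such as "nose" or "leftWrist". The variation below is a sketch, not part of the ml5.js example; the function name drawLabeledKeypoints is an assumption for illustration.

// A sketch of labeling each detected keypoint with its body-part name
// assumes the same global "poses" array as the example above
function drawLabeledKeypoints() {
  for (let i = 0; i < poses.length; i++) {
    let pose = poses[i].pose;
    for (let j = 0; j < pose.keypoints.length; j++) {
      let keypoint = pose.keypoints[j];
      if (keypoint.score > 0.2) {
        fill(255, 0, 0);
        noStroke();
        ellipse(keypoint.position.x, keypoint.position.y, 10, 10);
        // keypoint.part holds the body-part name, e.g. "nose" or "leftWrist"
        fill(255);
        textSize(12);
        text(keypoint.part, keypoint.position.x + 8, keypoint.position.y);
      }
    }
  }
}

Calling drawLabeledKeypoints() from draw() in place of drawKeypoints() shows which body part PoseNet has assigned to each point.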