Using ml5 to Practice p5.js (Part 2)

Introduction

First, what is p5.js?
p5.js is built on Processing and provides a friendly canvas interface in the browser. (https://creativecoding.in/2020/04/24/p5-js-%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B/)
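To give a feel for how small a p5.js program can be, here is a minimal sketch that is not part of this tutorial's files; it only uses standard p5.js functions (setup, draw, createCanvas, background, ellipse, mouseX, mouseY):

// Minimal p5.js sketch: p5 creates the <canvas> for you and calls draw() every frame.
function setup() {
    createCanvas(360, 280);
}

function draw() {
    background(220);                  // clear the canvas each frame
    ellipse(mouseX, mouseY, 20, 20);  // draw a circle that follows the mouse
}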

What p5.js and ml5 have in common is that both can use the GPU, and the GPU's dedicated memory, from inside the browser.
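If you want to confirm the GPU claim yourself, the following standalone page is a rough sketch, not one of this tutorial's files, and the TensorFlow.js CDN URL is an assumption: it loads TensorFlow.js (the library ml5 is built on) and logs the active backend; "webgl" means tensor operations run on the GPU through the browser.

<html>
<head>
    <meta charset="UTF-8">
    <title>Backend Check</title>
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs/dist/tf.min.js"></script>
</head>
<body>
    <script>
        // tf.ready() resolves once a backend has been chosen and initialized.
        tf.ready().then(function () {
            // Typically prints "webgl" when the GPU is available, "cpu" otherwise.
            console.log('TensorFlow.js backend:', tf.getBackend());
        });
    </script>
</body>
</html>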

Preparation

Next, prepare the materials.

In the hello-ml5 folder, create two files, one named face_p5.html and the other face_nop5.html, and enter the following code into face_p5.html and face_nop5.html respectively.

The code for face_p5.html:

<html>
<head>
    <meta charset="UTF-8">
    <title>FaceApi Landmarks Demo With p5.js</title>
    <script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/0.9.0/p5.min.js"></script>
    <script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/0.9.0/addons/p5.dom.min.js"></script>
    <script src="https://unpkg.com/ml5@latest/dist/ml5.min.js"></script>
</head>
<body>
    <h1>FaceApi Landmarks Demo With p5.js</h1>
    <script src="sketch_face_p5.js"></script>
</body>
</html>

The code for face_nop5.html:

<html>
<head>
    <meta charset="UTF-8">
    <title>FaceApi Landmarks Demo With no p5.js</title>
    <script src="https://unpkg.com/ml5@latest/dist/ml5.min.js" type="text/javascript"></script>
</head>
<body>
    <h1>FaceApi Landmarks Demo With no p5.js</h1>
    <script src="sketch_face_nop5.js"></script>
</body>
</html>

Also in hello-ml5, create two more files, one named sketch_face_p5.js and the other sketch_face_nop5.js, and enter the following code into each.

The code for sketch_face_p5.js:

let faceapi;
let video;
let detections;

const detection_options = {
    withLandmarks: true,
    withDescriptors: false
}

let width = 360;
let height = 280;

function setup() {
    createCanvas(width, height);

    // load up your video
    video = createCapture(VIDEO);
    video.size(width, height);
    video.hide(); // Hide the video element, and just show the canvas

    faceapi = ml5.faceApi(video, detection_options, modelReady);
}

function modelReady() {
    console.log('ready!');
    faceapi.detect(gotResults);
}

function gotResults(err, result) {
    if (err) {
        console.log(err);
        return;
    }
    // console.log(result)
    detections = result;

    image(video, 0, 0, width, height);
    if (detections) {
        if (detections.length > 0) {
            drawLandmarks(detections);
        }
    }
    faceapi.detect(gotResults);
}

function drawLandmarks(detections) {
    stroke(161, 95, 251);

    for (let i = 0; i < detections.length; i++) {
        const alignedRect = detections[i].alignedRect;
        const x = alignedRect._box._x;
        const y = alignedRect._box._y;
        const boxWidth = alignedRect._box._width;
        const boxHeight = alignedRect._box._height;
        rect(x, y, boxWidth, boxHeight);

        const mouth = detections[i].parts.mouth;
        const nose = detections[i].parts.nose;
        const leftEye = detections[i].parts.leftEye;
        const rightEye = detections[i].parts.rightEye;
        const rightEyeBrow = detections[i].parts.rightEyeBrow;
        const leftEyeBrow = detections[i].parts.leftEyeBrow;

        drawPart(mouth, true);
        drawPart(nose, false);
        drawPart(leftEye, true);
        drawPart(leftEyeBrow, false);
        drawPart(rightEye, true);
        drawPart(rightEyeBrow, false);
    }
}

function drawPart(feature, closed) {
    beginShape();
    for (let i = 0; i < feature.length; i++) {
        const x = feature[i]._x;
        const y = feature[i]._y;
        vertex(x, y);
    }
    if (closed === true) {
        endShape(CLOSE);
    } else {
        endShape();
    }
}

The code for sketch_face_nop5.js:

let faceapi;
let video;
let detections;

const detection_options = {
    withLandmarks: true,
    withDescriptors: false
}

let width = 360;
let height = 280;

window.addEventListener('DOMContentLoaded', function () {
    setup();
});

async function setup() {
    createCanvas(width, height);

    // load up your video
    video = await getVideo();

    faceapi = ml5.faceApi(video, detection_options, modelReady);
}

let canvas, ctx;

function createCanvas(w, h) {
    canvas = document.createElement("canvas");
    canvas.width = w;
    canvas.height = h;
    document.body.appendChild(canvas);
    ctx = canvas.getContext('2d');
}

async function getVideo() {
    const videoElement = document.createElement('video');
    videoElement.width = width;
    videoElement.height = height;
    videoElement.setAttribute("style", "display: none;"); // Hide the video element, and just show the canvas
    document.body.appendChild(videoElement);

    // Create a webcam capture
    const capture = await navigator.mediaDevices.getUserMedia({ video: true });
    videoElement.srcObject = capture;
    videoElement.play();

    return videoElement;
}

function modelReady() {
    console.log('ready!');
    faceapi.detect(gotResults);
}

function gotResults(err, result) {
    if (err) {
        console.log(err);
        return;
    }
    // console.log(result)
    detections = result;

    ctx.drawImage(video, 0, 0, width, height);
    if (detections) {
        if (detections.length > 0) {
            drawLandmarks(detections);
        }
    }
    faceapi.detect(gotResults);
}

function drawLandmarks(detections) {
    ctx.strokeStyle = "#a15ffb";

    for (let i = 0; i < detections.length; i++) {
        const alignedRect = detections[i].alignedRect;
        const x = alignedRect._box._x;
        const y = alignedRect._box._y;
        const boxWidth = alignedRect._box._width;
        const boxHeight = alignedRect._box._height;
        ctx.beginPath();
        ctx.rect(x, y, boxWidth, boxHeight);
        ctx.fillStyle = "white";
        ctx.fill();

        const mouth = detections[i].parts.mouth;
        const nose = detections[i].parts.nose;
        const leftEye = detections[i].parts.leftEye;
        const rightEye = detections[i].parts.rightEye;
        const rightEyeBrow = detections[i].parts.rightEyeBrow;
        const leftEyeBrow = detections[i].parts.leftEyeBrow;

        drawPart(mouth, true);
        drawPart(nose, false);
        drawPart(leftEye, true);
        drawPart(leftEyeBrow, false);
        drawPart(rightEye, true);
        drawPart(rightEyeBrow, false);
    }
}

function drawPart(feature, closed) {
    ctx.beginPath();
    for (let i = 0; i < feature.length; i++) {
        const x = feature[i]._x;
        const y = feature[i]._y;
        if (i === 0) {
            ctx.moveTo(x, y);
        } else {
            ctx.lineTo(x, y);
        }
    }
    if (closed === true) {
        ctx.closePath();
    }
    ctx.stroke();
}

Running

Once the preparation is done, start Live Server: in VS Code, right-click in the face_p5.html code and choose Open with Live Server from the context menu.
The browser then shows the screen below. (A prompt will first ask whether the page may use the webcam; click Allow.)
(Screenshot: http://img2.58codes.com/2024/20132156w825fEFlIQ.png)
Launch face_nop5.html with the same steps, and the screen below appears.
(Screenshot: http://img2.58codes.com/2024/20132156LsMuGLiKB9.png)
As you can see, the two produce the same result.

Comparison

Comparing sketch_face_p5.js with sketch_face_nop5.js in WinMerge makes it clear that everything p5.js does, plain JavaScript can already do;
but p5.js really does make the code more concise, cutting it by almost a quarter in this example.
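To see where the savings come from, here is one fragment from each of the two sketches above, regrouped side by side for comparison (no new functionality): setting up the hidden webcam feed.

// p5.js version (from sketch_face_p5.js): three helper calls.
video = createCapture(VIDEO);
video.size(width, height);
video.hide(); // Hide the video element, and just show the canvas

// Plain JavaScript version (from sketch_face_nop5.js, inside the async getVideo() function):
// the same setup written by hand against the DOM and getUserMedia APIs.
const videoElement = document.createElement('video');
videoElement.width = width;
videoElement.height = height;
videoElement.setAttribute("style", "display: none;"); // Hide the video element, and just show the canvas
document.body.appendChild(videoElement);
const capture = await navigator.mediaDevices.getUserMedia({ video: true });
videoElement.srcObject = capture;
videoElement.play();

The drawing code shows the same pattern: stroke(), rect(), and vertex() in p5.js versus explicit ctx.beginPath(), ctx.rect(), ctx.lineTo(), and ctx.stroke() calls on the 2D context.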

So both p5.js and ml5 let programmers focus more on their own customized requirements when adopting a new technology.

