audiv027-2024-1

Class 17-05-2024

Bill T. Jones

https://mlart.co/item/detect-poses-and-voice-and-add-the-spoken-words-live-in-the-choreography
Made with: Google Creative Lab. Technology used: PoseNet, Camera, Voice detection, Microphone. What does it do? -> Detects poses and voice and adds the spoken words live into the choreography.
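The notes below walk through the pose half of the piece. For the voice half, here is a minimal sketch of how spoken words could be captured in the browser and handed to the choreography, using the standard Web Speech API; attachWordsToPose is a hypothetical helper, not part of the project's code:

// Capture spoken words live with the browser's SpeechRecognition API
// (prefixed as webkitSpeechRecognition in Chrome).
const SpeechRecognition =
  window.SpeechRecognition || window.webkitSpeechRecognition;
const recognition = new SpeechRecognition();
recognition.continuous = true;     // keep listening across utterances
recognition.interimResults = true; // surface words while still speaking

recognition.onresult = (event) => {
  const latest = event.results[event.results.length - 1];
  const words = latest[0].transcript;
  attachWordsToPose(words); // hypothetical: draw the words near a keypoint
};

recognition.start();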

*How does it interpret the pose?

<!DOCTYPE html>
<html>

<head>
    <title>PoseNet Sketchbook</title>
    <link rel="stylesheet" href="style.css">
    <link rel="icon" href="../../favicon.ico" type="image/x-icon" />
    <meta name="viewport" content="width=device-width, initial-scale=1">
</head>

<body>
    <div id="info" style='display:none'>
    </div>
    <div id="loading">
        Loading the model...
    </div>
    <div id='main' style='display:none'>
        <video id="video" playsinline style=" -moz-transform: scaleX(-1);
        -o-transform: scaleX(-1);
        -webkit-transform: scaleX(-1);
        transform: scaleX(-1);
        display: none;
        ">
        </video>
        <div id="canvas-container">
            <canvas id="output"></canvas>
        </div>
    </div>
    <script src="js/main.js"></script>
</body>
</html>
# **Code explanation**
<meta name="viewport" content="width=device-width, initial-scale=1">: sets the viewport so the page scales correctly on mobile devices.
<script src="js/main.js"></script>: loads an external JavaScript file, main.js, which presumably contains the logic that sets up PoseNet and runs the sketch.
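A plausible sketch of what that main.js does, assuming the published @tensorflow-models/posenet API (the actual file in the PoseNet Sketchbook repo is more elaborate):

import * as posenet from '@tensorflow-models/posenet';

// Open the webcam and pipe it into the hidden <video> element.
async function setupCamera() {
  const video = document.getElementById('video');
  video.srcObject = await navigator.mediaDevices.getUserMedia({video: true});
  await new Promise((resolve) => (video.onloadedmetadata = resolve));
  video.play();
  return video;
}

async function main() {
  const video = await setupCamera();
  // Download the model weights; this is what the "Loading the model..."
  // message in index.html is waiting for.
  const net = await posenet.load();
  document.getElementById('loading').style.display = 'none';
  document.getElementById('main').style.display = 'block';
  // One pose estimate; flipHorizontal mirrors it to match the mirrored video.
  const pose = await net.estimateSinglePose(video, {flipHorizontal: true});
  console.log(pose.score, pose.keypoints);
}

main();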

*Body-detection code:

/**
 * @license
 * Copyright 2019 Google Inc. All Rights Reserved.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * =============================================================================
 */
let canvas = document.getElementById('output');
let ctx = canvas.getContext('2d');
let poseDetection;
let poses = [];
let video;
let videoWidth, videoHeight;

const color = 'aqua';
const lineWidth = 2;

let sketchGuiState = {
  showVideo: true,
  showSkeleton: true,
  showPoints: true,
};

// Called once the model and camera are ready: store references, size the
// canvas to the video, and start the render loop.
export function setupSketch(
  thePoseDetection,
  theVideo,
  theVideoWidth,
  theVideoHeight
) {
  poseDetection = thePoseDetection;
  video = theVideo;
  videoWidth = theVideoWidth;
  videoHeight = theVideoHeight;

  canvas.width = videoWidth;
  canvas.height = videoHeight;
  sketchLoop();
}

// Register the sketch's show/hide toggles in the shared GUI panel.
export function initSketchGui(gui) {
  gui.open();
  gui.add(sketchGuiState, 'showVideo');
  gui.add(sketchGuiState, 'showSkeleton');
  gui.add(sketchGuiState, 'showPoints');
}

// Only ask PoseNet for new poses every other frame, halving the estimation
// cost while still redrawing at full frame rate.
let getNewFrame = true;
async function sketchLoop() {
  if (getNewFrame) {
    poses = await poseDetection.getPoses();
  }
  getNewFrame = !getNewFrame;

  let minPoseConfidence;
  let minPartConfidence;

  // Pull the confidence thresholds for the selected algorithm from the GUI state.
  switch (poseDetection.guiState.algorithm) {
    case 'single-pose':
      minPoseConfidence = +poseDetection.guiState.singlePoseDetection
        .minPoseConfidence;
      minPartConfidence = +poseDetection.guiState.singlePoseDetection
        .minPartConfidence;
      break;
    case 'multi-pose':
      minPoseConfidence = +poseDetection.guiState.multiPoseDetection
        .minPoseConfidence;
      minPartConfidence = +poseDetection.guiState.multiPoseDetection
        .minPartConfidence;
      break;
  }

  ctx.save();

  ctx.clearRect(0, 0, canvas.width, canvas.height);

  ctx.fillStyle = 'black';
  ctx.fillRect(0, 0, canvas.width, canvas.height);

  // Draw the video on the canvas
  if (sketchGuiState.showVideo) {
    ctx.save();
    ctx.scale(-1, 1);
    ctx.translate(-canvas.width, 0);
    ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
    ctx.restore();
  }

  // For each detected pose that clears the threshold, draw its keypoints and skeleton.
  poses.forEach(({score, keypoints}) => {
    if (score >= minPoseConfidence) {
      if (sketchGuiState.showPoints) {
        drawKeypoints(keypoints, minPartConfidence, ctx);
      }
      if (sketchGuiState.showSkeleton) {
        drawSkeleton(keypoints, minPartConfidence, ctx);
      }
    }
  });

  ctx.restore();

  requestAnimationFrame(sketchLoop);
}

// Convert a {y, x} position object into the [y, x] tuple format used by drawSegment.
function toTuple({y, x}) {
  return [y, x];
}

export function drawPoint(ctx, y, x, r, color) {
  ctx.beginPath();
  ctx.arc(x, y, r, 0, 2 * Math.PI);
  ctx.fillStyle = color;
  ctx.fill();
}

/**
 * Draws a line on a canvas, i.e. a joint
 */
export function drawSegment([ay, ax], [by, bx], color, scale, ctx) {
  ctx.beginPath();
  ctx.moveTo(ax * scale, ay * scale);
  ctx.lineTo(bx * scale, by * scale);
  ctx.lineWidth = lineWidth;
  ctx.strokeStyle = color;
  ctx.stroke();
}

/**
 * Draws a pose skeleton by looking up all adjacent keypoints/joints
 */
export function drawSkeleton(keypoints, minConfidence, ctx, scale = 1) {
  const adjacentKeyPoints = poseDetection.getAdjacentKeyPoints(
    keypoints,
    minConfidence
  );

  adjacentKeyPoints.forEach((keypoints) => {
    drawSegment(
      toTuple(keypoints[0].position),
      toTuple(keypoints[1].position),
      color,
      scale,
      ctx
    );
  });
}

/**
 * Draw pose keypoints onto a canvas
 */
export function drawKeypoints(keypoints, minConfidence, ctx, scale = 1) {
  for (let i = 0; i < keypoints.length; i++) {
    const keypoint = keypoints[i];

    if (keypoint.score < minConfidence) {
      continue;
    }

    const {y, x} = keypoint.position;
    drawPoint(ctx, y * scale, x * scale, 5, color);
  }
}
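For reference, each element of the poses array that drawKeypoints and drawSkeleton consume follows PoseNet's standard result shape (the numbers below are illustrative):

// One PoseNet result: an overall confidence score plus 17 named keypoints
// (nose, eyes, ears, shoulders, elbows, wrists, hips, knees, ankles).
{
  score: 0.92,
  keypoints: [
    {part: 'nose',    score: 0.99, position: {x: 301.4, y: 123.8}},
    {part: 'leftEye', score: 0.98, position: {x: 290.2, y: 112.5}},
    // ...15 more keypoints
  ],
}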

# Code explanation: inside the initial setup: