DHEIVER committed on
Commit
e79d6a3
·
verified ·
1 Parent(s): cb7541e

Update app.js

Browse files
Files changed (1) hide show
  1. app.js +164 -59
app.js CHANGED
@@ -1,71 +1,176 @@
1
// Grab the DOM handles the demo needs: the camera preview <video>, the
// overlay canvas, its 2D drawing context, and the status label.
const video = document.getElementById('video');
const canvas = document.getElementById('outputCanvas');
const ctx = canvas.getContext('2d');
const status = document.getElementById('status');
6
 
7
// Load the MediaPipe Hands model, start the camera, and kick off the
// real-time detection loop. Progress and failures are reported through
// the status label; any error in the chain lands in the catch below.
async function setupHandPoseDetection() {
  try {
    const detectorConfig = {
      runtime: 'tfjs', // Use TensorFlow.js runtime
      modelType: 'full' // Choose between 'lite' or 'full'
    };
    const detector = await handPoseDetection.createDetector(
        handPoseDetection.SupportedModels.MediaPipeHands, detectorConfig);

    status.textContent = 'Model loaded!';

    // Camera must be streaming before the first detection pass.
    await setupCamera();

    // Fire-and-forget: detectHands reschedules itself via rAF.
    detectHands(detector);
  } catch (error) {
    console.error('Error loading the model:', error);
    status.textContent = 'Failed to load model.';
  }
}
30
 
31
// Request the user's camera and wire the stream to the <video> element.
// Resolves with the video element once its metadata (dimensions) is known.
//
// Fix: size the overlay canvas to the actual video resolution. It was
// never set anywhere, so keypoints (drawn in video-pixel coordinates)
// landed on the default 300x150 canvas and did not line up with the feed.
async function setupCamera() {
  const stream = await navigator.mediaDevices.getUserMedia({ video: true });
  video.srcObject = stream;
  return new Promise((resolve) => {
    video.onloadedmetadata = () => {
      // Match the overlay canvas to the real capture resolution so
      // detector coordinates map 1:1 onto the canvas.
      canvas.width = video.videoWidth;
      canvas.height = video.videoHeight;
      resolve(video);
    };
  });
}
41
 
42
// One iteration of the render loop: detect hands in the current video
// frame, draw a red dot at every keypoint, then schedule the next frame.
async function detectHands(detector) {
  if (video.readyState === video.HAVE_ENOUGH_DATA) {
    // Start each frame from a clean canvas.
    ctx.clearRect(0, 0, canvas.width, canvas.height);

    const hands = await detector.estimateHands(video);

    // Iterating an empty result set is a no-op, so no length guard needed.
    for (const hand of hands) {
      for (const { x, y } of hand.keypoints) {
        ctx.beginPath();
        ctx.arc(x, y, 5, 0, 2 * Math.PI); // 5px-radius circle per keypoint
        ctx.fillStyle = 'red';
        ctx.fill();
      }
    }
  }

  // Reschedule unconditionally so the loop survives video buffering.
  requestAnimationFrame(() => detectHands(detector));
}
69
 
70
// Entry point: load the model, then start the camera and detection loop.
setupHandPoseDetection();
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import DeviceDetector from "https://cdn.skypack.dev/[email protected]";
 
 
 
 
2
 
3
+ // Uso: testSupport({client?: string, os?: string}[])
4
+ // Client e os são expressões regulares.
5
+ // Veja: https://cdn.jsdelivr.net/npm/[email protected]/README.md para
6
+ // valores legais para client e os
7
+ testSupport([
8
+ {client: 'Chrome'},
9
+ ]);
 
 
 
10
 
11
+ function testSupport(supportedDevices:{client?: string; os?: string;}[]) {
12
+ const deviceDetector = new DeviceDetector();
13
+ const detectedDevice = deviceDetector.parse(navigator.userAgent);
14
+ let isSupported = false;
15
+ for (const device of supportedDevices) {
16
+ if (device.client !== undefined) {
17
+ const re = new RegExp(`^${device.client}$`);
18
+ if (!re.test(detectedDevice.client.name)) {
19
+ continue;
20
+ }
21
+ }
22
+ if (device.os !== undefined) {
23
+ const re = new RegExp(`^${device.os}$`);
24
+ if (!re.test(detectedDevice.os.name)) {
25
+ continue;
26
+ }
27
+ }
28
+ isSupported = true;
29
+ break;
30
+ }
31
+ if (!isSupported) {
32
+ alert(`Esta demonstração, rodando em ${detectedDevice.client.name}/${detectedDevice.os.name}, ` +
33
+ `não é bem suportada no momento, continue por sua conta e risco.`);
34
  }
35
  }
36
 
37
+ const controls = window;
38
+ const drawingUtils = window;
39
+ const mpFaceMesh = window;
40
+ const config = {locateFile: (file) => {
41
+ return `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh@` +
42
+ `${mpFaceMesh.VERSION}/${file}`;
43
+ }};
 
 
 
44
 
45
+ // Nossos quadros de entrada virão daqui.
46
+ const videoElement =
47
+ document.getElementsByClassName('input_video')[0] as HTMLVideoElement;
48
+ const canvasElement =
49
+ document.getElementsByClassName('output_canvas')[0] as HTMLCanvasElement;
50
+ const controlsElement =
51
+ document.getElementsByClassName('control-panel')[0] as HTMLDivElement;
52
+ const canvasCtx = canvasElement.getContext('2d')!;
53
 
54
+ /**
55
+ * Opções da solução.
56
+ */
57
+ const solutionOptions = {
58
+ selfieMode: true,
59
+ enableFaceGeometry: false,
60
+ maxNumFaces: 1,
61
+ refineLandmarks: false,
62
+ minDetectionConfidence: 0.5,
63
+ minTrackingConfidence: 0.5
64
+ };
65
 
66
+ // Adicionaremos isso ao nosso painel de controle mais tarde, mas salvaremos aqui para que possamos
67
+ // chamar tick() cada vez que o gráfico for executado.
68
+ const fpsControl = new controls.FPS();
69
+
70
+ // Otimização: Desative o spinner animado após sua animação de ocultação ser concluída.
71
+ const spinner = document.querySelector('.loading')! as HTMLDivElement;
72
+ spinner.ontransitionend = () => {
73
+ spinner.style.display = 'none';
74
+ };
75
+
76
+ function onResults(results: mpFaceMesh.Results): void {
77
+ // Ocultar o spinner.
78
+ document.body.classList.add('loaded');
79
+
80
+ // Atualizar a taxa de quadros.
81
+ fpsControl.tick();
82
+
83
+ // Desenhar as sobreposições.
84
+ canvasCtx.save();
85
+ canvasCtx.clearRect(0, 0, canvasElement.width, canvasElement.height);
86
+ canvasCtx.drawImage(
87
+ results.image, 0, 0, canvasElement.width, canvasElement.height);
88
+ if (results.multiFaceLandmarks) {
89
+ for (const landmarks of results.multiFaceLandmarks) {
90
+ drawingUtils.drawConnectors(
91
+ canvasCtx, landmarks, mpFaceMesh.FACEMESH_TESSELATION,
92
+ {color: '#C0C0C070', lineWidth: 1});
93
+ drawingUtils.drawConnectors(
94
+ canvasCtx, landmarks, mpFaceMesh.FACEMESH_RIGHT_EYE,
95
+ {color: '#FF3030'});
96
+ drawingUtils.drawConnectors(
97
+ canvasCtx, landmarks, mpFaceMesh.FACEMESH_RIGHT_EYEBROW,
98
+ {color: '#FF3030'});
99
+ drawingUtils.drawConnectors(
100
+ canvasCtx, landmarks, mpFaceMesh.FACEMESH_LEFT_EYE,
101
+ {color: '#30FF30'});
102
+ drawingUtils.drawConnectors(
103
+ canvasCtx, landmarks, mpFaceMesh.FACEMESH_LEFT_EYEBROW,
104
+ {color: '#30FF30'});
105
+ drawingUtils.drawConnectors(
106
+ canvasCtx, landmarks, mpFaceMesh.FACEMESH_FACE_OVAL,
107
+ {color: '#E0E0E0'});
108
+ drawingUtils.drawConnectors(
109
+ canvasCtx, landmarks, mpFaceMesh.FACEMESH_LIPS, {color: '#E0E0E0'});
110
+ if (solutionOptions.refineLandmarks) {
111
+ drawingUtils.drawConnectors(
112
+ canvasCtx, landmarks, mpFaceMesh.FACEMESH_RIGHT_IRIS,
113
+ {color: '#FF3030'});
114
+ drawingUtils.drawConnectors(
115
+ canvasCtx, landmarks, mpFaceMesh.FACEMESH_LEFT_IRIS,
116
+ {color: '#30FF30'});
117
+ }
118
  }
119
  }
120
+ canvasCtx.restore();
 
 
121
  }
122
 
123
+ const faceMesh = new mpFaceMesh.FaceMesh(config);
124
+ faceMesh.setOptions(solutionOptions);
125
+ faceMesh.onResults(onResults);
126
+
127
+ // Apresentar um painel de controle através do qual o usuário pode manipular as opções da solução.
128
+ new controls
129
+ .ControlPanel(controlsElement, solutionOptions)
130
+ .add([
131
+ new controls.StaticText({title: 'MediaPipe Face Mesh'}),
132
+ fpsControl,
133
+ new controls.Toggle({title: 'Modo Selfie', field: 'selfieMode'}),
134
+ new controls.SourcePicker({
135
+ onFrame:
136
+ async (input: controls.InputImage, size: controls.Rectangle) => {
137
+ const aspect = size.height / size.width;
138
+ let width: number, height: number;
139
+ if (window.innerWidth > window.innerHeight) {
140
+ height = window.innerHeight;
141
+ width = height / aspect;
142
+ } else {
143
+ width = window.innerWidth;
144
+ height = width * aspect;
145
+ }
146
+ canvasElement.width = width;
147
+ canvasElement.height = height;
148
+ await faceMesh.send({image: input});
149
+ },
150
+ }),
151
+ new controls.Slider({
152
+ title: 'Número Máximo de Rostos',
153
+ field: 'maxNumFaces',
154
+ range: [1, 4],
155
+ step: 1
156
+ }),
157
+ new controls.Toggle(
158
+ {title: 'Refinar Pontos de Referência', field: 'refineLandmarks'}),
159
+ new controls.Slider({
160
+ title: 'Confiança Mínima de Detecção',
161
+ field: 'minDetectionConfidence',
162
+ range: [0, 1],
163
+ step: 0.01
164
+ }),
165
+ new controls.Slider({
166
+ title: 'Confiança Mínima de Rastreamento',
167
+ field: 'minTrackingConfidence',
168
+ range: [0, 1],
169
+ step: 0.01
170
+ }),
171
+ ])
172
+ .on(x => {
173
+ const options = x as mpFaceMesh.Options;
174
+ videoElement.classList.toggle('selfie', options.selfieMode);
175
+ faceMesh.setOptions(options);
176
+ });