Spaces:
Runtime error
Runtime error
File size: 4,070 Bytes
1a44db9 f6fcb5a 1a44db9 ff3b552 f6fcb5a ff3b552 f6fcb5a ff3b552 621e425 ff3b552 621e425 ff3b552 1a44db9 ff3b552 1a44db9 621e425 f6fcb5a 1a44db9 f6fcb5a 1a44db9 ff3b552 1a44db9 33723c9 1a44db9 33723c9 1a44db9 ff3b552 1a44db9 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 |
import gradio as gr
from matplotlib.pyplot import draw
import mediapipe as mp
import numpy as np
import tempfile
import mediapy as media
import log_utils
# App-wide logger (project-local helper).
logger = log_utils.get_logger()

# MediaPipe hand-tracking singletons, constructed once at import time.
mp_hands = mp.solutions.hands
mp_hands_connections = mp.solutions.hands_connections
hands = mp_hands.Hands()
mp_draw = mp.solutions.drawing_utils

# Dropdown label -> MediaPipe connection-set constant of the same name.
# Built by attribute lookup so each label provably matches its constant.
connections = {
    name: getattr(mp_hands_connections, name)
    for name in (
        'HAND_CONNECTIONS',
        'HAND_PALM_CONNECTIONS',
        'HAND_THUMB_CONNECTIONS',
        'HAND_INDEX_FINGER_CONNECTIONS',
        'HAND_MIDDLE_FINGER_CONNECTIONS',
        'HAND_RING_FINGER_CONNECTIONS',
        'HAND_PINKY_FINGER_CONNECTIONS',
    )
}
def draw_landmarks(img, selected_connections, draw_background):
    """Detect hands in an image and render the selected landmark connections.

    Args:
        img: Image as a numpy array (presumably RGB, as MediaPipe Hands
            expects — TODO confirm against the Gradio component config).
        selected_connections: Key into the module-level ``connections`` dict
            naming which connection set to draw.
        draw_background: If True, draw over a copy of the input image;
            otherwise draw on an all-black canvas of the same shape.

    Returns:
        A new numpy array with the landmarks drawn. The input array is
        never mutated.
    """
    results = hands.process(img)
    # BUG FIX: the original assigned `output_img = img`, so MediaPipe drew
    # directly onto the caller's array whenever draw_background was True —
    # mutating the input in place (and crashing if Gradio hands us a
    # read-only array). Copy before drawing.
    output_img = img.copy() if draw_background else np.zeros_like(img)
    if results.multi_hand_landmarks:
        for hand_landmarks in results.multi_hand_landmarks:
            mp_draw.draw_landmarks(output_img, hand_landmarks, connections[selected_connections])
    return output_img
def process_image(img, selected_connections, draw_background):
    """Log the request, then annotate a single image with hand landmarks.

    Thin wrapper around ``draw_landmarks`` used as the click handler for
    the image tabs; same parameters and return value as that function.
    """
    logger.info(f"Processing image with connections: {selected_connections}, draw background: {draw_background}")
    annotated = draw_landmarks(img, selected_connections, draw_background)
    return annotated
def process_video(video_path, selected_connections, draw_background):
    """Annotate every frame of a video with hand landmarks.

    Reads the video at ``video_path`` frame by frame, draws landmarks on
    each frame via ``draw_landmarks``, and writes the result to a fresh
    temp-derived path (same extension as the input), which is returned.
    """
    logger.info(f"Processing video with connections: {selected_connections}, draw background: {draw_background}")
    extension = video_path.split('.')[-1]
    # NamedTemporaryFile is used only to generate a unique base name; the
    # actual output file is base name + original extension, so it survives
    # the context exit.
    with tempfile.NamedTemporaryFile() as tmp:
        out_path = f"{tmp.name}.{extension}"
        with media.VideoReader(video_path) as reader, media.VideoWriter(
                out_path, shape=reader.shape, fps=reader.fps, bps=reader.bps) as writer:
            for frame in reader:
                writer.add_image(draw_landmarks(frame, selected_connections, draw_background))
    return out_path
# --- Gradio UI: wires the processing functions above to a Blocks layout. ---
demo = gr.Blocks()
with demo:
    gr.Markdown(
    """
    # Hand & Finger Tracking
    This is a demo of hand and finger tracking using [Google's MediaPipe](https://google.github.io/mediapipe/solutions/hands.html).
    """)
    with gr.Column():
        # Shared settings applied to every tab's submission.
        draw_background = gr.Checkbox(value=True, label="Draw background?")
        connection_keys = list(connections.keys())
        selected_connections = gr.Dropdown(
            label="Select connections to draw",
            choices=connection_keys,
            value=connection_keys[0],
        )
    with gr.Tabs():
        with gr.TabItem(label="Upload an image"):
            uploaded_image = gr.Image(type="numpy")
            submit_uploaded_image = gr.Button(value="Process Image")
        with gr.TabItem(label="Take a picture"):
            camera_picture = gr.Image(source="webcam", type="numpy")
            submit_camera_picture = gr.Button(value="Process Image")
        with gr.TabItem(label="Record a video"):
            recorded_video = gr.Video(source="webcam", format="mp4")
            submit_recorded_video = gr.Button(value="Process Video")
        with gr.TabItem(label="Upload a video"):
            uploaded_video = gr.Video(format="mp4")
            submit_uploaded_video = gr.Button(value="Process Video")
    with gr.Column():
        processed_video = gr.Video()
        processed_image = gr.Image()
    gr.Markdown('<img id="visitor-badge" alt="visitor badge" src="https://visitor-badge.glitch.me/badge?page_id=kristyc.mediapipe-hands" />')

    setting_inputs = [selected_connections, draw_background]
    submit_uploaded_image.click(fn=process_image, inputs=[uploaded_image, *setting_inputs], outputs=[processed_image])
    submit_camera_picture.click(fn=process_image, inputs=[camera_picture, *setting_inputs], outputs=[processed_image])
    submit_recorded_video.click(fn=process_video, inputs=[recorded_video, *setting_inputs], outputs=[processed_video])
    # BUG FIX: this handler previously read from `recorded_video`, so the
    # "Upload a video" button processed the webcam recording (or nothing)
    # instead of the uploaded file.
    submit_uploaded_video.click(fn=process_video, inputs=[uploaded_video, *setting_inputs], outputs=[processed_video])

demo.launch()
|