Updated Application File
app.py CHANGED
@@ -18,10 +18,8 @@ output_frames = []
 
 def find_hands(brain, img):
     if img is not None:
-        # print(type(img))
         img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # opencv image is in BGR form but mp is trained with RGB
-        results = brain.process(
-            img_rgb) # process finds the hands and outputs classification and 21 landmarks for each hand
+        results = brain.process(img_rgb) # process finds the hands and outputs classification and 21 landmarks for each hand
         all_hands = [] # initializing array to hold the dictionary for the hands
         h, w, _ = img.shape # get height and width of image for scaling
         if results.multi_hand_landmarks:
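For context: the diff never shows where brain is constructed, but its .process() call and the multi_hand_landmarks attribute match the MediaPipe Hands solution. A minimal sketch of the assumed setup (the parameter values and 'frame.jpg' placeholder are not from the commit):

import cv2
import mediapipe as mp

# Assumed setup: `brain` is presumably a MediaPipe Hands instance.
brain = mp.solutions.hands.Hands(max_num_hands=2, min_detection_confidence=0.5)

frame = cv2.imread('frame.jpg')                     # any BGR frame, e.g. from cv2.VideoCapture
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # OpenCV yields BGR; MediaPipe expects RGB
results = brain.process(frame_rgb)                  # classification + 21 landmarks per detected hand
print(results.multi_hand_landmarks)                 # None when no hands are found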
@@ -43,7 +41,6 @@ def find_hands(brain, img):
     return 0
 
 
-
 def is_drawing(index, thumb): # proximity function with arbitrary threshold
     npindex = np.array((index[0], index[1]))
     npthumb = np.array((thumb[0], thumb[1]))
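The hunk cuts is_drawing off after the two array conversions. A plausible completion using a Euclidean-distance pinch check; the 40-pixel threshold is an assumed value (the original comment only calls it arbitrary):

import numpy as np

def is_drawing(index, thumb): # proximity function with arbitrary threshold
    npindex = np.array((index[0], index[1]))
    npthumb = np.array((thumb[0], thumb[1]))
    # Pinch gesture: index fingertip near the thumb tip counts as "pen down".
    # The 40-pixel cutoff is an assumption, not taken from the diff.
    return np.linalg.norm(npindex - npthumb) < 40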
@@ -190,7 +187,8 @@ def show(video): # main
 
     return 'any.webm'
 
-
+title = 'Air Draw'
+desc = 'A Mediapipe Hands Wrapper for Drawing in the Air'
 iface = gr.Interface(fn=show, inputs=gr.inputs.Video(source="webcam"), outputs='video')
 
 iface.launch(share=True, enable_queue=True)
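Note that this change only defines title and desc; the gr.Interface call is left untouched, so neither variable is used yet. gr.Interface does accept title and description keyword arguments, so the eventual wiring would presumably look like the sketch below (the keyword usage is an assumption, not part of this commit):

iface = gr.Interface(
    fn=show,
    inputs=gr.inputs.Video(source="webcam"),  # legacy pre-3.x Gradio input namespace, as in the diff
    outputs='video',
    title=title,        # assumed wiring; this commit only defines the variables
    description=desc,
)
iface.launch(share=True, enable_queue=True)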
|