Update app.py
app.py CHANGED
@@ -8,7 +8,7 @@ import torch
 from transformers import AutoModelForCausalLM
 from transformers import AutoTokenizer
 # from next_word_prediction import GPT2
-
+
 
 
 ### code snippet
@@ -85,24 +85,6 @@ def inference(audio):
     return getText, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
 
 
-def transcribe(audio, state=""):
-    time.sleep(1)
-    text = p(audio)["text"]
-    state += text + " "
-    return state, state
-
-gr.Interface(
-    fn=transcribe,
-    inputs=[
-        gr.inputs.Audio(source="microphone", type="filepath"),
-        "state"
-    ],
-    outputs=[
-        "textbox",
-        "state"
-    ],
-    live=True).launch()
-
 
 css = """
 .gradio-container {
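For reference, the block this commit removes was a standalone live-transcription demo. Below is a self-contained sketch of it; the `time` and `gradio` imports and the ASR pipeline `p` are not shown in this hunk, so they are filled in here as assumptions (the `gr.inputs.Audio(source=...)` / `"state"` shortcuts are the legacy Gradio Interface API that the original code used):

import time

import gradio as gr
from transformers import pipeline

# Assumption: `p` is an automatic-speech-recognition pipeline defined elsewhere
# in app.py; the exact checkpoint is not visible in this diff.
p = pipeline("automatic-speech-recognition")

def transcribe(audio, state=""):
    time.sleep(1)                  # throttle streaming updates a little
    text = p(audio)["text"]        # transcribe the recorded audio file
    state += text + " "            # append to the running transcript
    return state, state            # textbox output and carried-over state

gr.Interface(
    fn=transcribe,
    inputs=[
        gr.inputs.Audio(source="microphone", type="filepath"),  # legacy Gradio audio input
        "state",
    ],
    outputs=["textbox", "state"],
    live=True,
).launch()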