Beijuka committed
Commit 3f5ce70 (verified) · Parent: ebb16f3

Update app.py

Files changed (1)
  1. app.py +56 -47
app.py CHANGED
@@ -81,52 +81,61 @@ def save_feedback(audio_file, transcription, user_id, lang, env, device, domain,
 # Gradio UI
 with gr.Blocks() as demo:
     gr.Markdown("## African ASR Evaluation Platform")
-
-    with gr.Row():
-        audio_input = gr.Audio(sources=["upload", "microphone"], type="filepath", label="Upload or record audio")
-        lang = gr.Dropdown(list(model_map.keys()), label="Select Language",value=None )
-        user_id = gr.Textbox(label="Please enter user ID.*")
-
-    transcribed_text = gr.Textbox(label="Transcribed Text")
-    submit_btn = gr.Button("Transcribe")
-    submit_btn.click(fn=transcribe, inputs=[audio_input, lang], outputs=[transcribed_text, audio_input])
-
-    gr.Markdown("---\n## Feedback Form")
-    user_id = gr.Textbox(label="Please enter user ID.*")
-    env = gr.Dropdown(["Studio/Professional Recording", "Quiet Room (minimal noise)", "Noisy Background (e.g., street, cafe, market)","Other"], label="What was the type of recording environment for the speech you evaluated? *",value=None)
-    device = gr.Dropdown(["Mobile Phone/Tablet", "Laptop/Computer Microphone", "Dedicated Microphone (e.g., headset, studio mic)", "Other"], label="What type of recording device was used? *",value=None)
-    domain = gr.Textbox(label="Was the speech related to a specific topic? If yes, please specify the topic (e.g., news, education, medical, law, religious, sports, science).")
-    accuracy = gr.Slider(1, 10, step=1, label="Overall, how accurate was the model's transcription for the audio you reviewed? *")
-    transcript_edit = gr.Textbox(label="If the transcription provided by the model was incorrect, please enter your corrected version.")
-    orthography = gr.Radio(["Yes, mostly correct", "No, major issues", "Partially (some correct, some incorrect)", "Not Applicable"], label="Did the transcription correctly use the standard orthography (including accents, diacritics, special characters) for the language?",value=None)
-    orthography_issues = gr.Textbox(label="If you selected \"No\" or \"Partially\", please describe any significant orthography issues you noticed.")
-    meaning = gr.Slider(1, 5, step=1, label="Did the model's transcription preserve the original meaning of the speech? *")
-    meaning_loss = gr.Textbox(label="If the meaning was not fully preserved (i.e., you rated 1-4 above), please briefly explain how it was changed or lost.")
-    errors = gr.CheckboxGroup([
-        "Substitutions (wrong words used)",
-        "Omissions (words missing)",
-        "Insertions (extra words added)",
-        "Pronunciation-related errors (phonetically plausible but wrong word/spelling)",
-        "Diacritic/Tone/Special Character errors",
-        "Code-switching errors (mixing languages incorrectly)",
-        "Named Entity errors (names of people/places wrong)",
-        "Punctuation errors",
-        "No significant errors observed"
-    ] , label="Which types of errors were most prominent or impactful in the transcriptions? *", value=[])
-    error_examples = gr.Textbox(label="(Optional) Can you provide 1-2 examples of significant errors and how you would correct them?")
-    performance = gr.Textbox(label="Please describe the model's performance in your own words. What did it do well? What did it struggle with? *")
-
-    save_btn = gr.Button("Submit Feedback")
-    output_msg = gr.Textbox(interactive=False)
-    save_btn.click(
-        fn=save_feedback,
-        inputs=[
-            audio_input, transcribed_text, user_id, lang, env, device, domain, accuracy,
-            transcript_edit, orthography, orthography_issues,
-            meaning, meaning_loss, errors, error_examples, performance
-        ],
-        outputs=[output_msg]
-    )
-
+
+    user_id = gr.Textbox(label="Please enter user ID. *")
+    proceed_btn = gr.Button("Proceed")
+
+    with gr.Group(visible=False) as main_ui:
+        with gr.Row():
+            audio_input = gr.Audio(sources=["upload", "microphone"], type="filepath", label="Upload or record audio")
+            lang = gr.Dropdown(list(model_map.keys()), label="Select Language", value=None)
+
+        transcribed_text = gr.Textbox(label="Transcribed Text")
+        submit_btn = gr.Button("Transcribe")
+        submit_btn.click(fn=transcribe, inputs=[audio_input, lang], outputs=[transcribed_text, audio_input])
+
+        gr.Markdown("---\n## Feedback Form")
+        env = gr.Dropdown(["Studio/Professional Recording", "Quiet Room (minimal noise)", "Noisy Background (e.g., street, cafe, market)","Other"], label="What was the type of recording environment for the speech you evaluated? *",value=None)
+        device = gr.Dropdown(["Mobile Phone/Tablet", "Laptop/Computer Microphone", "Dedicated Microphone (e.g., headset, studio mic)", "Other"], label="What type of recording device was used? *",value=None)
+        domain = gr.Textbox(label="Was the speech related to a specific topic? If yes, please specify the topic (e.g., news, education, medical, law, religious, sports, science).")
+        accuracy = gr.Slider(1, 10, step=1, label="Overall, how accurate was the model's transcription for the audio you reviewed? *")
+        transcript_edit = gr.Textbox(label="If the transcription provided by the model was incorrect, please enter your corrected version.")
+        orthography = gr.Radio(["Yes, mostly correct", "No, major issues", "Partially (some correct, some incorrect)", "Not Applicable"], label="Did the transcription correctly use the standard orthography (including accents, diacritics, special characters) for the language?",value=None)
+        orthography_issues = gr.Textbox(label="If you selected \"No\" or \"Partially\", please describe any significant orthography issues you noticed.")
+        meaning = gr.Slider(1, 5, step=1, label="Did the model's transcription preserve the original meaning of the speech? *")
+        meaning_loss = gr.Textbox(label="If the meaning was not fully preserved (i.e., you rated 1-4 above), please briefly explain how it was changed or lost.")
+        errors = gr.CheckboxGroup([
+            "Substitutions (wrong words used)",
+            "Omissions (words missing)",
+            "Insertions (extra words added)",
+            "Pronunciation-related errors (phonetically plausible but wrong word/spelling)",
+            "Diacritic/Tone/Special Character errors",
+            "Code-switching errors (mixing languages incorrectly)",
+            "Named Entity errors (names of people/places wrong)",
+            "Punctuation errors",
+            "No significant errors observed"
+        ] , label="Which types of errors were most prominent or impactful in the transcriptions? *", value=[])
+        error_examples = gr.Textbox(label="(Optional) Can you provide 1-2 examples of significant errors and how you would correct them?")
+        performance = gr.Textbox(label="Please describe the model's performance in your own words. What did it do well? What did it struggle with? *")
+
+        save_btn = gr.Button("Submit Feedback")
+        output_msg = gr.Textbox(interactive=False)
+
+        save_btn.click(
+            fn=save_feedback,
+            inputs=[
+                audio_input, transcribed_text, user_id, lang, env, device, domain, accuracy,
+                transcript_edit, orthography, orthography_issues,
+                meaning, meaning_loss, errors, error_examples, performance
+            ],
+            outputs=[output_msg]
+        )
+    def reveal_ui(user_input):
+        if user_input.strip():
+            return gr.update(visible=True)
+        else:
+            return gr.update(visible=False)
+
+    proceed_btn.click(fn=reveal_ui, inputs=[user_id], outputs=[main_ui])
 # Launch the interface
 demo.launch()
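
For reference, here is a minimal, self-contained sketch of the gating pattern this commit introduces: the main UI sits in a gr.Group(visible=False), and a Proceed button reveals it via gr.update once a non-empty user ID is entered. The fake_transcribe stub and the placeholder language list are assumptions standing in for the real model_map and transcribe defined elsewhere in app.py; they are not part of the commit.

import gradio as gr

# Stub for the real transcribe(audio, lang) defined elsewhere in app.py (assumption).
def fake_transcribe(audio_path, lang):
    return f"[{lang}] transcript of {audio_path}", audio_path

def reveal_ui(user_input):
    # Reveal the hidden group only when a non-empty user ID was provided.
    return gr.update(visible=bool(user_input.strip()))

with gr.Blocks() as demo:
    gr.Markdown("## African ASR Evaluation Platform")
    user_id = gr.Textbox(label="Please enter user ID. *")
    proceed_btn = gr.Button("Proceed")

    # Hidden until the user proceeds with a valid ID.
    with gr.Group(visible=False) as main_ui:
        audio_input = gr.Audio(sources=["upload", "microphone"], type="filepath",
                               label="Upload or record audio")
        lang = gr.Dropdown(["Language A", "Language B"], label="Select Language", value=None)
        transcribed_text = gr.Textbox(label="Transcribed Text")
        transcribe_btn = gr.Button("Transcribe")
        transcribe_btn.click(fn=fake_transcribe, inputs=[audio_input, lang],
                             outputs=[transcribed_text, audio_input])

    proceed_btn.click(fn=reveal_ui, inputs=[user_id], outputs=[main_ui])

demo.launch()

The one-line reveal_ui collapses the commit's if/else into a single gr.update(visible=...) call; the behaviour is the same.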