amyakir committed
Commit 9d04a20 · verified · 1 Parent(s): ec2cf2f

Update app.py

Files changed (1)
  1. app.py +84 -0
app.py CHANGED
@@ -0,0 +1,84 @@
import gradio as gr
from transformers import pipeline
import torch
from TTS.api import TTS
import tempfile
import os
import speech_recognition as sr
from difflib import SequenceMatcher

# Load models
qg_pipeline = pipeline("text2text-generation", model="valhalla/t5-small-e2e-qg")
tts = TTS(model_name="tts_models/en/ljspeech/tacotron2-DDC", progress_bar=False, gpu=False)

# Simulate QA by extracting a key sentence from the input text (placeholder)
def extract_answer(question, context):
    for line in context.split("\n"):
        if any(word.lower() in line.lower() for word in question.split()[:3]):
            return line
    return ""

def generate_questions(text):
    # The e2e-qg model emits all questions in a single sequence separated by <sep>,
    # so split on that token instead of requesting multiple return sequences.
    output = qg_pipeline(f"generate questions: {text}", max_length=128)
    questions = [q.strip() for q in output[0]["generated_text"].split("<sep>") if q.strip()]
    return (questions, text, 0)  # this tuple is stored in state

def ask_question(state):
    if not state:
        return "Please generate questions first.", None, state
    questions, context, idx = state
    if idx >= len(questions):
        return "✅ All questions asked.", None, state

    question = questions[idx]
    # Synthesize the question to a temporary WAV file for playback
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as fp:
        audio_path = fp.name
    tts.tts_to_file(text=question, file_path=audio_path)

    return question, audio_path, (questions, context, idx + 1)

def transcribe_and_feedback(audio_path, state):
    if not state:
        return "Please generate questions first.", state
    questions, context, idx = state
    if idx == 0 or idx > len(questions):
        return "Please ask a question first.", state
    if not audio_path:
        return "Please record an answer first.", state

    # Transcribe the recorded answer
    recognizer = sr.Recognizer()
    with sr.AudioFile(audio_path) as source:
        audio_data = recognizer.record(source)
    try:
        user_answer = recognizer.recognize_google(audio_data)
    except (sr.UnknownValueError, sr.RequestError):
        return "❌ Could not understand your answer.", state

    # Compare with the expected answer
    question = questions[idx - 1]  # subtract 1 because idx was already incremented
    expected = extract_answer(question, context)
    ratio = SequenceMatcher(None, user_answer.lower(), expected.lower()).ratio()

    if ratio > 0.6:
        feedback = f"✅ Good answer: {user_answer}"
    else:
        feedback = f"❌ Try again. You said: {user_answer}"

    return feedback, (questions, context, idx)

with gr.Blocks() as app:
    gr.Markdown("### 🎓 Interactive Speaking Practice with Coursebook Dialogues")

    with gr.Row():
        course_text = gr.Textbox(lines=8, label="📘 Paste Coursebook Text")
        gen_btn = gr.Button("🔄 Generate Questions")

    question_text = gr.Textbox(label="🎙️ Current Question")
    question_audio = gr.Audio(label="🔊 Listen to Question", type="filepath")
    ask_btn = gr.Button("▶️ Ask Next Question")

    user_audio = gr.Audio(label="🎧 Your Spoken Answer", sources=["microphone"], type="filepath")
    transcribe_btn = gr.Button("📝 Submit Answer")
    feedback_output = gr.Textbox(label="🗨️ Feedback")

    conversation_state = gr.State()

    # Wire the buttons to their handlers
    gen_btn.click(fn=generate_questions, inputs=course_text, outputs=conversation_state)
    ask_btn.click(fn=ask_question, inputs=conversation_state, outputs=[question_text, question_audio, conversation_state])
    transcribe_btn.click(fn=transcribe_and_feedback, inputs=[user_audio, conversation_state], outputs=[feedback_output, conversation_state])

app.launch()