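"""
Interactive speaking-practice Space: generates comprehension questions from
pasted coursebook text (T5 question generation), reads each question aloud
(Coqui TTS), records the learner's spoken answer, transcribes it
(SpeechRecognition / Google Web Speech API), and scores it against a
heuristic reference answer using difflib.
"""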
import gradio as gr
from transformers import pipeline
import torch
from TTS.api import TTS
import tempfile
import os
import speech_recognition as sr
from difflib import SequenceMatcher

# Load models
qg_pipeline = pipeline("text2text-generation", model="valhalla/t5-small-e2e-qg")
tts = TTS(model_name="tts_models/en/ljspeech/tacotron2-DDC", progress_bar=False, gpu=False)
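# Note: both models are downloaded from their respective hubs on first run and
# held in memory afterwards; gpu=False keeps TTS on CPU, which suits a
# CPU-only Space.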
# Extract a reference answer for comparison: return the first context line
# that contains any of the question's first three words (a rough heuristic).
def extract_answer(question, context):
    for line in context.split("\n"):
        if any(word.lower() in line.lower() for word in question.split()[:3]):
            return line
    return ""
# Generate questions from text
def generate_questions(text):
    # The e2e-qg model emits all of its questions in a single sequence,
    # separated by "<sep>" tokens, so split that sequence rather than
    # requesting multiple return sequences (which fails under greedy decoding).
    output = qg_pipeline(f"generate questions: {text}")
    questions = [q.strip() for q in output[0]["generated_text"].split("<sep>") if q.strip()]
    return (questions, text, 0)  # This tuple is stored in conversation_state
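# For illustration (hypothetical output): generate_questions("The heart pumps
# blood...") might yield (["What does the heart pump?", ...], "<the text>", 0);
# the final element is the index of the next question to ask.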
# Play the next question
def ask_question(state):
    if not state:
        return "Please generate questions first.", None, state
    questions, context, idx = state
    if idx >= len(questions):
        return "All questions asked.", None, state
    question = questions[idx]
    # Synthesize the question to a temporary WAV file for playback
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as fp:
        tts.tts_to_file(text=question, file_path=fp.name)
        audio_path = fp.name
    return question, audio_path, (questions, context, idx + 1)
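# Note: the temporary WAV files are created with delete=False so the file still
# exists when Gradio serves it after the handler returns; they are left for the
# OS (or the Space restart) to clean up.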
# Transcribe the learner's recording and provide feedback
def transcribe_and_feedback(audio_path, state):
    if not state or not audio_path:
        return "Please ask a question and record an answer first.", state
    questions, context, idx = state
    if idx == 0 or idx > len(questions):
        return "Please ask a question first.", state
    recognizer = sr.Recognizer()
    with sr.AudioFile(audio_path) as source:
        audio_data = recognizer.record(source)
    try:
        user_answer = recognizer.recognize_google(audio_data)
    except (sr.UnknownValueError, sr.RequestError):
        return "Could not understand your answer.", state
    # Compare with the expected answer via fuzzy string similarity
    question = questions[idx - 1]
    expected = extract_answer(question, context)
    ratio = SequenceMatcher(None, user_answer.lower(), expected.lower()).ratio()
    if ratio > 0.6:
        feedback = f"Good answer: {user_answer}"
    else:
        feedback = f"Try again. You said: {user_answer}"
    return feedback, (questions, context, idx)
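# The 0.6 similarity threshold is a rough cutoff: SequenceMatcher compares raw
# strings, so a correct answer phrased differently from the matched context
# line can still score low.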
# Gradio UI
with gr.Blocks() as app:
    gr.Markdown("## Interactive Speaking Practice")
    with gr.Row():
        course_text = gr.Textbox(lines=8, label="Paste Coursebook Text")
        gen_btn = gr.Button("Generate Questions")
    question_text = gr.Textbox(label="Current Question")
    question_audio = gr.Audio(label="Listen to Question", type="filepath")
    ask_btn = gr.Button("Ask Next Question")
    user_audio = gr.Audio(label="Your Answer (Record)", sources=["microphone"], type="filepath")
    transcribe_btn = gr.Button("Submit Answer")
    feedback_output = gr.Textbox(label="Feedback")
    conversation_state = gr.State()

    # Wire the buttons to the handlers above
    gen_btn.click(fn=generate_questions, inputs=course_text, outputs=conversation_state)
    ask_btn.click(fn=ask_question, inputs=conversation_state, outputs=[question_text, question_audio, conversation_state])
    transcribe_btn.click(fn=transcribe_and_feedback, inputs=[user_audio, conversation_state], outputs=[feedback_output, conversation_state])

app.launch()
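# Likely requirements.txt for this Space (inferred from the imports above;
# exact package pins are an assumption):
#   gradio
#   transformers
#   torch
#   TTS
#   SpeechRecognition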