import tempfile

import gradio as gr
from gtts import gTTS
from langdetect import detect
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")


def respond(user_input):
    if not user_input:
        return "Please enter a question.", None

    # Detect the input language (fall back to English if detection fails)
    try:
        detected_lang = detect(user_input)
    except Exception:
        detected_lang = "en"

    # Generate the AI response. DialoGPT expects the user turn followed by the
    # EOS token rather than an [INST] ... [/INST] prompt template.
    inputs = tokenizer(user_input + tokenizer.eos_token, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_new_tokens=256,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode only the newly generated tokens so the prompt is not echoed back
    response = tokenizer.decode(
        outputs[0, inputs["input_ids"].shape[-1]:], skip_special_tokens=True
    )

    # Convert the response to speech (Hindi voice for Hindi input, else English)
    try:
        tts = gTTS(text=response, lang="hi" if detected_lang == "hi" else "en")
        with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as fp:
            tts.save(fp.name)
            audio_path = fp.name
    except Exception:
        audio_path = None

    return response, audio_path


# UI
iface = gr.Interface(
    fn=respond,
    inputs=gr.Textbox(lines=2, placeholder="Ask anything...", label="Your Question"),
    outputs=[
        gr.Textbox(label="TeachMe Says"),
        gr.Audio(label="Voice", type="filepath", autoplay=True),
    ],
    title="TeachMe - Your Smart Tutor",
    description="Light AI bot with Hindi + English voice support.",
)

iface.launch()