import streamlit as st
import os
import requests

# --- CONFIG ---
SIGN_IMAGE_FOLDER = "sign_images"
HF_TOKEN = os.getenv("HF_API_TOKEN")
if not HF_TOKEN:
    st.error("Hugging Face API token not found. Please set 'HF_API_TOKEN' in your environment.")
    st.stop()
HF_MODEL = "facebook/mbart-large-50-many-to-many-mmt"

# --- STREAMLIT UI ---
st.set_page_config(page_title="DeafTranslator", layout="centered")
st.title("🤟 DeafTranslator")
st.markdown("""
Translate **Urdu ↔ English** and view the result in **Sign Language** (ASL).
You can also input ASL signs manually to reverse-translate them into English and then Urdu.
""")

# --- HELPER FUNCTIONS ---
def hf_translate(text, src_lang, tgt_lang):
    """Translate `text` from `src_lang` to `tgt_lang` via the Hugging Face Inference API."""
    headers = {"Authorization": f"Bearer {HF_TOKEN}"}
    payload = {
        "inputs": text,
        "parameters": {"src_lang": src_lang, "tgt_lang": tgt_lang},
        "options": {"wait_for_model": True},
    }
    url = f"https://api-inference.huggingface.co/models/{HF_MODEL}"
    try:
        response = requests.post(url, headers=headers, json=payload, timeout=60)
        if response.status_code != 200:
            return f"Error: Received status code {response.status_code}"
        result = response.json()
        # A successful translation comes back as a non-empty list of dicts.
        if not result or not isinstance(result, list):
            return f"Error: Invalid response format. {response.text}"
        return result[0]["translation_text"]
    except Exception as e:
        return f"Error: {e}"


def find_image_path(name):
    """Return the path to a sign image (letter or 'space'), or None if it is missing."""
    for ext in ["jpg", "JPG"]:
        path = os.path.join(SIGN_IMAGE_FOLDER, f"{name}.{ext}")
        if os.path.exists(path):
            return path
    return None


def display_sign_language(text):
    """Fingerspell `text`: one sign image per letter, with a 'space' image between words."""
    st.markdown("### 👐 Sign Language Representation")
    if not text:
        st.warning("No translation available to display in ASL.")
        return
    words = text.lower().split()
    for word in words:
        st.markdown(f"**{word}**")
        cols = st.columns(len(word))
        for i, char in enumerate(word):
            if char.isalpha():
                img_path = find_image_path(char)
                if img_path:
                    cols[i].image(img_path, caption=char.upper(), width=70)
                else:
                    cols[i].warning(f"Missing: {char.upper()}")
        # Show the space image between words
        space_img = find_image_path("space")
        if space_img:
            st.image(space_img, width=40)


def render_sign_input():
    """Let the user pick an ordered sequence of ASL signs (letters A–Z plus 'space')."""
    st.markdown("### ✋ Manual Sign Input (A–Z & Space)")
    selected_chars = st.multiselect(
        "Select ASL Signs in Order",
        options=[chr(i) for i in range(65, 91)] + ["space"],
        default=[],
    )
    return selected_chars


# --- TRANSLATION SECTION ---
tab1, tab2 = st.tabs(["Urdu → English → ASL", "ASL → English → Urdu"])

with tab1:
    urdu_text = st.text_area("📝 Enter Urdu Text", height=150)
    if st.button("🔄 Translate & Show Signs"):
        if not urdu_text.strip():
            st.warning("Please enter Urdu text.")
        else:
            with st.spinner("Translating to English..."):
                english = hf_translate(urdu_text, "ur_PK", "en_XX")
            if english.startswith("Error"):
                st.error(f"Translation failed: {english}")
            else:
                st.markdown("### ✅ English Translation")
                st.success(english)
                display_sign_language(english)

with tab2:
    sign_sequence = render_sign_input()
    if st.button("🔁 Translate Signs"):
        if not sign_sequence:
            st.warning("Please select at least one sign.")
        else:
            # Join the selected letters and treat each "space" entry as a word boundary.
            english_text = " ".join(
                word.lower()
                for word in "".join(sign_sequence).split("space")
                if word
            )
            st.markdown("### 🔡 Interpreted English")
            st.success(english_text)
            with st.spinner("Translating to Urdu..."):
                urdu = hf_translate(english_text, "en_XX", "ur_PK")
            if urdu.startswith("Error"):
                st.error(f"Translation failed: {urdu}")
            else:
                st.markdown("### 🌐 Urdu Translation")
                st.success(urdu)
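
# A minimal usage sketch (filenames here are assumptions, not part of the source:
# the script is saved as app.py, and sign_images/ holds one image per handshape,
# e.g. a.jpg ... z.jpg plus space.jpg, matching find_image_path above):
#
#   export HF_API_TOKEN=<your Hugging Face token>
#   streamlit run app.py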