import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
from diffusers import StableDiffusionPipeline
import torch


@st.cache_resource
def load_all_models():
    # Translation model: IndicTrans2 distilled Indic-to-English checkpoint.
    trans_model_id = "ai4bharat/indictrans2-indic-en-dist-200M"
    tokenizer = AutoTokenizer.from_pretrained(trans_model_id, trust_remote_code=True)
    model = AutoModelForSeq2SeqLM.from_pretrained(trans_model_id, trust_remote_code=True)
    translation_pipeline = pipeline("translation", model=model, tokenizer=tokenizer)

    # Image model: Stable Diffusion 2.1, in half precision when a GPU is available.
    # variant="fp16" selects the fp16 weight files; revision="fp16" is deprecated in diffusers.
    img_pipe = StableDiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-1",
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        variant="fp16" if torch.cuda.is_available() else None,
    )
    img_pipe = img_pipe.to("cuda" if torch.cuda.is_available() else "cpu")

    return tokenizer, model, translation_pipeline, img_pipe


def main():
    st.set_page_config(page_title="Tamil to English to Image Generator", layout="centered")
    st.title("Tamil → English → AI Image Generator")
    st.markdown("Translate Tamil text to English and generate an image from it!")

    # Models are cached by @st.cache_resource, so this only loads them once.
    with st.spinner("Loading models..."):
        tokenizer, model, translation_pipeline, img_pipe = load_all_models()

    tamil_text = st.text_area("Enter Tamil text here:", height=150)

    if st.button("Generate Image"):
        if tamil_text.strip() == "":
            st.warning("Please enter some Tamil text.")
            return

        with st.spinner("Translating to English..."):
            # IndicTrans2 uses FLORES-200 style language tags (Tamil script -> English Latin).
            # Depending on the checkpoint's remote code, input may also need preprocessing
            # with IndicProcessor from IndicTransToolkit.
            translated = translation_pipeline(
                tamil_text, src_lang="tam_Taml", tgt_lang="eng_Latn"
            )[0]["translation_text"]
        st.success(f"English Translation: `{translated}`")

        with st.spinner("Generating image..."):
            image = img_pipe(prompt=translated).images[0]
        st.image(image, caption="Generated Image", use_container_width=True)


if __name__ == "__main__":
    main()
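
# Usage sketch (assumed filename and package list; adjust to your environment):
#   pip install streamlit torch transformers diffusers accelerate
#   streamlit run app.py
# Note: Stable Diffusion 2.1 is far faster on a CUDA GPU; on CPU a single image
# can take several minutes to generate.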