|
import os |
|
import gradio as gr |
|
from transformers import MarianMTModel, MarianTokenizer |
|
from diffusers import StableDiffusionPipeline |
|
import torch |
|
|
|
|
|
# Hugging Face token for gated/private model downloads; optional — public
# checkpoints download without it.
HF_API_TOKEN = os.getenv("HF_API_TOKEN", None)

# NOTE(review): by Helsinki-NLP naming convention, "en-ta" is an
# ENGLISH -> TAMIL checkpoint, which is the reverse of what the app's UI
# claims — verify the intended direction against the model card.
translation_model_name = "Helsinki-NLP/opus-mt-tc-big-en-ta"

translation_tokenizer = MarianTokenizer.from_pretrained(translation_model_name)
translation_model = MarianMTModel.from_pretrained(translation_model_name)

# Decide the device once. fp16 weights are only usable on CUDA — many CPU
# kernels (e.g. LayerNorm, convolutions) are not implemented for half
# precision, so the original unconditional float16 load crashed at
# inference time on CPU-only hosts. Load fp32 when no GPU is available.
# The deprecated `revision="fp16"` branch is dropped: it would conflict
# with the fp32 fallback, and `torch_dtype` alone selects the precision.
_use_cuda = torch.cuda.is_available()
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16 if _use_cuda else torch.float32,
    use_auth_token=HF_API_TOKEN,
)
pipe = pipe.to("cuda" if _use_cuda else "cpu")
|
|
|
def translate_tamil_to_english(tamil_text):
    """Translate *tamil_text* using the module-level MarianMT model.

    NOTE(review): the configured checkpoint name ends in "en-ta"
    (English -> Tamil by Helsinki-NLP convention), so the actual
    direction may be the reverse of this function's name — confirm
    against the model card.

    Parameters
    ----------
    tamil_text : str
        Input sentence to translate.

    Returns
    -------
    str
        The translated sentence with special tokens stripped.
    """
    inputs = translation_tokenizer(tamil_text, return_tensors="pt", padding=True)
    # Inference only: disable autograd bookkeeping — identical output,
    # lower memory use and faster generation.
    with torch.no_grad():
        outputs = translation_model.generate(**inputs)
    # Single-sentence input, so only the first generated sequence matters.
    english_text = translation_tokenizer.decode(outputs[0], skip_special_tokens=True)
    return english_text
|
|
|
def generate_image_from_text(text):
    """Run the module-level Stable Diffusion pipeline on *text*.

    Returns the first generated PIL image from the pipeline output.
    """
    result = pipe(text)
    return result.images[0]
|
|
|
def translate_and_generate_image(tamil_text):
    """Translate the input, then render an image from the translation.

    Returns a ``(translated_text, image)`` pair matching the two Gradio
    output components wired to this callback.
    """
    translated = translate_tamil_to_english(tamil_text)
    rendered = generate_image_from_text(translated)
    return translated, rendered
|
|
|
# Assemble the Gradio UI. Component creation order inside the Blocks
# context determines on-screen layout, so it is kept as: heading, input
# box, the two outputs, then the action button.
with gr.Blocks() as app:
    gr.Markdown("# Tamil to English Translation + Image Generation")

    text_in = gr.Textbox(label="Enter Tamil Text", lines=3)
    text_out = gr.Textbox(label="Translated English Text")
    image_out = gr.Image(label="Generated Image")

    run_button = gr.Button("Translate and Generate Image")

    # One click drives both outputs: translated text and generated image.
    run_button.click(
        fn=translate_and_generate_image,
        inputs=[text_in],
        outputs=[text_out, image_out],
    )
|
|
|
if __name__ == "__main__":
    # share=True asks Gradio to open a temporary public *.gradio.live
    # tunnel in addition to the local server.
    app.launch(share=True)
|
|