|
import gradio as gr |
|
from transformers import pipeline |
|
from diffusers import StableDiffusionPipeline |
|
import torch |
|
import os |
|
|
|
|
|
# Optional Hugging Face access token; None when the HF_TOKEN env var is unset.
HF_TOKEN = os.environ.get("HF_TOKEN")

# Run on GPU when PyTorch can see one, otherwise fall back to CPU.
if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"
|
|
|
|
|
# Multilingual -> English translation pipeline. `token` replaces the
# deprecated `use_auth_token` argument (transformers emits a FutureWarning
# for the old name). On any load failure the app degrades gracefully:
# translator stays None and generate_image_from_tamil reports the error.
try:
    translator = pipeline(
        "translation",
        model="Helsinki-NLP/opus-mt-mul-en",
        token=HF_TOKEN,
    )
except Exception as e:
    translator = None
    print(f"Error loading translator: {e}")
|
|
|
|
|
# GPT-2 text-generation pipeline used to expand the translated text into a
# richer image prompt; stays None if loading fails so the UI can report it.
generator = None
try:
    generator = pipeline("text-generation", model="gpt2")
except Exception as e:
    print(f"Error loading GPT2: {e}")
|
|
|
|
|
# Stable Diffusion text-to-image pipeline. `token` replaces the deprecated
# `use_auth_token` argument in diffusers' from_pretrained. Half precision is
# used on GPU to cut memory; full float32 on CPU (fp16 is unsupported there).
try:
    image_pipe = StableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        token=HF_TOKEN,
        torch_dtype=torch.float16 if device == "cuda" else torch.float32,
    ).to(device)
except Exception as e:
    image_pipe = None
    print(f"Error loading Stable Diffusion: {e}")
|
|
|
|
|
def generate_image_from_tamil(tamil_text):
    """Translate Tamil text to English, expand it into an image prompt
    with GPT-2, and render the prompt with Stable Diffusion.

    Parameters
    ----------
    tamil_text : str
        Tamil input entered by the user.

    Returns
    -------
    tuple
        (translated_text, prompt, image) on success; on failure the first
        element carries an error message and the image slot is None.
    """
    # Explicit `is None` checks: each pipeline is set to None when its
    # startup load fails, so compare against that sentinel directly
    # rather than relying on object truthiness.
    if translator is None or generator is None or image_pipe is None:
        return "Model load error", "Model load error", None

    try:
        # Step 1: Tamil -> English.
        translated = translator(tamil_text, max_length=100)[0]['translation_text']
        # Step 2: expand the literal translation into a richer prompt.
        # NOTE(review): GPT-2's max_length counts the input tokens too, so
        # long translations leave little room for new text — consider
        # max_new_tokens instead; confirm desired behaviour.
        prompt = generator(translated, max_length=50, num_return_sequences=1)[0]['generated_text']
        # Step 3: text-to-image; .images[0] takes the single generated frame.
        image = image_pipe(prompt).images[0]
        return translated, prompt, image
    except Exception as e:
        # Surface the failure in the first textbox; keep the tuple shape
        # the Gradio outputs expect.
        return f"Translation/Image generation error: {str(e)}", "", None
|
|
|
|
|
# Gradio UI: one Tamil textbox in; translation, prompt, and image out.
iface = gr.Interface(
    fn=generate_image_from_tamil,
    inputs=gr.Textbox(lines=2, label="Enter Tamil Text"),
    outputs=[
        gr.Textbox(label="Translated English Text"),
        gr.Textbox(label="Generated Prompt"),
        gr.Image(label="Generated Image")
    ],
    title="Tamil to Image Generator",
    # Arrows restored — they had been mangled into "β" by an encoding error.
    description="Translate Tamil → Generate English Text → Create Image"
)

iface.launch()
|
|