# app.py — Tamil-to-image Gradio Space (author: 24Sureshkumar, commit 9e730f5).
# NOTE: the original paste included Hugging Face file-viewer chrome
# ("raw / history blame / 2.15 kB"), which was not Python; replaced with this header.
import gradio as gr
from transformers import pipeline
from diffusers import StableDiffusionPipeline
import torch
import os
# 1. Read the Hugging Face access token from the environment (never hard-code it).
HF_TOKEN = os.getenv("HF_TOKEN", None)

# 2. Pick the compute device: CUDA when available, otherwise CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# 3. Load the translator (multilingual -> English; used here for Tamil input).
#    Load failures are tolerated: the app still starts, and the request handler
#    reports "Model load error" instead of crashing at import time.
try:
    translator = pipeline(
        "translation",
        model="Helsinki-NLP/opus-mt-mul-en",
        token=HF_TOKEN,  # `use_auth_token` is deprecated; `token` is the current kwarg
    )
except Exception as e:
    translator = None
    print(f"Error loading translator: {e}")

# 4. Load GPT-2 to expand the translated text into a richer English prompt.
try:
    generator = pipeline("text-generation", model="gpt2")
except Exception as e:
    generator = None
    print(f"Error loading GPT2: {e}")

# 5. Load Stable Diffusion for text-to-image generation.
#    fp16 halves GPU memory use; CPU execution requires fp32.
try:
    image_pipe = StableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        token=HF_TOKEN,  # `use_auth_token` is deprecated; `token` is the current kwarg
        torch_dtype=torch.float16 if device == "cuda" else torch.float32,
    ).to(device)
except Exception as e:
    image_pipe = None
    print(f"Error loading Stable Diffusion: {e}")
# 6. Full pipeline function with safe error handling.
def generate_image_from_tamil(tamil_text):
    """Translate Tamil text to English, expand it into a prompt with GPT-2,
    and render an image with Stable Diffusion.

    Returns:
        (translated_text, prompt, image) on success; on failure the first
        element carries an error message and the image is None, so the
        Gradio outputs always receive three values.
    """
    # Guard: if any model failed to load at startup, report it instead of crashing.
    if not translator or not generator or not image_pipe:
        return "Model load error", "Model load error", None
    try:
        # Tamil -> English via the multilingual translation model.
        translated = translator(tamil_text, max_length=100)[0]['translation_text']
        # GPT-2 continues the translation; its output (which includes the
        # input text) is used directly as the image prompt.
        prompt = generator(translated, max_length=50, num_return_sequences=1)[0]['generated_text']
        image = image_pipe(prompt).images[0]
        return translated, prompt, image
    except Exception as e:
        return f"Translation/Image generation error: {str(e)}", "", None
# 7. Gradio UI: one Tamil textbox in; translation, prompt, and image out.
#    `launch()` runs at module level, as Hugging Face Spaces expects.
iface = gr.Interface(
    fn=generate_image_from_tamil,
    inputs=gr.Textbox(lines=2, label="Enter Tamil Text"),
    outputs=[
        gr.Textbox(label="Translated English Text"),
        gr.Textbox(label="Generated Prompt"),
        gr.Image(label="Generated Image"),
    ],
    title="Tamil to Image Generator",
    description="Translate Tamil ➜ Generate English Text ➜ Create Image",
)

iface.launch()