import gradio as gr
from transformers import pipeline
from diffusers import StableDiffusionPipeline
import torch
import os

# 1. Use Hugging Face token securely
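#    (read from the environment so the token never lands in source control; None is fine for public models)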
HF_TOKEN = os.getenv("HF_TOKEN", None)

# 2. Set device
device = "cuda" if torch.cuda.is_available() else "cpu"

# 3. Load translator (Tamil → English using multilingual model)
try:
    translator = pipeline(
        "translation",
        model="Helsinki-NLP/opus-mt-mul-en",
        use_auth_token=HF_TOKEN
    )
except Exception as e:
    translator = None
    print(f"Error loading translator: {e}")

# 4. Load GPT2 for English text generation
try:
    generator = pipeline("text-generation", model="gpt2")
except Exception as e:
    generator = None
    print(f"Error loading GPT2: {e}")

# 5. Load Stable Diffusion for image generation
try:
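    # float16 halves GPU memory use; fall back to float32 on CPU, where half precision is poorly supported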
    image_pipe = StableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        use_auth_token=HF_TOKEN,
        torch_dtype=torch.float16 if device == "cuda" else torch.float32
    ).to(device)
except Exception as e:
    image_pipe = None
    print(f"Error loading Stable Diffusion: {e}")

# 6. Full pipeline function with safe error handling
def generate_image_from_tamil(tamil_text):
    if not translator or not generator or not image_pipe:
        return "Model load error", "Model load error", None

    try:
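        # Stage 1: translate the Tamil input to English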
        translated = translator(tamil_text, max_length=100)[0]['translation_text']
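        # Stage 2: expand the translation into a richer English prompt with GPT-2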
        prompt = generator(translated, max_new_tokens=40, num_return_sequences=1)[0]['generated_text']
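        # Stage 3: render the prompt with Stable Diffusion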
        image = image_pipe(prompt).images[0]
        return translated, prompt, image
    except Exception as e:
        return f"Translation/Image generation error: {str(e)}", "", None

# 7. Gradio UI
iface = gr.Interface(
    fn=generate_image_from_tamil,
    inputs=gr.Textbox(lines=2, label="Enter Tamil Text"),
    outputs=[
        gr.Textbox(label="Translated English Text"),
        gr.Textbox(label="Generated Prompt"),
        gr.Image(label="Generated Image")
    ],
    title="Tamil to Image Generator",
    description="Translate Tamil ➜ Generate English Text ➜ Create Image"
)

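# launch() starts a local Gradio server and prints its URL; pass share=True for a temporary public link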
iface.launch()