"""Tamil-to-image demo: translate Tamil → English, expand with GPT-2, render with Stable Diffusion."""
import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
from diffusers import StableDiffusionPipeline
import torch
# 1. Tamil-to-English translator (Helsinki-NLP MarianMT checkpoint for ta->en)
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ta-en")
# 2. English text generator — GPT-2; any causal-LM checkpoint would work here
generator = pipeline("text-generation", model="gpt2")
# 3. Image generator using Stable Diffusion v1.4.
#    Use float16 on GPU to halve memory; fall back to float32 on CPU,
#    where half precision is unsupported/slow.
device = "cuda" if torch.cuda.is_available() else "cpu"
image_pipe = StableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4",
torch_dtype=torch.float16 if device == "cuda" else torch.float32
)
image_pipe = image_pipe.to(device)
# 🚀 Combined pipeline: Tamil text → English translation → story prompt → image
def generate_image_from_tamil(tamil_input):
    """Translate Tamil input to English, expand it into a prompt, and render an image.

    Returns a tuple of (translated English text, generated English prompt, PIL image).
    """
    # Step 1: Tamil → English (take the top translation hypothesis)
    english_text = translator(tamil_input, max_length=100)[0]["translation_text"]
    # Step 2: let GPT-2 continue the translated sentence into a fuller prompt
    prompt = generator(english_text, max_length=50, num_return_sequences=1)[0]["generated_text"]
    # Step 3: render the English prompt with Stable Diffusion
    rendered = image_pipe(prompt).images[0]
    return english_text, prompt, rendered
# 🎨 Gradio UI: wires the combined pipeline into a simple web form.
iface = gr.Interface(
    fn=generate_image_from_tamil,
    inputs=gr.Textbox(lines=2, label="Enter Tamil Text"),
    outputs=[
        gr.Textbox(label="Translated English Text"),
        gr.Textbox(label="Generated English Prompt"),
        gr.Image(label="Generated Image"),
    ],
    # Fix: the original title literal was split across two lines by a
    # mangled emoji, leaving an unterminated string (syntax error).
    title="Tamil to Image Generator",
    description="Translates Tamil → English, generates story → creates image using Stable Diffusion.",
)
iface.launch()