import openai
from transformers import MBartForConditionalGeneration, MBart50Tokenizer
import gradio as gr
import requests
import io
from PIL import Image
import os

# Set up your OpenAI API key (make sure it's stored as an environment variable)
openai_api_key = os.getenv("OPENAI_API_KEY")
if openai_api_key is None:
    raise ValueError("OpenAI API key not found! Please set the 'OPENAI_API_KEY' environment variable.")
else:
    openai.api_key = openai_api_key

# Load the translation model and tokenizer
model_name = "facebook/mbart-large-50-many-to-one-mmt"
tokenizer = MBart50Tokenizer.from_pretrained(model_name)
model = MBartForConditionalGeneration.from_pretrained(model_name)

# Use the Hugging Face API key from environment variables for the text-to-image model
hf_api_key = os.getenv("full_token")
if hf_api_key is None:
    raise ValueError("Hugging Face API key not found! Please set the 'full_token' environment variable.")
else:
    headers = {"Authorization": f"Bearer {hf_api_key}"}

API_URL = "https://api-inference.huggingface.co/models/ZB-Tech/Text-to-Image"
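# Example environment setup before launching (shell sketch; the key values below are
# placeholders, the variable names are exactly the ones read above):
#   export OPENAI_API_KEY="sk-..."
#   export full_token="hf_..."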

# Define the OpenAI GPT-3 text generation function with error handling.
# Note: this uses the legacy Completions endpoint, which requires openai<1.0.
def generate_with_gpt3(prompt, max_tokens=150, temperature=0.7):
    try:
        response = openai.Completion.create(
            engine="text-davinci-003",  # legacy high-quality completions model
            prompt=prompt,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=0.9,
            frequency_penalty=0.0,
            presence_penalty=0.0
        )
        return response.choices[0].text.strip()
    except Exception as e:
        print(f"OpenAI API Error: {e}")
        return "Error generating text with GPT-3. Check the OpenAI API settings."

# Define the translation, GPT-3 text generation, and image generation function
def translate_and_generate_image(tamil_text):
    try:
        # Step 1: Translate Tamil text to English using mbart-large-50
        tokenizer.src_lang = "ta_IN"
        inputs = tokenizer(tamil_text, return_tensors="pt")
        translated_tokens = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"])
        translated_text = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0]
    except Exception as e:
        return "Error during translation: " + str(e), "", None

    try:
        # Step 2: Generate high-quality descriptive text using OpenAI's GPT-3
        prompt = f"Create a detailed and creative description based on the following text: {translated_text}"
        generated_text = generate_with_gpt3(prompt, max_tokens=150, temperature=0.7)
    except Exception as e:
        return translated_text, f"Error during text generation: {e}", None

    try:
        # Step 3: Use the generated English text to create an image
        def query(payload):
            response = requests.post(API_URL, headers=headers, json=payload)
            response.raise_for_status()  # Raise an error if the request fails
            return response.content

        # Generate the image using the generated text
        image_bytes = query({"inputs": generated_text})
        image = Image.open(io.BytesIO(image_bytes))
    except Exception as e:
        # Keep the image output as None so gr.Image is not handed an error string
        return translated_text, f"{generated_text}\n\nError during image generation: {e}", None

    return translated_text, generated_text, image
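
# Quick local check outside the Gradio UI (hypothetical Tamil input meaning "a beautiful sunset";
# requires both API keys to be set):
#   translated, description, image = translate_and_generate_image("ஒரு அழகான சூரிய அஸ்தமனம்")
#   print(translated, description)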

# Gradio interface setup
iface = gr.Interface(
    fn=translate_and_generate_image,
    inputs=gr.Textbox(lines=2, placeholder="Enter Tamil text here..."),
    outputs=[
        gr.Textbox(label="Translated English Text"),
        gr.Textbox(label="Generated Descriptive Text"),
        gr.Image(label="Generated Image"),
    ],
    title="Tamil to English Translation, GPT-3 Text Generation, and Image Creation",
    description="Translate Tamil text to English using Facebook's mbart-large-50 model, generate high-quality text using GPT-3, and create an image using the generated text.",
)

# Launch Gradio app without `share=True`
iface.launch()