import os

from huggingface_hub import login

# Retrieve the access token from the environment variable
hf_token = os.getenv("HF_TOKEN")

# Check that the token was retrieved properly
if hf_token:
    # Use the retrieved token
    login(token=hf_token, add_to_git_credential=True)
else:
    raise ValueError("Hugging Face token not found in environment variables.")
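# Note: HF_TOKEN is assumed to be provided by the runtime environment (for
# example as a Space secret); nothing in this script sets it.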
# Import necessary libraries
from transformers import MarianMTModel, MarianTokenizer, pipeline
import requests
import io
from PIL import Image
import gradio as gr
# Load the translation model and tokenizer
model_name = "Helsinki-NLP/opus-mt-mul-en"
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)
# Create a translation pipeline
translator = pipeline("translation", model=model, tokenizer=tokenizer)
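# A quick sanity check of the translation pipeline; the exact output wording
# may vary between model revisions (hypothetical example):
#   translator("வணக்கம் உலகம்")[0]['translation_text']  # e.g. "Hello world"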
# Function for translation
def translate_text(tamil_text):
    try:
        translation = translator(tamil_text, max_length=40)
        translated_text = translation[0]['translation_text']
        return translated_text
    except Exception as e:
        return f"An error occurred: {str(e)}"
# API credentials and endpoint
API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
headers = {"Authorization": f"Bearer {hf_token}"}
# Function to send the prompt to the inference API and generate an image
def generate_image(prompt):
    try:
        response = requests.post(API_URL, headers=headers, json={"inputs": prompt})
        # Check whether the request succeeded
        if response.status_code == 200:
            print("API call successful, generating image...")
            image_bytes = response.content
            # Try opening the returned bytes as an image
            try:
                image = Image.open(io.BytesIO(image_bytes))
                return image
            except Exception as e:
                print(f"Error opening image: {e}")
                return None
        else:
            print(f"Failed to get image: status code {response.status_code}")
            print("Response content:", response.text)  # Print the response body for debugging
            return None
    except Exception as e:
        print(f"An error occurred: {e}")
        return None
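# Note: the hosted inference API may return a non-200 status (e.g. 503) while
# the model is cold-starting, in which case generate_image() logs the response
# body and returns None. Hypothetical usage:
#   img = generate_image("a watercolor painting of a temple at sunrise")
#   if img is not None:
#       img.save("output.png")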
# Import necessary libraries for Mistral model
from transformers import AutoTokenizer, AutoModelForCausalLM
# Load Mistral model and tokenizer
mistral_tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
mistral_model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
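# Note: Mistral-7B in full precision needs tens of GB of memory. On GPU
# hardware, a common alternative (an assumption, not what this script does) is
# from_pretrained(..., torch_dtype=torch.float16, device_map="auto"), which
# requires the accelerate package.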
# Function to generate creative text from the translated text using Mistral
def generate_creative_text(translated_text):
    inputs = mistral_tokenizer(translated_text, return_tensors='pt')
    # Passing the attention mask and a pad token id avoids generation warnings
    generated_text_ids = mistral_model.generate(
        inputs.input_ids, attention_mask=inputs.attention_mask,
        max_length=100, pad_token_id=mistral_tokenizer.eos_token_id)
    creative_text = mistral_tokenizer.decode(generated_text_ids[0], skip_special_tokens=True)
    return creative_text
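# Note: Mistral-7B-v0.1 is a base (non-instruct) model, so the "creative text"
# is a free-form continuation of the translated sentence rather than an answer
# to it.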
# Function to handle the full workflow
def translate_generate_image_and_text(tamil_text):
    # Step 1: Translate the Tamil text to English
    translated_text = translate_text(tamil_text)
    # Step 2: Generate an image based on the translated text
    image = generate_image(translated_text)
    # Step 3: Generate creative text based on the translated text
    creative_text = generate_creative_text(translated_text)
    return translated_text, creative_text, image
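# Note: if translate_text() fails, it returns an error string, which is then
# passed to the image and text generators as-is; the steps do not short-circuit.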
# Create the Gradio interface
interface = gr.Interface(
    fn=translate_generate_image_and_text,
    inputs="text",
    outputs=["text", "text", "image"],
    title="Tamil to English Translation, Image Generation & Creative Text",
    description="Enter Tamil text to translate to English, generate an image, and create creative text based on the translation.",
)

# Launch the Gradio app
interface.launch()