File size: 4,206 Bytes
47454a7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
# -*- coding: utf-8 -*-
"""gen ai project f.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1iF7hdOjWNeFUtGvUYdaFsBErJGnY1h5J
"""

from huggingface_hub import login

# Authenticate with the Hugging Face Hub (needed for model downloads and the
# hosted inference API used below).
# NOTE(review): hardcoded API token committed to source — rotate it and load it
# from an environment variable or Colab secret instead of the literal string.
login(token="hf_gen")

# Import necessary libraries
from transformers import MarianMTModel, MarianTokenizer, pipeline

# Load the translation model and tokenizer.
# "mul-en" is a multilingual->English MarianMT checkpoint; the first call
# downloads the weights into the local Hugging Face cache.
model_name = "Helsinki-NLP/opus-mt-mul-en"
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)

# Create a translation pipeline used by translate_text() below.
translator = pipeline("translation", model=model, tokenizer=tokenizer)

# Function for translation
def translate_text(tamil_text):
    try:
        # Perform translation
        translation = translator(tamil_text, max_length=40)
        translated_text = translation[0]['translation_text']
        return translated_text
    except Exception as e:
        return f"An error occurred: {str(e)}"

# Smoke-test the translation pipeline with example Tamil text before wiring
# up the full Gradio app below.
tamil_text = "மழையுடன் ஒரு பூ"  # "A flower with rain"
translated_text = translate_text(tamil_text)
print(f"Translated Text: {translated_text}")

import requests
import io
from PIL import Image
import matplotlib.pyplot as plt

# API credentials and endpoint for the hosted FLUX.1-dev text-to-image model.
API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
# NOTE(review): hardcoded bearer token in source — same credential hygiene
# issue as the login() call above; move to an env var and rotate the token.
headers = {"Authorization": "Bearer hf_gen"}

# Function to send payload and generate image
def generate_image(prompt):
    try:
        # Send request to API
        response = requests.post(API_URL, headers=headers, json={"inputs": prompt})

        # Check if the response is successful
        if response.status_code == 200:
            print("API call successful, generating image...")
            image_bytes = response.content

            # Try opening the image
            try:
                image = Image.open(io.BytesIO(image_bytes))
                return image
            except Exception as e:
                print(f"Error opening image: {e}")
        else:
            # Handle non-200 responses
            print(f"Failed to get image: Status code {response.status_code}")
            print("Response content:", response.text)  # Print response for debugging

    except Exception as e:
        print(f"An error occurred: {e}")

# Display image
def show_image(image):
    """Render *image* in a matplotlib figure; print a notice when it is falsy."""
    if not image:
        print("No image to display")
        return
    plt.imshow(image)
    plt.axis('off')  # Hide axes
    plt.show()

# Smoke-test the image endpoint with a fixed English prompt (the English
# rendering of the Tamil example above).
prompt = "A flower with rain"
image = generate_image(prompt)

# Display the generated image (or the "no image" notice on failure)
show_image(image)

from transformers import AutoTokenizer, AutoModelForCausalLM

# Load GPT-Neo model for creative text generation
# (125M-parameter checkpoint — presumably chosen for quick inference; verify
# the size tradeoff if output quality matters.)
gpt_neo_tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-125M")
gpt_neo_model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M")

# Function to generate creative text based on translated text
def generate_creative_text(translated_text, max_length=100):
    """Continue *translated_text* with GPT-Neo using default decoding settings.

    Args:
        translated_text: English prompt to extend.
        max_length: Total token budget for ``generate`` — note it includes
            the prompt tokens, not just the continuation. Defaults to 100,
            the value previously hard-coded here.

    Returns:
        The decoded text (prompt plus continuation), special tokens stripped.
    """
    input_ids = gpt_neo_tokenizer(translated_text, return_tensors='pt').input_ids
    generated_text_ids = gpt_neo_model.generate(input_ids, max_length=max_length)
    return gpt_neo_tokenizer.decode(generated_text_ids[0], skip_special_tokens=True)

import gradio as gr

# Function to handle the full workflow
def translate_generate_image_and_text(tamil_text):
    """Run the end-to-end pipeline for the Gradio app.

    Translates *tamil_text* to English, then generates both an image and a
    creative-text continuation from that translation. Returns the tuple
    (translated_text, creative_text, image) expected by the interface outputs.
    """
    # The English translation drives both downstream generators.
    english = translate_text(tamil_text)
    generated_image = generate_image(english)
    story = generate_creative_text(english)
    return english, story, generated_image

# Create Gradio interface: one text input mapped to the three outputs
# (translated text, creative text, image) returned, in order, by
# translate_generate_image_and_text.
interface = gr.Interface(
    fn=translate_generate_image_and_text,
    inputs="text",
    outputs=["text", "text", "image"],
    title="Tamil to English Translation, Image Generation & Creative Text",
    description="Enter Tamil text to translate to English, generate an image, and create creative text based on the translation."
)

# Launch Gradio app (blocks and serves until interrupted)
interface.launch()