pravin007s committed
Commit 122e2c3 · verified · 1 Parent(s): 0bd0a3d

Update app.py

Files changed (1)
  1. app.py +42 -96
app.py CHANGED
@@ -6,115 +6,61 @@ Original file is located at
 """
 
 import os
-from huggingface_hub import login
-
-# Retrieve the actual token from the environment variable
-hf_token = os.getenv("HF_TOKEN")
-
-# Check if the token is retrieved properly
-if hf_token:
-    # Use the retrieved token
-    login(token=hf_token, add_to_git_credential=True)
-else:
-    raise ValueError("Hugging Face token not found in environment variables.")
-
-# Import necessary libraries
-from transformers import MarianMTModel, MarianTokenizer, pipeline
+from transformers import MarianMTModel, MarianTokenizer
+import gradio as gr
+from PIL import Image, UnidentifiedImageError
 import requests
 import io
-from PIL import Image
-import matplotlib.pyplot as plt
-import gradio as gr
 
-# Load the translation model and tokenizer
+# Load translation models
 model_name = "Helsinki-NLP/opus-mt-mul-en"
 tokenizer = MarianTokenizer.from_pretrained(model_name)
 model = MarianMTModel.from_pretrained(model_name)
 
-# Create a translation pipeline
-translator = pipeline("translation", model=model, tokenizer=tokenizer)
+# Define language map
+language_map = {
+    "Tamil": "ta",
+    "Russian": "rus"
+}
+
+def translate_text(input_text, selected_language):
+    lang_code = language_map[selected_language]
+    lang_prefix = f">>{lang_code}<< "
+    text_with_lang = lang_prefix + input_text
+    inputs = tokenizer(text_with_lang, return_tensors="pt", padding=True)
+    translated_tokens = model.generate(**inputs)
+    translation = tokenizer.decode(translated_tokens[0], skip_special_tokens=True)
+    return translation
 
-# Function for translation
-def translate_text(tamil_text):
-    try:
-        translation = translator(tamil_text, max_length=40)
-        translated_text = translation[0]['translation_text']
-        return translated_text
-    except Exception as e:
-        return f"An error occurred: {str(e)}"
-
-# API credentials and endpoint
-API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
-headers = {"Authorization": f"Bearer {hf_token}"}
-
-# Function to send payload and generate image
 def generate_image(prompt):
-    try:
-        response = requests.post(API_URL, headers=headers, json={"inputs": prompt})
-
-        # Check if the response is successful
-        if response.status_code == 200:
-            print("API call successful, generating image...")
-            image_bytes = response.content
-
-            # Try opening the image
-            try:
-                image = Image.open(io.BytesIO(image_bytes))
-                return image
-            except Exception as e:
-                print(f"Error opening image: {e}")
-                return None
-        else:
-            print(f"Failed to get image: Status code {response.status_code}")
-            print("Response content:", response.text)  # Print response for debugging
+    API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
+    hf_token = os.getenv("HF_TOKEN")
+    headers = {"Authorization": f"Bearer {hf_token}"}
+
+    response = requests.post(API_URL, headers=headers, json={"inputs": prompt})
+
+    if response.status_code == 200:
+        image_bytes = response.content
+        try:
+            image = Image.open(io.BytesIO(image_bytes))
+            return image
+        except UnidentifiedImageError:
             return None
-
-    except Exception as e:
-        print(f"An error occurred: {e}")
-        return None
-
-# Display image
-def show_image(image):
-    if image:
-        plt.imshow(image)
-        plt.axis('off')  # Hide axes
-        plt.show()
     else:
-        print("No image to display")
-
-# Load GPT-Neo model for creative text generation
-from transformers import AutoTokenizer, AutoModelForCausalLM
-gpt_neo_tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-125M")
-gpt_neo_model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M")
-
-# Function to generate creative text based on translated text
-def generate_creative_text(translated_text):
-    input_ids = gpt_neo_tokenizer(translated_text, return_tensors='pt').input_ids
-    generated_text_ids = gpt_neo_model.generate(input_ids, max_length=100)
-    creative_text = gpt_neo_tokenizer.decode(generated_text_ids[0], skip_special_tokens=True)
-    return creative_text
-
-# Function to handle the full workflow
-def translate_generate_image_and_text(tamil_text):
-    # Step 1: Translate Tamil text to English
-    translated_text = translate_text(tamil_text)
-
-    # Step 2: Generate an image based on the translated text
-    image = generate_image(translated_text)
-
-    # Step 3: Generate creative text based on the translated text
-    creative_text = generate_creative_text(translated_text)
+        return None
 
-    return translated_text, creative_text, image
+def process_input(text_input, selected_language):
+    translated_output = translate_text(text_input, selected_language)
+    image = generate_image(translated_output)
+    return translated_output, image
 
-# Create Gradio interface
+# Gradio interface
 interface = gr.Interface(
-    fn=translate_generate_image_and_text,
-    inputs="text",
-    outputs=["text", "text", "image"],
-    title="Tamil to English Translation, Image Generation & Creative Text",
-    description="Enter Tamil text to translate to English, generate an image, and create creative text based on the translation."
+    fn=process_input,
+    inputs=[gr.Textbox(label="Input Text"), gr.CheckboxGroup(choices=["Tamil", "Russian"], label="Select Language")],
+    outputs=[gr.Textbox(label="Translated Text"), gr.Image(label="Generated Image")],
+    title="Multilingual Translation and Image Generation",
+    description="Translate Tamil or Russian text to English and generate an image."
 )
 
-# Launch Gradio app
-interface.launch()
+interface.launch()
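
One wiring detail in the new interface: gr.CheckboxGroup hands its selection to fn as a list (e.g. ["Tamil"]), while translate_text looks that value up directly in language_map, which expects a plain string key. Below is a minimal single-select sketch; the gr.Radio component, its default value, and the stub process_input are illustrative substitutions, not part of this commit.

import gradio as gr

def process_input(text_input, selected_language):
    # Stand-in for the process_input defined in app.py, kept trivial so the sketch runs on its own.
    return f"[{selected_language}] {text_input}", None

interface = gr.Interface(
    fn=process_input,
    inputs=[
        gr.Textbox(label="Input Text"),
        # gr.Radio passes a single string such as "Tamil", which language_map can look up directly.
        gr.Radio(choices=["Tamil", "Russian"], value="Tamil", label="Select Language"),
    ],
    outputs=[gr.Textbox(label="Translated Text"), gr.Image(label="Generated Image")],
    title="Multilingual Translation and Image Generation",
    description="Translate Tamil or Russian text to English and generate an image.",
)

if __name__ == "__main__":
    interface.launch()

A single-value gr.Dropdown would behave the same way, since it also passes one string to fn.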
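
The new generate_image returns None on any non-200 response without surfacing why. A hedged sketch of a slightly more defensive variant follows, assuming the same FLUX.1-dev endpoint and HF_TOKEN environment variable used in app.py; the 503 retry reflects the hosted Inference API's usual behaviour while a model is loading, and the retry count, wait time, and timeout are arbitrary values, not taken from this repository.

import io
import os
import time

import requests
from PIL import Image, UnidentifiedImageError

API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"

def generate_image_with_retry(prompt, retries=3, wait_seconds=20):
    headers = {"Authorization": f"Bearer {os.getenv('HF_TOKEN')}"}
    for attempt in range(retries):
        response = requests.post(API_URL, headers=headers, json={"inputs": prompt}, timeout=120)
        if response.status_code == 200:
            try:
                return Image.open(io.BytesIO(response.content))
            except UnidentifiedImageError:
                # 200 response whose body is not an image (e.g. a JSON error payload)
                print("Response was not a decodable image:", response.text[:200])
                return None
        if response.status_code == 503 and attempt < retries - 1:
            # Hosted model still loading; wait and try again
            time.sleep(wait_seconds)
            continue
        print(f"Request failed ({response.status_code}):", response.text[:200])
        return None
    return None

Because it still takes a prompt and returns a PIL image or None, it could be called from process_input in place of generate_image without changing the Gradio wiring.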