Update app.py
app.py CHANGED
@@ -1,5 +1,8 @@
 import gradio as gr
 from transformers import BlipProcessor, BlipForConditionalGeneration, pipeline
+import matplotlib.pyplot as plt
+import numpy as np
+from PIL import Image as PILImage
 
 # Load the image captioning model and tokenizer
 caption_model_name = "Salesforce/blip-image-captioning-large"
@@ -8,9 +11,9 @@ caption_model = BlipForConditionalGeneration.from_pretrained(caption_model_name)
 
 # Load the emotion analysis model
 emotion_model_name = "SamLowe/roberta-base-go_emotions"
-emotion_classifier = pipeline("text-classification", model=emotion_model_name)
+emotion_classifier = pipeline("text-classification", model=emotion_model_name, return_all_scores=True)
 
-def generate_caption_and_analyze_emotions(image, text=None):
+def generate_caption_and_analyze_emotions(image=None, text=None):
     try:
         if image is not None:
             # Preprocess the image for caption generation
@@ -26,7 +29,29 @@ def generate_caption_and_analyze_emotions(image, text=None):
 
         # Perform emotion analysis on the generated caption or provided text
         results = emotion_classifier(decoded_caption)
-
+
+        # Prepare data for visualization
+        labels = [result['label'] for result in results[0]]
+        scores = [result['score'] for result in results[0]]
+
+        # Plot the emotion visualization
+        plt.figure(figsize=(10, 5))
+        plt.bar(labels, scores, color='skyblue')
+        plt.xlabel('Emotions')
+        plt.ylabel('Scores')
+        plt.title('Emotion Analysis')
+        plt.xticks(rotation=45)
+        plt.tight_layout()
+
+        # Save the plot as an image
+        plt_path = "emotion_visualization.png"
+        plt.savefig(plt_path)
+        plt.close()
+
+        # Load the saved image for Gradio
+        vis_image = PILImage.open(plt_path)
+
+        sentiment_label = results[0][0]['label']
         if sentiment_label == 'neutral':
             sentiment_text = "Sentiment of the text is"
         else:
@@ -35,15 +60,15 @@ def generate_caption_and_analyze_emotions(image, text=None):
         caption_output = f"Caption: '{decoded_caption}'"
         sentiment_output = f"{sentiment_text} {sentiment_label}."
 
-        return caption_output, sentiment_output
+        return caption_output, sentiment_output, vis_image
     except Exception as e:
-        return f"An error occurred: {e}", ""
+        return f"An error occurred: {e}", "", None
 
 # Define the Gradio interface using the new API
-image_input = gr.
-text_input = gr.
+image_input = gr.Image(label="Upload an image")
+text_input = gr.Textbox(label="Or enter text", lines=2)
 
-outputs = [gr.
+outputs = [gr.Textbox(label="Generated Caption"), gr.Textbox(label="Sentiment Analysis"), gr.Image(label="Emotion Visualization")]
 
 # Create the Gradio app
 app = gr.Interface(fn=generate_caption_and_analyze_emotions, inputs=[image_input, text_input], outputs=outputs)
@@ -51,4 +76,3 @@ app = gr.Interface(fn=generate_caption_and_analyze_emotions, inputs=[image_input
 # Launch the Gradio app
 if __name__ == "__main__":
     app.launch()
-
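Note: the collapsed context between the second and third hunks (old lines 17-25) holds the caption-generation step itself, so it is not visible in this diff. For readers following along, standard BLIP captioning with the processor/model pair loaded above has roughly this shape; this is a hedged sketch, not the file's hidden lines, and caption_processor plus the max_new_tokens cap are illustrative assumptions:

# Hypothetical sketch of the hidden caption-generation step (not the
# file's actual lines): standard BLIP processor + generate + decode.
caption_processor = BlipProcessor.from_pretrained(caption_model_name)
inputs = caption_processor(images=image, return_tensors="pt")
output_ids = caption_model.generate(**inputs, max_new_tokens=30)  # length cap is an assumption
decoded_caption = caption_processor.decode(output_ids[0], skip_special_tokens=True)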
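One caveat on the new emotion handling: with return_all_scores=True the text-classification pipeline returns, per input, a list of {label, score} dicts covering every go_emotions label, and in the transformers releases where this legacy flag applies the list follows the model's label order rather than descending score (newer releases deprecate the flag in favor of top_k=None). If that ordering assumption holds, results[0][0]['label'] picks whichever label has id 0, not necessarily the strongest emotion. A sort-order-independent pick, sketched under those assumptions:

# Sketch: select the top-scoring emotion explicitly, so the result does
# not depend on whether scores come back sorted or in label-id order.
results = emotion_classifier(decoded_caption)  # [[{'label': ..., 'score': ...}, ...]]
top = max(results[0], key=lambda r: r['score'])
sentiment_label = top['label']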
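A second caveat: the savefig/PILImage.open round-trip writes every request to the same emotion_visualization.png, which concurrent users of the Space would overwrite. Gradio's gr.Plot component accepts a matplotlib figure directly, so one alternative is to return the figure and swap the gr.Image output for gr.Plot; plot_emotions below is a hypothetical helper sketching that approach:

# Hypothetical helper: build the bar chart and return the figure object
# instead of saving it to a shared file on disk.
import matplotlib.pyplot as plt

def plot_emotions(labels, scores):
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.bar(labels, scores, color='skyblue')
    ax.set_xlabel('Emotions')
    ax.set_ylabel('Scores')
    ax.set_title('Emotion Analysis')
    ax.tick_params(axis='x', rotation=45)
    fig.tight_layout()
    return fig  # gr.Plot(label="Emotion Visualization") can render this directly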