Update app.py
app.py CHANGED
@@ -7,6 +7,9 @@ matplotlib.use('Agg') # Set backend for Matplotlib to avoid GUI conflicts with G
 import matplotlib.pyplot as plt
 import time # For profiling if needed
 
+# --- Add Gemini Import ---
+import google.generativeai as genai
+
 # --- Module Imports ---
 from gradio_utils import get_url_user_token
 
@@ -160,6 +163,47 @@ def update_analytics_plots_figures(token_state_value, date_filter_option, custom
     return [error_msg] + placeholder_figs
 
 
+# --- Gemini Test Function (simple as requested, with prompt input) ---
+def call_gemini_api_direct_with_prompt(user_prompt):
+    """
+    Calls the Gemini API with the provided prompt and API key.
+    This function performs a direct API call without explicit try-except blocks
+    for the API interaction itself, as per the request.
+    """
+    # WARNING: Hardcoding API keys is not recommended for production.
+    # Consider using environment variables or a secure config management system.
+    api_key = "AIzaSyCSfIw2kWWO1c_oa7WQ33h11O6N8egNoqk" # User provided API key
+
+    if not user_prompt:
+        logging.warning("Gemini API call: Prompt is empty.")
+        return "Per favore, inserisci un prompt."
+
+    try:
+        # Configure the genai library with the API key
+        # This needs to be done once, but doing it per call is fine for this simple test
+        genai.configure(api_key=api_key)
+
+        # Specify the model name as requested
+        model_name = "gemini-2.5-flash-preview-05-20"
+
+        # Create a GenerativeModel instance
+        model = genai.GenerativeModel(model_name)
+
+        # Generate content based on the user's prompt
+        # Direct execution of the API call
+        response = model.generate_content(user_prompt)
+
+        # Log the response text (optional, for server-side debugging)
+        logging.info(f"Gemini API call successful. Prompt: '{user_prompt}'. Response: '{response.text[:100]}...'")
+
+        return response.text
+    except Exception as e:
+        # Catching exceptions here to provide a user-friendly message in the UI
+        # instead of letting the Gradio app crash or show a raw error.
+        logging.error(f"Error calling Gemini API: {e}", exc_info=True)
+        return f"Errore durante la chiamata API Gemini: {str(e)}"
+
+
 # --- Gradio UI Blocks ---
 with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"),
                title="LinkedIn Organization Dashboard") as app:
@@ -193,6 +237,37 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"),
     sync_status_html_output = gr.HTML("<p style='text-align:center;'>Stato sincronizzazione...</p>")
     dashboard_display_html = gr.HTML("<p style='text-align:center;'>Caricamento dashboard...</p>")
 
+    # --- Gemini API Test Section ---
+    gr.Markdown("---") # Visual separator
+    gr.Markdown("## 🧪 Test Gemini API (Semplice)")
+    gr.Markdown("Inserisci un prompt e clicca il pulsante per testare la chiamata all'API Gemini.")
+
+    with gr.Row():
+        gemini_test_prompt_input = gr.Textbox(
+            label="Prompt per Gemini:",
+            value="Explain how AI works in a few words",
+            lines=2,
+            scale=3
+        )
+        gemini_test_btn = gr.Button("🚀 Esegui Test Gemini", scale=1, variant="secondary")
+
+    gemini_output_textbox = gr.Textbox(
+        label="Risposta da Gemini:",
+        interactive=False,
+        lines=5,
+        show_label=True,
+        placeholder="La risposta di Gemini apparirà qui..."
+    )
+
+    # Connect the button to the Gemini API call function
+    gemini_test_btn.click(
+        fn=call_gemini_api_direct_with_prompt,
+        inputs=[gemini_test_prompt_input],
+        outputs=[gemini_output_textbox],
+        api_name="test_gemini_simple" # Optional: for API access if needed by Gradio client
+    )
+    # --- End Gemini API Test Section ---
+
     org_urn_display.change(
         fn=initial_load_sequence,
         inputs=[url_user_token_display, org_urn_display, token_state],
@@ -543,5 +618,6 @@ if __name__ == "__main__":
     except ImportError:
         logging.error("Matplotlib non è installato. I grafici non verranno generati.")
 
+    # Ensure you have the google-generativeai library installed:
+    # pip install google-generativeai
     app.launch(server_name="0.0.0.0", server_port=7860, debug=True)
-
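The new call_gemini_api_direct_with_prompt hardcodes its API key and only flags the risk in a comment. Following that comment's own suggestion, the key could instead be read from the environment (on a Hugging Face Space, a repository secret exposed as an environment variable). Below is a minimal sketch of that variant, assuming the same google-generativeai calls used in the diff and an environment variable named GEMINI_API_KEY, which is not part of the original app.py:

import os
import logging

import google.generativeai as genai

def call_gemini_api_with_env_key(user_prompt):
    """Variant of the commit's test function that reads the API key from the environment."""
    # GEMINI_API_KEY is an assumed name; set it as a Space secret or a local env variable.
    api_key = os.environ.get("GEMINI_API_KEY")
    if not api_key:
        return "GEMINI_API_KEY is not set."
    if not user_prompt:
        return "Please enter a prompt."

    try:
        genai.configure(api_key=api_key)
        # Same model name the commit uses.
        model = genai.GenerativeModel("gemini-2.5-flash-preview-05-20")
        response = model.generate_content(user_prompt)
        return response.text
    except Exception as e:
        logging.error(f"Error calling Gemini API: {e}", exc_info=True)
        return f"Error calling Gemini API: {e}"

Wired into the same Gradio Blocks, the button handler would only need its fn swapped. The trailing "pip install google-generativeai" reminder would, on a Space, typically translate to adding google-generativeai to requirements.txt.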