akhatr-phyniks committed
Commit c5dfb6b · verified · 1 Parent(s): c3d2e3e

updated langfuse added authentication

Files changed (1)
  1. app.py +42 -47
app.py CHANGED
@@ -3,6 +3,7 @@ import uuid
 import gradio as gr
 from openai import OpenAI
 from langfuse import Langfuse
+from langfuse.decorators import observe
 from dotenv import load_dotenv
 
 # Load environment variables from .env file if it exists
@@ -20,6 +21,7 @@ try:
     langfuse = Langfuse(
         public_key=os.getenv("LANGFUSE_PUBLIC_KEY"),
         secret_key=os.getenv("LANGFUSE_SECRET_KEY"),
+        host=os.getenv("LANGFUSE_HOST")
     )
 except Exception as e:
     print(f"Warning: Langfuse client initialization failed: {e}")
@@ -41,25 +43,38 @@ def create_chat_interface():
     except Exception as e:
         print(f"Warning: Failed to create Langfuse trace: {e}")
 
-    def respond(message, chat_history):
-        if not message:
-            return chat_history, ""
+    @observe(
+        name="chat_completion",
+        capture_input=True,
+        capture_output=True
+    )
+    def get_completion(messages):
+        if not client:
+            raise Exception("OpenAI client not initialized. Please check your API key.")
+
+        response = client.chat.completions.create(
+            model="gpt-3.5-turbo",
+            messages=messages
+        )
 
-        # Track user message if Langfuse is available
-        span = None
-        if trace:
+        # Add model info to the span
+        if langfuse and trace:
             try:
-                span = trace.span(
-                    name="user_message",
-                    input={"message": message}
-                )
+                current_span = trace.get_current_span()
+                if current_span:
+                    current_span.update(
+                        metadata={"model": "gpt-3.5-turbo", "tokens": response.usage.total_tokens}
+                    )
             except Exception as e:
-                print(f"Warning: Failed to create Langfuse span: {e}")
+                print(f"Warning: Failed to update span metadata: {e}")
 
-        try:
-            if not client:
-                return chat_history + [(message, "Error: OpenAI client not initialized. Please check your API key.")], ""
+        return response.choices[0].message.content
 
+    def respond(message, chat_history):
+        if not message:
+            return chat_history, ""
+
+        try:
             # Format chat history for OpenAI
             messages = [
                 {"role": "system", "content": """
@@ -128,48 +143,20 @@ Write like the stakes are real. Because they are.
             # Add current message
             messages.append({"role": "user", "content": message})
 
-            # Get response from OpenAI
-            response = client.chat.completions.create(
-                model="gpt-3.5-turbo",
-                messages=messages
-            )
-
-            assistant_message = response.choices[0].message.content
-
-            # Track assistant response if Langfuse is available
-            if span:
-                try:
-                    span.end(
-                        output={"response": assistant_message},
-                        metadata={
-                            "model": "gpt-3.5-turbo",
-                            "tokens": response.usage.total_tokens
-                        }
-                    )
-                except Exception as e:
-                    print(f"Warning: Failed to end Langfuse span: {e}")
+            # Get response from OpenAI with Langfuse tracking
+            assistant_message = get_completion(messages)
 
             chat_history.append((message, assistant_message))
             return chat_history, ""
 
         except Exception as e:
-            # End span with error if Langfuse is available
-            if span:
-                try:
-                    span.end(
-                        output={"error": str(e)},
-                        level="ERROR"
-                    )
-                except Exception as span_error:
-                    print(f"Warning: Failed to end Langfuse span with error: {span_error}")
-
             error_message = f"Error: {str(e)}"
             return chat_history + [(message, error_message)], ""
 
     # Create Gradio interface
     with gr.Blocks() as demo:
-        gr.Markdown("# Rag Bot AI - LLM")
-        gr.Markdown("")
+        gr.Markdown("# Greg Logan AI - Brand Strategy Assistant")
+        gr.Markdown("Get direct, punchy, and provocative brand strategy insights from Greg Logan's perspective.")
 
         chatbot = gr.Chatbot(height=600)
         with gr.Row():
@@ -187,4 +174,12 @@ Write like the stakes are real. Because they are.
 
 # Create and launch the interface
 demo = create_chat_interface()
-demo.launch()
+
+# Get auth credentials from environment variables
+auth_username = os.getenv("AUTH_USERNAME", "admin")
+auth_password = os.getenv("AUTH_PASSWORD", "admin")
+
+# Launch with authentication
+demo.launch(
+    auth=(auth_username, auth_password)
+)
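
Deployment note (not part of the commit): with this change, app.py reads LANGFUSE_PUBLIC_KEY, LANGFUSE_SECRET_KEY, and LANGFUSE_HOST for tracing, plus AUTH_USERNAME and AUTH_PASSWORD for the Gradio login, with both credentials defaulting to "admin". The sketch below is a minimal, hypothetical startup check one could run alongside the app to fail fast when the Langfuse keys are missing or the credentials are still the defaults; the check_env helper is an assumption, not something in the repository.

import os

# Hypothetical helper (assumption, not in the commit): verify the environment
# before launching the Space.
REQUIRED_LANGFUSE_VARS = ("LANGFUSE_PUBLIC_KEY", "LANGFUSE_SECRET_KEY", "LANGFUSE_HOST")

def check_env():
    missing = [name for name in REQUIRED_LANGFUSE_VARS if not os.getenv(name)]
    if missing:
        # Tracing degrades gracefully in app.py, so only warn here.
        print(f"Warning: missing Langfuse variables: {', '.join(missing)}")
    username = os.getenv("AUTH_USERNAME", "admin")
    password = os.getenv("AUTH_PASSWORD", "admin")
    if (username, password) == ("admin", "admin"):
        # app.py falls back to admin/admin; refuse to ship that default.
        raise SystemExit("Set AUTH_USERNAME and AUTH_PASSWORD before launching.")

if __name__ == "__main__":
    check_env()

Gradio's launch() also accepts a callable for auth (taking a username and password and returning a bool), which would allow validating against a user store instead of a single environment-variable pair.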