pratikshahp committed
Commit 2eaf991 · verified · 1 Parent(s): ac02fee

Update app.py

Files changed (1)
  1. app.py +42 -36
app.py CHANGED
@@ -1,69 +1,75 @@
 import gradio as gr
-from together import Together
-from helper import get_together_api_key
+from helper import load_world, get_together_api_key
 from guardrail import is_safe
+from together import Together

 # Initialize Together client
 client = Together(api_key=get_together_api_key())

-# Function to generate responses
-def generate_response(message, history):
-    system_prompt = """You are an AI assistant specialized in financial discussions. Please answer questions only related to finance. If the question is unrelated, respond with: 'I am sorry, I can only answer financial-related questions.'"""
+# Gradio interface functions
+def run_action(message, history):
+    """
+    Generate a response based on the user's input and validate it against guardrails.
+    """
+    if message.lower() == "exit":
+        return "Thank you for using the Finance Chatbot. Goodbye!"
+
+    # Construct the prompt for the assistant
+    system_prompt = """You are a financial assistant. You can only answer finance-related queries.
+    - Do not answer non-finance questions.
+    - Ensure responses adhere to the safety policy."""

-    # Build the conversation context
-    messages = [
-        {"role": "system", "content": system_prompt},
-    ]
+    messages = [{"role": "system", "content": system_prompt}]

-    for action in history:
-        if isinstance(action, tuple) and len(action) == 2:
-            messages.append({"role": "user", "content": action[0]})
-            messages.append({"role": "assistant", "content": action[1]})
+    for user_message, bot_response in history:
+        messages.append({"role": "user", "content": user_message})
+        messages.append({"role": "assistant", "content": bot_response})

     messages.append({"role": "user", "content": message})

-    # Generate response using the Llama conversational model
-    model_output = client.chat.completions.create(
+    # Generate response using LLM
+    response = client.chat.completions.create(
         model="meta-llama/Llama-3-70b-chat-hf",
         messages=messages,
-    )
+    ).choices[0].message.content

-    return model_output.choices[0].message.content
+    # Check if the response is safe
+    if not is_safe(response):
+        return "Sorry, I cannot provide a safe response to that query."

-# Main function to handle user input and responses
-def main_loop(message, history):
-    # Use LlamaGuard for safety checks
-    if not is_safe(message):
-        return "Your input violates safety guidelines. Please rephrase your question.", history
+    return response

-    response = generate_response(message, history)

-    # Perform safety check on the generated response
-    if not is_safe(response):
-        return "The generated response violates safety guidelines. Please try a different question.", history
+def main_loop(message, history):
+    """
+    Main loop for the chatbot to handle user input.
+    """
+    # Validate the user's input for safety
+    if not is_safe(message):
+        return "Your input violates our safety policy. Please try again with a finance-related query."
+
+    # Generate and validate the response
+    return run_action(message, history)

-    # Append user message and response to history
-    history.append((message, response))
-    return response, history

-# Gradio ChatInterface
+
+# Gradio Chat Interface
 demo = gr.ChatInterface(
     main_loop,
     chatbot=gr.Chatbot(
         height=450,
-        placeholder="Type your financial question here...",
-        type="messages",  # Ensures proper rendering
+        placeholder="Ask a finance-related question. Type 'exit' to quit.",
+        type="messages",  # Proper rendering of chat format
     ),
     textbox=gr.Textbox(
-        placeholder="Ask about finance (e.g., investments, savings, etc.)",
+        placeholder="What do you want to ask about finance?",
         container=False,
         scale=7,
     ),
-    title="Financial Chatbot",
+    title="Finance Chatbot",
     theme="Monochrome",
-    examples=["What are mutual funds?", "How can I save for retirement?"],
+    examples=["What is compound interest?", "How to save for retirement?", "What are tax-saving options?"],
     cache_examples=False,
 )

 # Launch the Gradio app
-demo.launch(share=True, server_name="0.0.0.0")
+demo.launch(share=True, server_name="0.0.0.0")
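A note for anyone reusing this code: with type="messages" set on the Chatbot, current Gradio versions pass history to the ChatInterface function as a list of {"role": ..., "content": ...} dicts rather than (user, bot) tuples, so the tuple unpacking in run_action can fail at runtime. Below is a minimal sketch of a history normalizer that accepts either shape; the helper name history_to_messages is hypothetical and not part of this repository.

# Sketch only: normalize Gradio ChatInterface history into OpenAI-style messages.
# Assumes entries are either {"role": ..., "content": ...} dicts (type="messages")
# or (user, bot) tuples (older tuple-style history); adjust for your Gradio version.
def history_to_messages(history, system_prompt):
    messages = [{"role": "system", "content": system_prompt}]
    for entry in history:
        if isinstance(entry, dict):
            # Messages format: the entry is already a role/content pair.
            messages.append({"role": entry["role"], "content": entry["content"]})
        elif isinstance(entry, (tuple, list)) and len(entry) == 2:
            # Tuple format: one (user, bot) exchange per entry.
            user_message, bot_response = entry
            messages.append({"role": "user", "content": user_message})
            messages.append({"role": "assistant", "content": bot_response})
    return messages

run_action could then build its request with messages = history_to_messages(history, system_prompt) and append the current user message before calling the model.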
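The is_safe guardrail is imported from guardrail.py, which this commit does not touch; the previous version's comment ("Use LlamaGuard for safety checks") suggests it wraps a Llama Guard moderation call through the same Together client. A minimal sketch of such a helper is shown below; the GUARD_MODEL ID is an assumption and should be replaced with whichever Llama Guard variant your Together account can access.

# Hypothetical sketch of guardrail.py, not the repository's actual implementation.
from together import Together
from helper import get_together_api_key

client = Together(api_key=get_together_api_key())

# Assumed model ID; check Together's model catalog for an available Llama Guard variant.
GUARD_MODEL = "meta-llama/Meta-Llama-Guard-3-8B"

def is_safe(text: str) -> bool:
    """Return True when the Llama Guard classifier labels the text as safe."""
    result = client.chat.completions.create(
        model=GUARD_MODEL,
        messages=[{"role": "user", "content": text}],
    )
    verdict = result.choices[0].message.content.strip().lower()
    # Llama Guard answers with "safe", or "unsafe" followed by violated category codes.
    return verdict.startswith("safe")

Because main_loop checks the user message and run_action checks the generated reply, each turn adds two moderation requests on top of the main chat completion.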