Update app.py
Browse files
app.py
CHANGED
@@ -8,36 +8,46 @@ client = Together(api_key=get_together_api_key())
|
|
8 |
|
9 |
# Gradio interface functions
def run_action(message, history):
    """Handle one chat turn from the Gradio interface.

    Game-control commands ("start game", "restart the game", "exit") are
    handled locally against the module-level game state; any other message
    is forwarded to the chat model together with the prior conversation
    history.

    Args:
        message: The user's latest input as a plain string.
        history: Prior turns as a list of {"role": ..., "content": ...}
            dicts (Gradio "messages" format — TODO confirm against the
            ChatInterface configuration elsewhere in this file).

    Returns:
        The assistant's reply as a string.
    """
    global game_state, game_running  # Access the global game state and game status

    # Handle restart BEFORE the game_running guard. Previously the
    # `if not game_running` early return came first, so once the user
    # typed "exit" the "restart the game" handler was unreachable —
    # even though the ended-game message told users to type it.
    if message.lower() == "restart the game":
        game_state = initialize_game_state()
        game_running = True  # was never reset before; restart now actually revives the game
        return "Game restarted! " + game_state["start"]

    if not game_running:
        return "The game has ended. Type 'restart the game' to play again."

    if message.lower() == "start game":
        return game_state["start"]

    if message.lower() == "exit":
        game_running = False
        return "The game has ended. Type 'restart the game' to play again."

    # NOTE(review): this prompt describes a *financial assistant* while the
    # surrounding command handling is game logic — confirm which behavior
    # this deployment actually wants.
    system_prompt = """You are a financial assistant. You can only answer finance-related queries.
    - Do not answer non-finance questions.
    - Answer in 50 words
    - Ensure responses adhere to the safety policy."""

    messages = [{"role": "system", "content": system_prompt}]

    # Convert history into the appropriate format
    for entry in history:
        if entry["role"] == "user":
            messages.append({"role": "user", "content": entry["content"]})
        elif entry["role"] == "assistant":
            messages.append({"role": "assistant", "content": entry["content"]})

    # Add the user's current action
    messages.append({"role": "user", "content": message})

    # Get the model's response
    model_output = client.chat.completions.create(
        model="meta-llama/Llama-3-70b-chat-hf",
        messages=messages,
    )

    # NOTE(review): the previous revision passed the reply through
    # is_safe() and refused unsafe output; that guard was dropped in this
    # change — confirm whether the safety check should be restored.
    return model_output.choices[0].message.content
|
52 |
|
53 |
def main_loop(message, history):
|