aidevhund committed on
Commit
ce2af7e
·
verified ·
1 Parent(s): 9c12531

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +69 -108
app.py CHANGED
@@ -11,32 +11,86 @@ client = OpenAI(
11
  )
12
  print("OpenAI client initialized.")
13
 
14
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
  def respond(
16
  message,
17
  history: list[tuple[str, str]],
18
- system_message,
19
  max_tokens,
20
  temperature,
21
  top_p,
22
  frequency_penalty,
23
- seed,
24
- custom_model
25
  ):
26
-
27
  print(f"Received message: {message}")
28
  print(f"History: {history}")
29
- print(f"System message: {system_message}")
30
- print(f"Max tokens: {max_tokens}, Temperature: {temperature}, Top-P: {top_p}")
31
- print(f"Frequency Penalty: {frequency_penalty}, Seed: {seed}")
32
- print(f"Selected model (custom_model): {custom_model}")
33
 
34
  # Convert seed to None if -1 (meaning random)
35
  if seed == -1:
36
  seed = None
37
 
38
- messages = [{"role": "system", "content": system_message}]
39
- print("Initial messages array constructed.")
40
 
41
  # Add conversation history to the context
42
  for val in history:
@@ -44,25 +98,18 @@ def respond(
44
  assistant_part = val[1]
45
  if user_part:
46
  messages.append({"role": "user", "content": user_part})
47
- print(f"Added user message to context: {user_part}")
48
  if assistant_part:
49
  messages.append({"role": "assistant", "content": assistant_part})
50
- print(f"Added assistant message to context: {assistant_part}")
51
 
52
  # Append the latest user message
53
  messages.append({"role": "user", "content": message})
54
- print("Latest user message appended.")
55
 
56
- # If user provided a model, use that; otherwise, fall back to a default model
57
- model_to_use = custom_model.strip() if custom_model.strip() != "" else "meta-llama/Llama-3.3-70B-Instruct"
58
- print(f"Model selected for inference: {model_to_use}")
59
-
60
- # Start with an empty string to build the response as tokens stream in
61
  response = ""
62
  print("Sending request to OpenAI API.")
63
 
64
  for message_chunk in client.chat.completions.create(
65
- model=model_to_use,
66
  max_tokens=max_tokens,
67
  stream=True,
68
  temperature=temperature,
@@ -72,18 +119,13 @@ def respond(
72
  messages=messages,
73
  ):
74
  token_text = message_chunk.choices[0].delta.content
75
- print(f"Received token: {token_text}")
76
  response += token_text
77
  yield response
78
 
79
  print("Completed response generation.")
80
 
81
- # GRADIO UI
82
-
83
- chatbot = gr.Chatbot(height=600, show_copy_button=True, placeholder="Select a model and begin chatting", likeable=True, layout="panel")
84
- print("Chatbot interface created.")
85
-
86
- system_message_box = gr.Textbox(value="", placeholder="You are a helpful assistant.", label="System Prompt")
87
 
88
  max_tokens_slider = gr.Slider(
89
  minimum=1,
@@ -121,99 +163,18 @@ seed_slider = gr.Slider(
121
  label="Seed (-1 for random)"
122
  )
123
 
124
- # The custom_model_box is what the respond function sees as "custom_model"
125
- custom_model_box = gr.Textbox(
126
- value="",
127
- label="Custom Model",
128
- info="(Optional) Provide a custom Hugging Face model path. Overrides any selected featured model.",
129
- placeholder="meta-llama/Llama-3.3-70B-Instruct"
130
- )
131
-
132
- def set_custom_model_from_radio(selected):
133
- """
134
- This function will get triggered whenever someone picks a model from the 'Featured Models' radio.
135
- We will update the Custom Model text box with that selection automatically.
136
- """
137
- print(f"Featured model selected: {selected}")
138
- return selected
139
-
140
  demo = gr.ChatInterface(
141
  fn=respond,
142
  additional_inputs=[
143
- system_message_box,
144
  max_tokens_slider,
145
  temperature_slider,
146
  top_p_slider,
147
  frequency_penalty_slider,
148
  seed_slider,
149
- custom_model_box,
150
  ],
151
  fill_height=True,
152
  chatbot=chatbot,
153
- theme="Nymbo/Nymbo_Theme",
154
  )
155
- print("ChatInterface object created.")
156
-
157
- with demo:
158
- with gr.Accordion("Model Selection", open=False):
159
- model_search_box = gr.Textbox(
160
- label="Filter Models",
161
- placeholder="Search for a featured model...",
162
- lines=1
163
- )
164
- print("Model search box created.")
165
-
166
- models_list = [
167
- "meta-llama/Llama-3.3-70B-Instruct",
168
- "meta-llama/Llama-3.2-3B-Instruct",
169
- "meta-llama/Llama-3.2-1B-Instruct",
170
- "meta-llama/Llama-3.1-8B-Instruct",
171
- "NousResearch/Hermes-3-Llama-3.1-8B",
172
- "google/gemma-2-27b-it",
173
- "google/gemma-2-9b-it",
174
- "google/gemma-2-2b-it",
175
- "mistralai/Mistral-Nemo-Instruct-2407",
176
- "mistralai/Mixtral-8x7B-Instruct-v0.1",
177
- "mistralai/Mistral-7B-Instruct-v0.3",
178
- "Qwen/Qwen2.5-72B-Instruct",
179
- "Qwen/QwQ-32B-Preview",
180
- "PowerInfer/SmallThinker-3B-Preview",
181
- "HuggingFaceTB/SmolLM2-1.7B-Instruct",
182
- "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
183
- "microsoft/Phi-3.5-mini-instruct",
184
- ]
185
- print("Models list initialized.")
186
-
187
- featured_model_radio = gr.Radio(
188
- label="Select a model below",
189
- choices=models_list,
190
- value="meta-llama/Llama-3.3-70B-Instruct",
191
- interactive=True
192
- )
193
- print("Featured models radio button created.")
194
-
195
- def filter_models(search_term):
196
- print(f"Filtering models with search term: {search_term}")
197
- filtered = [m for m in models_list if search_term.lower() in m.lower()]
198
- print(f"Filtered models: {filtered}")
199
- return gr.update(choices=filtered)
200
-
201
- model_search_box.change(
202
- fn=filter_models,
203
- inputs=model_search_box,
204
- outputs=featured_model_radio
205
- )
206
- print("Model search box change event linked.")
207
-
208
- featured_model_radio.change(
209
- fn=set_custom_model_from_radio,
210
- inputs=featured_model_radio,
211
- outputs=custom_model_box
212
- )
213
- print("Featured model radio button change event linked.")
214
-
215
- print("Gradio interface initialized.")
216
 
217
  if __name__ == "__main__":
218
- print("Launching the demo application.")
219
  demo.launch()
 
11
  )
12
  print("OpenAI client initialized.")
13
 
14
+ # Define a comprehensive system prompt
15
+ SYSTEM_PROMPT = """
16
+ You are a highly knowledgeable and reliable Crypto Trading Advisor and Analyzer designed and created by HundAI. Your primary goal is to assist users in understanding, analyzing, and making informed decisions about cryptocurrency trading. You provide accurate, concise, and actionable advice based on real-time data, historical trends, and established best practices. Below are your core responsibilities and interaction guidelines:
17
+
18
+ ### 1. Communication Style
19
+ - Be professional, approachable, and clear.
20
+ - Explain complex terms in simple language, especially for novice users.
21
+ - Always maintain an unbiased, neutral stance and avoid recommending specific cryptocurrencies or financial decisions.
22
+
23
+ ### 2. Core Responsibilities
24
+ #### Market Analysis:
25
+ - Analyze and provide insights into cryptocurrency market trends, including market capitalization, trading volume, price momentum, and historical performance.
26
+ - Identify patterns, trends, and potential opportunities based on user-provided data or general market conditions.
27
+
28
+ #### Portfolio Insights:
29
+ - Help users review their crypto portfolios for diversification, risk exposure, and potential improvements.
30
+ - Suggest strategies for optimizing portfolio performance based on market conditions.
31
+
32
+ #### Risk Management:
33
+ - Educate users on effective risk management strategies, including stop-loss and take-profit orders, position sizing, and diversification.
34
+ - Warn about potential risks like high volatility, scams, or regulatory changes.
35
+
36
+ #### Technical Analysis:
37
+ - Provide detailed chart analysis using tools like moving averages, RSI, MACD, Bollinger Bands, Fibonacci retracements, and candlestick patterns.
38
+ - Explain support and resistance levels, trend lines, and potential breakout scenarios.
39
+
40
+ #### Fundamental Analysis:
41
+ - Share insights into the fundamentals of cryptocurrencies, including tokenomics, utility, developer activity, and recent news.
42
+ - Highlight events such as regulatory updates, partnerships, or technological advancements that may impact the market.
43
+
44
+ #### Education and Guidance:
45
+ - Educate users about blockchain technology, decentralized finance (DeFi), staking, NFTs, and emerging trends.
46
+ - Offer advice tailored to different trading styles (e.g., day trading, swing trading, long-term investing).
47
+
48
+ #### Alert Mechanism:
49
+ - Notify users about significant market events like price surges, dips, or whale movements.
50
+ - Provide insights on real-time news and announcements impacting the crypto market.
51
+
52
+ ### 3. Interaction Guidelines
53
+ - Respond promptly and accurately to user queries.
54
+ - Suggest safe and ethical trading practices.
55
+ - Always remind users to do their own research (DYOR) and consult financial professionals where appropriate.
56
+
57
+ ### 4. Disclaimer
58
+ - Remind users that cryptocurrency trading involves significant risk and past performance does not guarantee future results.
59
+ - Clearly state that your responses are for informational purposes only and not financial advice.
60
+
61
+ ### Example Interactions
62
+ #### Example 1: Market Analysis
63
+ _User Query:_ "What’s the current trend of Bitcoin?"
64
+ _Response:_ "Bitcoin is currently trading at $X, showing a [bullish/bearish] trend over the past 24 hours. Trading volume has [increased/decreased] by X%, and RSI indicates [overbought/oversold] conditions. Short-term support is at $Y, and resistance is at $Z."
65
+
66
+ #### Example 2: Portfolio Review
67
+ _User Query:_ "Is my portfolio balanced?"
68
+ _Response:_ "Your portfolio comprises X% Bitcoin, Y% Ethereum, and Z% altcoins. To reduce risk, consider allocating X% to stablecoins or large-cap cryptocurrencies. Currently, your exposure to high-volatility assets is X%, which may pose additional risk."
69
+
70
+ #### Example 3: Risk Management
71
+ _User Query:_ "How do I protect my trades?"
72
+ _Response:_ "You can use stop-loss orders at $X to limit potential losses or take-profit orders at $Y to secure gains. Avoid over-leveraging and limit each trade to a percentage of your total capital, such as 1-2%."
73
+ """
74
+
75
+ # Function to handle chatbot responses
76
  def respond(
77
  message,
78
  history: list[tuple[str, str]],
 
79
  max_tokens,
80
  temperature,
81
  top_p,
82
  frequency_penalty,
83
+ seed
 
84
  ):
 
85
  print(f"Received message: {message}")
86
  print(f"History: {history}")
 
 
 
 
87
 
88
  # Convert seed to None if -1 (meaning random)
89
  if seed == -1:
90
  seed = None
91
 
92
+ messages = [{"role": "system", "content": SYSTEM_PROMPT}]
93
+ print("System prompt added to messages.")
94
 
95
  # Add conversation history to the context
96
  for val in history:
 
98
  assistant_part = val[1]
99
  if user_part:
100
  messages.append({"role": "user", "content": user_part})
 
101
  if assistant_part:
102
  messages.append({"role": "assistant", "content": assistant_part})
 
103
 
104
  # Append the latest user message
105
  messages.append({"role": "user", "content": message})
 
106
 
107
+ # Start response generation
 
 
 
 
108
  response = ""
109
  print("Sending request to OpenAI API.")
110
 
111
  for message_chunk in client.chat.completions.create(
112
+ model="meta-llama/Llama-3.3-70B-Instruct",
113
  max_tokens=max_tokens,
114
  stream=True,
115
  temperature=temperature,
 
119
  messages=messages,
120
  ):
121
  token_text = message_chunk.choices[0].delta.content
 
122
  response += token_text
123
  yield response
124
 
125
  print("Completed response generation.")
126
 
127
+ # Gradio UI
128
+ chatbot = gr.Chatbot(height=600, show_copy_button=True, placeholder="Ask about crypto trading or analysis.", likeable=True)
 
 
 
 
129
 
130
  max_tokens_slider = gr.Slider(
131
  minimum=1,
 
163
  label="Seed (-1 for random)"
164
  )
165
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
166
  demo = gr.ChatInterface(
167
  fn=respond,
168
  additional_inputs=[
 
169
  max_tokens_slider,
170
  temperature_slider,
171
  top_p_slider,
172
  frequency_penalty_slider,
173
  seed_slider,
 
174
  ],
175
  fill_height=True,
176
  chatbot=chatbot,
 
177
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
178
 
179
  if __name__ == "__main__":
 
180
  demo.launch()