aidevhund committed
Commit 8dad82c · verified · 1 Parent(s): c7f8a5d

Update app.py

Files changed (1)
  1. app.py +14 -71
app.py CHANGED
@@ -11,25 +11,21 @@ client = OpenAI(
 )
 print("OpenAI client initialized.")
 
-
 def respond(
     message,
     history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-    frequency_penalty,
-    seed,
-    custom_model
+    system_message="You are a helpful assistant.",
+    max_tokens=512,
+    temperature=0.7,
+    top_p=0.95,
+    frequency_penalty=0.0,
+    seed=-1
 ):
-
     print(f"Received message: {message}")
     print(f"History: {history}")
     print(f"System message: {system_message}")
     print(f"Max tokens: {max_tokens}, Temperature: {temperature}, Top-P: {top_p}")
     print(f"Frequency Penalty: {frequency_penalty}, Seed: {seed}")
-    print(f"Selected model (custom_model): {custom_model}")
 
     # Convert seed to None if -1 (meaning random)
     if seed == -1:
@@ -53,8 +49,8 @@ def respond(
     messages.append({"role": "user", "content": message})
     print("Latest user message appended.")
 
-    # If user provided a model, use that; otherwise, fall back to a default model
-    model_to_use = custom_model.strip() if custom_model.strip() != "" else "meta-llama/Llama-3.3-70B-Instruct"
+    # Set the model to "meta" by default
+    model_to_use = "meta-llama/Llama-3.3-70B-Instruct"
     print(f"Model selected for inference: {model_to_use}")
 
     # Start with an empty string to build the response as tokens stream in
@@ -80,10 +76,10 @@ def respond(
 
 # GRADIO UI
 
-chatbot = gr.Chatbot(height=600, show_copy_button=True, placeholder="Select a model and begin chatting", likeable=True, layout="panel")
+chatbot = gr.Chatbot(height=600, show_copy_button=True, placeholder="Start chatting!", likeable=True, layout="panel")
 print("Chatbot interface created.")
 
-system_message_box = gr.Textbox(value="", placeholder="You are a helpful assistant.", label="System Prompt")
+system_message_box = gr.Textbox(value="You are a helpful assistant.", label="System Prompt", visible=False)
 
 max_tokens_slider = gr.Slider(
     minimum=1,
@@ -121,21 +117,7 @@ seed_slider = gr.Slider(
     label="Seed (-1 for random)"
 )
 
-# The custom_model_box is what the respond function sees as "custom_model"
-custom_model_box = gr.Textbox(
-    value="",
-    label="Custom Model",
-    info="(Optional) Provide a custom Hugging Face model path. Overrides any selected featured model.",
-    placeholder="meta-llama/Llama-3.3-70B-Instruct"
-)
-
-def set_custom_model_from_radio(selected):
-    """
-    This function will get triggered whenever someone picks a model from the 'Featured Models' radio.
-    We will update the Custom Model text box with that selection automatically.
-    """
-    print(f"Featured model selected: {selected}")
-    return selected
+# Removed the custom_model_box as the model is pre-set
 
 demo = gr.ChatInterface(
     fn=respond,
@@ -146,7 +128,6 @@ demo = gr.ChatInterface(
         top_p_slider,
         frequency_penalty_slider,
         seed_slider,
-        custom_model_box,
     ],
     fill_height=True,
     chatbot=chatbot,
@@ -155,49 +136,11 @@ demo = gr.ChatInterface(
 print("ChatInterface object created.")
 
 with demo:
-    with gr.Accordion("Model Selection", open=False):
-        model_search_box = gr.Textbox(
-            label="Filter Models",
-            placeholder="Search for a featured model...",
-            lines=1
-        )
-        print("Model search box created.")
-
-        models_list = [
-            "meta-llama/Llama-3.3-70B-Instruct"
-        ]
-        print("Models list initialized.")
-
-        featured_model_radio = gr.Radio(
-            label="Select a model below",
-            choices=models_list,
-            value="meta-llama/Llama-3.3-70B-Instruct",
-            interactive=True
-        )
-        print("Featured models radio button created.")
-
-        def filter_models(search_term):
-            print(f"Filtering models with search term: {search_term}")
-            filtered = [m for m in models_list if search_term.lower() in m.lower()]
-            print(f"Filtered models: {filtered}")
-            return gr.update(choices=filtered)
-
-        model_search_box.change(
-            fn=filter_models,
-            inputs=model_search_box,
-            outputs=featured_model_radio
-        )
-        print("Model search box change event linked.")
-
-        featured_model_radio.change(
-            fn=set_custom_model_from_radio,
-            inputs=featured_model_radio,
-            outputs=custom_model_box
-        )
-        print("Featured model radio button change event linked.")
+    # No need for a model selection accordion since the model is fixed to "meta-llama"
+    pass
 
 print("Gradio interface initialized.")
 
 if __name__ == "__main__":
     print("Launching the demo application.")
-    demo.launch()
+    demo.launch()
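
The hunk at @@ -53,8 +49,8 @@ picks up after the messages list has been assembled from history; that assembly sits outside the diff context. A minimal sketch of what it plausibly looks like, given the list[tuple[str, str]] type hint on history (the variable names come from the hunk context; the loop itself is an assumption):

# Assumed reconstruction of the elided history-to-messages step;
# only the final append is visible in the diff context.
messages = [{"role": "system", "content": system_message}]
for user_msg, assistant_msg in history:
    if user_msg:
        messages.append({"role": "user", "content": user_msg})
    if assistant_msg:
        messages.append({"role": "assistant", "content": assistant_msg})
messages.append({"role": "user", "content": message})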
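
The streaming section after model_to_use is likewise elided; the diff shows only the comment "Start with an empty string to build the response as tokens stream in". Assuming client is the OpenAI-compatible instance created at the top of app.py, the loop presumably resembles:

# Assumed sketch of the elided streaming loop; the parameters mirror the
# respond() signature, but the exact call is not visible in this diff.
response = ""
stream = client.chat.completions.create(
    model=model_to_use,
    messages=messages,
    max_tokens=max_tokens,
    temperature=temperature,
    top_p=top_p,
    frequency_penalty=frequency_penalty,
    seed=seed,
    stream=True,
)
for chunk in stream:
    token = chunk.choices[0].delta.content or ""
    response += token
    yield response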
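
Since every sampling parameter now carries a default, the simplified respond() can be exercised without launching the Gradio UI. A hypothetical smoke test:

# Hypothetical check: only the message and an empty history are needed
# now that system_message, max_tokens, temperature, top_p,
# frequency_penalty, and seed all have defaults.
final = ""
for partial in respond("Hello!", []):
    final = partial  # each yield is the accumulated reply so far
print(final)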