Reality123b committed on
Commit
a73ce8f
·
verified ·
1 Parent(s): 56d5550

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +56 -24
app.py CHANGED
@@ -187,16 +187,6 @@ def respond(
187
  yield custom_response
188
  return
189
 
190
- if is_image_request(message):
191
- try:
192
- image = generate_image(message)
193
- if image:
194
- return f"Here's your generated image based on: {message}"
195
- else:
196
- return "Sorry, I couldn't generate the image. Please try again."
197
- except Exception as e:
198
- return f"An error occurred while generating the image: {str(e)}"
199
-
200
  # Prepare conversation history
201
  messages = [{"role": "system", "content": system_message}]
202
  for val in history:
@@ -207,18 +197,61 @@ def respond(
207
 
208
  messages.append({"role": "user", "content": message})
209
 
210
- # Get response from model
211
- response = ""
212
- for message in text_client.chat_completion(
213
- messages,
214
- max_tokens=max_tokens,
215
- stream=True,
216
- temperature=temperature,
217
- top_p=top_p,
218
- ):
219
- token = message.choices[0].delta.content
220
- response += token
221
- yield response
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
222
 
223
  yield response
224
 
@@ -260,7 +293,6 @@ Focus on accuracy, respect for sensitive topics, transparency, and professionali
260
  Version: Xylaria-1.2.9
261
  """
262
 
263
-
264
  # Gradio chat interface
265
  demo = gr.ChatInterface(
266
  respond,
@@ -293,4 +325,4 @@ demo = gr.ChatInterface(
293
  ],
294
  css=custom_css
295
  )
296
- demo.launch()
 
187
  yield custom_response
188
  return
189
 
 
 
 
 
 
 
 
 
 
 
190
  # Prepare conversation history
191
  messages = [{"role": "system", "content": system_message}]
192
  for val in history:
 
197
 
198
  messages.append({"role": "user", "content": message})
199
 
200
+ # Check if the user wants a fast or critical response
201
+ if "critical" in message.lower():
202
+ # Critical thinker logic (like the 'nemo' function)
203
+ budget = 3 # Starting budget for steps
204
+ step_response = ""
205
+ message_critical = f"""[INST] [SYSTEM] You are a helpful assistant in normal conversation.
206
+ When given a problem to solve, you are an expert problem-solving assistant.
207
+ Your task is to provide a detailed, step-by-step solution to a given question.
208
+ Follow these instructions carefully:
209
+ 1. Read the given question carefully and reset counter between <count> and </count> to {budget} (maximum 3 steps).
210
+ 2. Think critically like a human researcher or scientist. Break down the problem using first principles to conceptually understand and answer the question.
211
+ 3. Generate a detailed, logical step-by-step solution.
212
+ 4. Enclose each step of your solution within <step> and </step> tags.
213
+ 5. You are allowed to use at most {budget} steps (starting budget), keep track of it by counting down within tags <count> </count>, STOP GENERATING MORE STEPS when hitting 0, you don't have to use all of them.
214
+ 6. Do a self-reflection when you are unsure about how to proceed, based on the self-reflection and reward, decide whether you need to return to the previous steps.
215
+ 7. After completing the solution steps, reorganize and synthesize the steps into the final answer within <answer> and </answer> tags.
216
+ 8. Provide a critical, honest, and subjective self-evaluation of your reasoning process within <reflection> and </reflection> tags.
217
+ 9. Assign a quality score to your solution as a float between 0.0 (lowest quality) and 1.0 (highest quality), enclosed in <reward> and </reward> tags.
218
+ Example format:
219
+ <count> [starting budget] </count>
220
+ <step> [Content of step 1] </step>
221
+ <count> [remaining budget] </count>
222
+ <step> [Content of step 2] </step>
223
+ <reflection> [Evaluation of the steps so far] </reflection>
224
+ <reward> [Float between 0.0 and 1.0] </reward>
225
+ <count> [remaining budget] </count>
226
+ <step> [Content of step 3 or Content of some previous step] </step>
227
+ <count> [remaining budget] </count>
228
+ ...
229
+ <step> [Content of final step] </step>
230
+ <count> [remaining budget] </count>
231
+ <answer> [Final Answer] </answer> (must give final answer in this format)
232
+ <reflection> [Evaluation of the solution] </reflection>
233
+ <reward> [Float between 0.0 and 1.0] </reward> [/INST] [INST] [QUERY] {message} [/INST] [ASSISTANT] """
234
+
235
+ # Send critical thinking message to client and stream response
236
+ stream = text_client.text_generation(message_critical, max_new_tokens=4096, stream=True, details=True, return_full_text=False)
237
+ critical_response = ""
238
+ for response in stream:
239
+ critical_response += response.token.text
240
+ yield critical_response
241
+
242
+ else:
243
+ # Standard fast response logic (like 'models')
244
+ response = ""
245
+ for message in text_client.chat_completion(
246
+ messages,
247
+ max_tokens=max_tokens,
248
+ stream=True,
249
+ temperature=temperature,
250
+ top_p=top_p,
251
+ ):
252
+ token = message.choices[0].delta.content
253
+ response += token
254
+ yield response
255
 
256
  yield response
257
 
 
293
  Version: Xylaria-1.2.9
294
  """
295
 
 
296
  # Gradio chat interface
297
  demo = gr.ChatInterface(
298
  respond,
 
325
  ],
326
  css=custom_css
327
  )
328
+ demo.launch()