syedmudassir16 committed
Commit c3d587d · verified · 1 Parent(s): b444f81

Update utils.py

Files changed (1)
  1. utils.py +28 -60
utils.py CHANGED
@@ -145,7 +145,7 @@ def wave_header_chunk(frame_input=b"", channels=1, sample_width=2, sample_rate=2
     return wav_buf.read()
 
 def format_prompt(message, history):
-    system_message = """
+    system_message = f"""
     You are a smart mood analyser, who determines user mood. Based on the user input, classify the mood of the user into one of the four moods {Happy, Sad, Instrumental, Party}. If you are finding it difficult to classify into one of these four moods, keep the conversation going on until we classify the user’s mood. Return a single-word reply from one of the options if you have classified. Suppose you classify a sentence as happy, then just respond with "happy".
     Note: Do not write anything else other than the classified mood if classified.
    Note: If any question or any user text cannot be classified, follow up with a question to know the user's mood until you classify the mood.
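
One side effect of the f-string conversion: single braces inside an f-string are parsed as replacement fields, so a literal {Happy, Sad, Instrumental, Party} would need doubled braces to survive formatting. A minimal illustration (the names below are illustrative, not from utils.py):

# Illustrative only: doubling braces keeps them literal inside an f-string.
moods = "Happy, Sad, Instrumental, Party"
system_message = f"""
Classify the user's mood into one of the four moods {{{moods}}}.
"""
print(system_message)
# Classify the user's mood into one of the four moods {Happy, Sad, Instrumental, Party}.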
@@ -261,54 +261,19 @@ def format_prompt(message, history):
     User: Lets turn up the music and have some fun!
     LLM Response: Party
     """
-    prompt = f"<s>{system_message}"
+    prompt = (
+        "<s>[INST]" + system_message + "[/INST]"
+    )
     for user_prompt, bot_response in history:
-        prompt += f"\n User:{user_prompt}\n LLM Response:{bot_response}"
-
-    # Add the current message
-    prompt += f"\nUser: {message}\nLLM Response:"
+        if user_prompt is not None:
+            prompt += f"[INST] {user_prompt} [/INST]"
+        prompt += f" {bot_response}</s> "
+
+    if message=="":
+        message="Hello"
+    prompt += f"[INST] {message} [/INST]"
     return prompt
 
-def classify_mood(input_string):
-    input_string = input_string.lower()
-    mood_words = {"happy", "sad", "instrumental", "party"}
-    for word in mood_words:
-        if word in input_string:
-            return word, True
-    return None, False
-
-# def generate_llm_output(
-#     prompt, history, temperature=0.1, max_new_tokens=2048, top_p=0.8, repetition_penalty=1.0,
-# ):
-#     temperature = temperature
-#     top_p = float(top_p)
-
-#     generate_kwargs = dict(
-#         temperature=temperature,
-#         max_new_tokens=max_new_tokens,
-#         top_p=top_p,
-#         repetition_penalty=repetition_penalty,
-#         do_sample=True,
-#         seed=42,
-#     )
-
-#     formatted_prompt = format_prompt(prompt, history)
-
-#     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
-#     output = ""
-
-#     for response in stream:
-#         output += response.token.text
-#         mood, is_classified = classify_mood(output)
-#         # Print the chatbot's response
-#         if is_classified:
-#             print("Chatbot:", mood.capitalize())
-#             playlist_message = f"Playing {mood.capitalize()} playlist for you!"
-#             output=playlist_message
-#             return output
-#         # yield output
-#     return output
-
 def generate_llm_output(
     prompt,
    history,
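
For reference, a minimal sketch of the Mistral-style [INST]/[/INST] layout the rewritten format_prompt now produces; the helper name, shortened system text, and sample history below are illustrative, not from utils.py:

# Sketch only: reproduces the new prompt layout with made-up inputs.
def build_prompt(message, history, system_message="Classify the user's mood."):
    prompt = "<s>[INST]" + system_message + "[/INST]"
    for user_prompt, bot_response in history:
        if user_prompt is not None:
            prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    if message == "":
        message = "Hello"
    prompt += f"[INST] {message} [/INST]"
    return prompt

history = [["I aced my exam today!", "Happy"]]
print(build_prompt("Play something upbeat", history))
# <s>[INST]Classify the user's mood.[/INST][INST] I aced my exam today! [/INST] Happy</s> [INST] Play something upbeat [/INST]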
@@ -318,8 +283,9 @@ def generate_llm_output(
     top_p=0.95,
     stop_words=["<s>","[/INST]", "</s>"]
 ):
-    temperature = temperature
-
+    temperature = float(temperature)
+    if temperature < 1e-2:
+        temperature = 1e-2
     top_p = float(top_p)
 
    generate_kwargs = dict(
@@ -339,23 +305,25 @@ def generate_llm_output(
         )
         output = ""
         for response in stream:
-            output += response.token.text
-            mood, is_classified = classify_mood(output)
-            # Print the chatbot's response
-            if is_classified:
-                print("Chatbot:", mood.capitalize())
-                playlist_message = f"Playing {mood.capitalize()} playlist for you!"
-                output=playlist_message
-                return output
-            # yield output
-        return output
+            character= response["choices"][0]["text"]
+
+            if character in stop_words:
+                # end of context
+                return
+
+            if emoji.is_emoji(character):
+                # Bad emoji not a meaning messes chat from next lines
+                return
+
+            output += response["choices"][0]["text"]
+            yield output
 
     except Exception as e:
         print("Unhandled Exception: ", str(e))
-        gr.Warning("Generating error")
+        gr.Warning("Unfortunately Mistral is unable to process")
         output = "I do not know what happened but I could not understand you ."
         return output
-
+
 def get_sentence(history, llm):
     history = [["", None]] if history is None else history
    history[-1][1] = ""
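
The reworked loop turns generate_llm_output into a generator: each streamed chunk is unpacked as response["choices"][0]["text"] (the shape llama-cpp-python emits when called with stream=True, which appears to be the assumption here), a stop word or an emoji ends the stream early, and the running text is yielded. A self-contained sketch of that consumption pattern with a faked stream, illustrative only:

# Sketch only: fake_stream stands in for llm(prompt, stream=True, ...).
import emoji

stop_words = ["<s>", "[/INST]", "</s>"]

def stream_output(stream):
    output = ""
    for response in stream:
        character = response["choices"][0]["text"]
        if character in stop_words:
            # an explicit stop token ends generation
            return
        if emoji.is_emoji(character):
            # emoji ends the stream too (the commit notes it "messes chat from next lines")
            return
        output += character
        yield output  # yield the accumulated text, not just the new delta

fake_stream = [{"choices": [{"text": t}]} for t in ["Par", "ty", "</s>"]]
for partial in stream_output(fake_stream):
    print(partial)  # prints "Par", then "Party"; the loop stops at "</s>"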
 