syedmudassir16 committed
Commit eac14be · verified · 1 Parent(s): 3993af7

Update utils.py

Files changed (1):
    utils.py +78 -31
utils.py CHANGED
@@ -277,38 +277,85 @@ def classify_mood(input_string):
             return word, True
     return None, False
 
+# def generate_llm_output(
+#     prompt, history, temperature=0.1, max_new_tokens=2048, top_p=0.8, repetition_penalty=1.0,
+# ):
+#     temperature = temperature
+#     top_p = float(top_p)
+
+#     generate_kwargs = dict(
+#         temperature=temperature,
+#         max_new_tokens=max_new_tokens,
+#         top_p=top_p,
+#         repetition_penalty=repetition_penalty,
+#         do_sample=True,
+#         seed=42,
+#     )
+
+#     formatted_prompt = format_prompt(prompt, history)
+
+#     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+#     output = ""
+
+#     for response in stream:
+#         output += response.token.text
+#         mood, is_classified = classify_mood(output)
+#         # Print the chatbot's response
+#         if is_classified:
+#             print("Chatbot:", mood.capitalize())
+#             playlist_message = f"Playing {mood.capitalize()} playlist for you!"
+#             output=playlist_message
+#             return output
+#     # yield output
+#     return output
+
 def generate_llm_output(
-    prompt, history, temperature=0.1, max_new_tokens=2048, top_p=0.8, repetition_penalty=1.0,
-):
-    temperature = temperature
-    top_p = float(top_p)
-
-    generate_kwargs = dict(
-        temperature=temperature,
-        max_new_tokens=max_new_tokens,
-        top_p=top_p,
-        repetition_penalty=repetition_penalty,
-        do_sample=True,
-        seed=42,
-    )
-
-    formatted_prompt = format_prompt(prompt, history)
-
-    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
-    output = ""
-
-    for response in stream:
-        output += response.token.text
-        mood, is_classified = classify_mood(output)
-        # Print the chatbot's response
-        if is_classified:
-            print("Chatbot:", mood.capitalize())
-            playlist_message = f"Playing {mood.capitalize()} playlist for you!"
-            output=playlist_message
-            return output
-    # yield output
-    return output
-
+    prompt,
+    history,
+    llm,
+    temperature=0.8,
+    max_tokens=256,
+    top_p=0.95,
+    stop_words=["<s>","[/INST]", "</s>"]
+):
+    temperature = temperature
+
+    top_p = float(top_p)
+
+    generate_kwargs = dict(
+        temperature=temperature,
+        max_tokens=max_tokens,
+        top_p=top_p,
+        stop=stop_words
+    )
+    formatted_prompt = format_prompt(prompt, history)
+    try:
+        print("LLM Input:", formatted_prompt)
+        # Local GGUF
+        stream = llm(
+            formatted_prompt,
+            **generate_kwargs,
+            stream=True,
+        )
+        output = ""
+        for response in stream:
+            output += response.token.text
+            mood, is_classified = classify_mood(output)
+            # Print the chatbot's response
+            if is_classified:
+                print("Chatbot:", mood.capitalize())
+                playlist_message = f"Playing {mood.capitalize()} playlist for you!"
+                output=playlist_message
+                return output
+        # yield output
+        return output
+
+    except Exception as e:
+        print("Unhandled Exception: ", str(e))
+        gr.Warning("Generating error")
+        output = "I do not know what happened but I could not understand you ."
+        return output
+
 def get_sentence(history, llm):
     history = [["", None]] if history is None else history
     history[-1][1] = ""
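
Only the last two lines of classify_mood appear as context in this hunk, so here is a hypothetical sketch of its likely shape, assuming it scans the accumulated output for a fixed mood vocabulary (the label list below is an assumption, not from the commit). It shows why the streaming loop above can short-circuit as soon as a mood word appears.

# Hypothetical sketch of classify_mood, consistent with the two context
# lines in the hunk (return word, True / return None, False); the mood
# vocabulary lives outside this diff and is assumed here.
def classify_mood(input_string):
    labels = ["happy", "sad", "instrumental", "party"]  # assumed labels
    lowered = input_string.lower()
    for word in labels:
        if word in lowered:
            # First matching mood word wins; tells the caller to stop streaming.
            return word, True
    return None, False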
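A minimal usage sketch of the new signature, assuming llm is a llama-cpp-python Llama instance as the "# Local GGUF" comment suggests; the model path, prompt, and history below are placeholders, not from the commit. One caveat: llama-cpp-python's streaming call yields plain dicts, so each chunk's text sits at chunk["choices"][0]["text"], whereas the committed loop keeps the old InferenceClient-style response.token.text and may need the same adjustment.

# Hypothetical usage sketch, not part of the commit.
from llama_cpp import Llama

llm = Llama(model_path="models/model.gguf", n_ctx=2048)  # placeholder path

history = [["I had a rough day and feel pretty low.", None]]
reply = generate_llm_output(
    prompt="Suggest some music for my mood.",
    history=history,
    llm=llm,
)
print(reply)

# With a llama_cpp.Llama backend, streamed chunks are dicts, so token
# text would be read like this (an assumption about the backend in use):
text = ""
for chunk in llm("Hello", max_tokens=8, stream=True):
    text += chunk["choices"][0]["text"]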