arjunanand13 commited on
Commit
109898e
·
verified ·
1 Parent(s): 039ca7e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -4
app.py CHANGED
@@ -17,7 +17,8 @@ pipeline = transformers.pipeline(
17
 
18
  def chat_function(message, history, system_prompt, max_new_tokens, temperature):
19
  messages = []
20
- if history is not None:
 
21
  for user_msg, assistant_msg in history:
22
  messages.append({"role": "user", "content": user_msg})
23
  messages.append({"role": "assistant", "content": assistant_msg})
@@ -32,6 +33,12 @@ def chat_function(message, history, system_prompt, max_new_tokens, temperature):
32
  add_generation_prompt=True
33
  )
34
 
 
 
 
 
 
 
35
  # Adjust the temperature slightly above given to ensure variety
36
  adjusted_temp = temperature + 0.1
37
 
@@ -48,21 +55,22 @@ def chat_function(message, history, system_prompt, max_new_tokens, temperature):
48
  generated_text = outputs[0]["generated_text"]
49
  return generated_text[len(prompt):] # Return the new part of the conversation
50
 
51
- # Update Gradio interface to reflect the potential nullability of history
52
  gr.Interface(
53
  fn=chat_function,
54
  inputs=[
55
  gr.Textbox(placeholder="Enter your message here", label="Your Message"),
56
- gr.JSON(label="Conversation History (format as [[user, assistant], ...])", optional=True), # Marked as optional
57
  gr.Textbox(label="System Prompt"),
58
  gr.Slider(512, 4096, label="Max New Tokens"),
59
- gr.Slider(0.0, 1.0, step=0.1, label="Temperature")
60
  ],
61
  outputs=gr.Textbox(label="AI Response")
62
  ).launch()
63
 
64
 
65
 
 
66
  # def chat_function(message, history, system_prompt,max_new_tokens,temperature):
67
  # messages = [
68
  # {"role": "system", "content": system_prompt},
 
17
 
18
  def chat_function(message, history, system_prompt, max_new_tokens, temperature):
19
  messages = []
20
+ # Check if history is None or empty and handle accordingly
21
+ if history:
22
  for user_msg, assistant_msg in history:
23
  messages.append({"role": "user", "content": user_msg})
24
  messages.append({"role": "assistant", "content": assistant_msg})
 
33
  add_generation_prompt=True
34
  )
35
 
36
+ # Generate the response
37
+ terminators = [
38
+ pipeline.tokenizer.eos_token_id,
39
+ pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>")
40
+ ]
41
+
42
  # Adjust the temperature slightly above given to ensure variety
43
  adjusted_temp = temperature + 0.1
44
 
 
55
  generated_text = outputs[0]["generated_text"]
56
  return generated_text[len(prompt):] # Return the new part of the conversation
57
 
58
+ # Update Gradio interface setup
59
  gr.Interface(
60
  fn=chat_function,
61
  inputs=[
62
  gr.Textbox(placeholder="Enter your message here", label="Your Message"),
63
+ gr.JSON(label="Conversation History (format as [[user, assistant], ...])"), # Without optional
64
  gr.Textbox(label="System Prompt"),
65
  gr.Slider(512, 4096, label="Max New Tokens"),
66
+ gr.Slider(0.0, 1.0, step=0.1, label="Temperature")
67
  ],
68
  outputs=gr.Textbox(label="AI Response")
69
  ).launch()
70
 
71
 
72
 
73
+
74
  # def chat_function(message, history, system_prompt,max_new_tokens,temperature):
75
  # messages = [
76
  # {"role": "system", "content": system_prompt},