AbstractPhil committed on
Commit
fae170d
·
verified ·
1 Parent(s): 4d83981

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -9
app.py CHANGED
@@ -48,14 +48,23 @@ tok = Tokenizer.from_file(tokenizer_file)
48
  # ----------------------------
49
  # 💬 Gradio Chat Wrapper
50
  # ----------------------------
51
- def beeper_reply(message, history, temperature, top_k, top_p):
 
 
 
 
 
 
 
 
52
  # Build conversation context
53
  prompt_parts = []
54
- for h in history:
55
- if h[0]: # User message exists
56
- prompt_parts.append(f"User: {h[0]}")
57
- if h[1]: # Assistant response exists
58
- prompt_parts.append(f"Beeper: {h[1]}")
 
59
 
60
  # Add current message
61
  prompt_parts.append(f"User: {message}")
@@ -70,9 +79,9 @@ def beeper_reply(message, history, temperature, top_k, top_p):
70
  cfg=config,
71
  prompt=prompt,
72
  max_new_tokens=128,
73
- temperature=temperature,
74
  top_k=int(top_k),
75
- top_p=top_p,
76
  repetition_penalty=config["repetition_penalty"],
77
  presence_penalty=config["presence_penalty"],
78
  frequency_penalty=config["frequency_penalty"],
@@ -96,7 +105,7 @@ demo = gr.ChatInterface(
96
  gr.Slider(1, 100, value=40, step=1, label="Top-k"),
97
  gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="Top-p"),
98
  ],
99
- chatbot=gr.Chatbot(label="Chat with Beeper 🤖"),
100
  title="Beeper - A Rose-based Tiny Language Model",
101
  description="Hello! I'm Beeper, a small language model trained with love and care. Please be patient with me - I'm still learning! 💕",
102
  examples=[
@@ -105,6 +114,7 @@ demo = gr.ChatInterface(
105
  ["What do you like to do for fun?"],
106
  ],
107
  theme=gr.themes.Soft(),
 
108
  )
109
 
110
  if __name__ == "__main__":
 
48
  # ----------------------------
49
  # 💬 Gradio Chat Wrapper
50
  # ----------------------------
51
+ def beeper_reply(message, history, temperature=None, top_k=None, top_p=None):
52
+ # Use defaults if not provided (for examples caching)
53
+ if temperature is None:
54
+ temperature = 0.9
55
+ if top_k is None:
56
+ top_k = 40
57
+ if top_p is None:
58
+ top_p = 0.9
59
+
60
  # Build conversation context
61
  prompt_parts = []
62
+ if history:
63
+ for h in history:
64
+ if h[0]: # User message exists
65
+ prompt_parts.append(f"User: {h[0]}")
66
+ if h[1]: # Assistant response exists
67
+ prompt_parts.append(f"Beeper: {h[1]}")
68
 
69
  # Add current message
70
  prompt_parts.append(f"User: {message}")
 
79
  cfg=config,
80
  prompt=prompt,
81
  max_new_tokens=128,
82
+ temperature=float(temperature),
83
  top_k=int(top_k),
84
+ top_p=float(top_p),
85
  repetition_penalty=config["repetition_penalty"],
86
  presence_penalty=config["presence_penalty"],
87
  frequency_penalty=config["frequency_penalty"],
 
105
  gr.Slider(1, 100, value=40, step=1, label="Top-k"),
106
  gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="Top-p"),
107
  ],
108
+ chatbot=gr.Chatbot(label="Chat with Beeper 🤖", type="messages"),
109
  title="Beeper - A Rose-based Tiny Language Model",
110
  description="Hello! I'm Beeper, a small language model trained with love and care. Please be patient with me - I'm still learning! 💕",
111
  examples=[
 
114
  ["What do you like to do for fun?"],
115
  ],
116
  theme=gr.themes.Soft(),
117
+ cache_examples=False, # Disable caching to avoid the startup issue
118
  )
119
 
120
  if __name__ == "__main__":