Update app.py
app.py
CHANGED
@@ -15,7 +15,7 @@ def format_prompt(message, history):
     return prompt
 
 def generate(
-    prompt, history, system_prompt, temperature=0.9, max_new_tokens=1000, top_p=0.95, repetition_penalty=1.0,
+    prompt, history, system_prompt, temperature=0.9, max_new_tokens=1000, top_p=0.95, repetition_penalty=1.0, seed=42
 ):
     temperature = float(temperature)
     if temperature < 1e-2:
@@ -28,7 +28,7 @@ def generate(
         top_p=top_p,
         repetition_penalty=repetition_penalty,
         do_sample=True,
-        seed=
+        seed=seed,
     )
 
     formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
@@ -46,6 +46,15 @@ additional_inputs=[
         label="System Prompt",
         interactive=True,
     ),
+    gr.Slider(
+        label="Max new tokens",
+        value=1000,
+        minimum=0,
+        maximum=32768,
+        step=64,
+        interactive=True,
+        info="The maximum number of new tokens; controls how long the output can be",
+    ),
     gr.Slider(
         label="Temperature",
         value=0.9,
@@ -56,13 +65,13 @@ additional_inputs=[
         info="Higher values produce more diverse outputs",
     ),
     gr.Slider(
-        label="
-        value=
-        minimum=0,
-        maximum=
-        step=
+        label="Repetition penalty",
+        value=1.2,
+        minimum=1.0,
+        maximum=2.0,
+        step=0.05,
         interactive=True,
-        info="
+        info="Penalizes repeated tokens so the model repeats itself less",
     ),
     gr.Slider(
         label="Top-p (nucleus sampling)",
@@ -74,13 +83,19 @@ additional_inputs=[
         info="Higher values sample more low-probability tokens",
     ),
     gr.Slider(
-        label="
-        value=
-        minimum=
-        maximum=
+        label="Top-k",
+        value=0.90,
+        minimum=0.0,
+        maximum=1,
         step=0.05,
         interactive=True,
-        info="
+        info="Higher values consider more candidate tokens, giving more diverse outputs",
+    ),
+    gr.Number(
+        label="Seed",
+        value=42,
+        minimum=1,
+        info="A starting point for the random generation process",
     )
 ]
 
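As committed, the widget list and the function signature disagree. gr.ChatInterface passes the values of additional_inputs to the callback positionally, after the message and history, so the parameters of generate have to mirror the widget order (System Prompt, Max new tokens, Temperature, Repetition penalty, Top-p, Top-k, Seed). The diffed signature orders them temperature, max_new_tokens, top_p, repetition_penalty, seed and declares no top_k at all, so the sliders would feed the wrong arguments: the Max new tokens value (1000) would arrive as temperature. A minimal sketch of a signature that matches the widget order, with the hypothetical top_k parameter flagged:

def generate(
    prompt,                  # chat message
    history,                 # chat history
    system_prompt,           # "System Prompt" textbox
    max_new_tokens=1000,     # "Max new tokens" slider
    temperature=0.9,         # "Temperature" slider
    repetition_penalty=1.2,  # "Repetition penalty" slider
    top_p=0.95,              # "Top-p (nucleus sampling)" slider
    top_k=50,                # "Top-k" slider; hypothetical, the committed generate() takes no top_k
    seed=42,                 # "Seed" number input
):
    ...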
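The new Top-k slider also reuses top-p style settings (default 0.90, range 0.0 to 1, step 0.05). Top-k sampling restricts the choice to the k highest-probability tokens, so the control should take integer values. A hedged correction; the 1 to 100 range is an editorial assumption, not part of the commit:

import gradio as gr

# Integer-valued Top-k control; range and default are assumptions.
top_k_slider = gr.Slider(
    label="Top-k",
    value=50,       # sample from the 50 most likely tokens by default
    minimum=1,
    maximum=100,
    step=1,
    interactive=True,
    info="Sample only from the k most likely next tokens; higher k gives more diverse outputs",
)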
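Finally, the reason for threading seed through to the generation call: with do_sample=True, a fixed seed makes sampled output reproducible across runs with identical settings, while changing it yields a fresh completion for the same prompt. A standalone sketch, assuming the huggingface_hub InferenceClient this template family typically uses (the client setup sits outside the diffed hunks, and the model name is a placeholder):

from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.1")  # model assumed

stream = client.text_generation(
    "Explain top-p sampling in one sentence.",
    temperature=0.9,
    max_new_tokens=256,
    top_p=0.95,
    repetition_penalty=1.2,
    do_sample=True,
    seed=42,      # same seed + same settings -> the same sampled tokens
    stream=True,  # yields the generated text chunk by chunk
)
for chunk in stream:
    print(chunk, end="")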