Removed the model-selection option because it can't work, and added a random seed option
Browse files
app.py
CHANGED
@@ -1,15 +1,21 @@
|
|
1 |
from huggingface_hub import InferenceClient
|
2 |
import gradio as gr
|
|
|
|
|
|
|
3 |
|
4 |
def generate(
|
5 |
-
prompt, system_prompt, history, max_new_tokens, repetition_penalty, temperature, top_p, top_k,
|
6 |
):
|
7 |
temperature = float(temperature)
|
8 |
if temperature < 1e-2:
|
9 |
temperature = 1e-2
|
10 |
top_p = float(top_p)
|
11 |
|
12 |
-
|
|
|
|
|
|
|
13 |
|
14 |
generate_kwargs = dict(
|
15 |
temperature=temperature,
|
@@ -87,16 +93,21 @@ additional_inputs=[
|
|
87 |
interactive=True,
|
88 |
info="Higher k means more diverse outputs by considering a range of tokens",
|
89 |
),
|
|
|
|
|
|
|
|
|
|
|
90 |
gr.Number(
|
91 |
-
label="Seed",
|
92 |
value=42,
|
93 |
minimum=1,
|
94 |
-
info="
|
95 |
),
|
96 |
gr.Dropdown(
|
97 |
label="Model",
|
98 |
choices=["mistralai/Mixtral-8x7B-Instruct-v0.1", "mistralai/Mixtral-8x7B-Mixtral-v0.1"],
|
99 |
-
value
|
100 |
info="Choose the model to use"
|
101 |
)
|
102 |
]
|
@@ -113,8 +124,8 @@ gr.ChatInterface(
|
|
113 |
fn=generate,
|
114 |
chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
|
115 |
additional_inputs=additional_inputs,
|
116 |
-
title="Mixtral 8x7b
|
117 |
-
description="
|
118 |
examples=examples,
|
119 |
concurrency_limit=20,
|
120 |
).launch(show_api=False)
|
|
|
1 |
from huggingface_hub import InferenceClient
|
2 |
import gradio as gr
|
3 |
+
import random
|
4 |
+
|
5 |
+
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
|
6 |
|
7 |
def generate(
|
8 |
+
prompt, system_prompt, history, max_new_tokens, repetition_penalty, temperature, top_p, top_k, random_seed, manual_seed,
|
9 |
):
|
10 |
temperature = float(temperature)
|
11 |
if temperature < 1e-2:
|
12 |
temperature = 1e-2
|
13 |
top_p = float(top_p)
|
14 |
|
15 |
+
if random_seed:
|
16 |
+
seed = random.randint(1, 100000)
|
17 |
+
else:
|
18 |
+
seed = manual_seed
|
19 |
|
20 |
generate_kwargs = dict(
|
21 |
temperature=temperature,
|
|
|
93 |
interactive=True,
|
94 |
info="Higher k means more diverse outputs by considering a range of tokens",
|
95 |
),
|
96 |
+
gr.Checkbox(
|
97 |
+
label="Use Random Seed",
|
98 |
+
value=False,
|
99 |
+
info="Use a random starting point to initiate the generation process instead of the manual one"
|
100 |
+
),
|
101 |
gr.Number(
|
102 |
+
label="Manual Seed",
|
103 |
value=42,
|
104 |
minimum=1,
|
105 |
+
info="Use a manual starting point to initiate the generation process",
|
106 |
),
|
107 |
gr.Dropdown(
|
108 |
label="Model",
|
109 |
choices=["mistralai/Mixtral-8x7B-Instruct-v0.1", "mistralai/Mixtral-8x7B-v0.1"],
|
110 |
+
value="mistralai/Mixtral-8x7B-Instruct-v0.1",
|
111 |
info="Choose the model to use"
|
112 |
)
|
113 |
]
|
|
|
124 |
fn=generate,
|
125 |
chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
|
126 |
additional_inputs=additional_inputs,
|
127 |
+
title="Mixtral 8x7b Instruct v0.1 Chatbot",
|
128 |
+
description="Chatbot space with customizable options for model: https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1 \nSpace made by [Nick088](https://linktr.ee/Nick088) \nIf you get an error, you set Max_New_Tokens too high or your system prompt + prompt is too long; shorten one of these",
|
129 |
examples=examples,
|
130 |
concurrency_limit=20,
|
131 |
).launch(show_api=False)
|