Nick088 committed
Commit 508d7db · verified · 1 Parent(s): 1822503

Update app.py

Files changed (1): app.py (+13 −30)
app.py CHANGED
@@ -1,6 +1,5 @@
 from huggingface_hub import InferenceClient
 import gradio as gr
-import random
 
 client = InferenceClient(
     "mistralai/Mixtral-8x7B-Instruct-v0.1"
@@ -14,20 +13,15 @@ def format_prompt(message, history):
         prompt += f" {bot_response}</s> "
     prompt += f"[INST] {message} [/INST]"
     return prompt
-
+
 def generate(
-    prompt, system_prompt, history, max_new_tokens, repetition_penalty, temperature, top_p, top_k, random_seed, manual_seed,
+    prompt, system_prompt, history, max_new_tokens, repetition_penalty, temperature, top_p, top_k, seed
 ):
     temperature = float(temperature)
     if temperature < 1e-2:
         temperature = 1e-2
     top_p = float(top_p)
 
-    if random_seed:
-        seed = random.randint(1, 100000)
-    else:
-        seed = manual_seed
-
     generate_kwargs = dict(
         temperature=temperature,
         max_new_tokens=max_new_tokens,
@@ -47,12 +41,6 @@ def generate(
         yield output
     return output
 
-def format_prompt(message, history):
-    prompt = "ydney"
-    for user_prompt, bot_response in history:
-        prompt += f"[INST] {user_prompt} [/INST]"
-        prompt += f" {bot_response} [INST] {message} [/INST]"
-    return prompt
 
 additional_inputs=[
     gr.Textbox(
@@ -104,33 +92,28 @@ additional_inputs=[
         interactive=True,
         info="Higher k means more diverse outputs by considering a range of tokens",
     ),
-    gr.Checkbox(
-        label="Use Random Seed",
-        value=False,
-        info="Use a random starting point to initiate the generation process instead of the manual one"
-    ),
     gr.Number(
-        label="Manual Seed",
+        label="Seed",
         value=42,
         minimum=1,
-        info="Use a manual starting point to initiate the generation process",
-    ),
+        info="A starting point to initiate the generation process",
+    )
 ]
 
-examples=[["I'm planning a vacation to Japan. Can you suggest a one-week itinerary including must-visit places and local cuisines to try?", None, None, None, None, None, None],
-          ["Can you write a short story about a time-traveling detective who solves historical mysteries?", None, None, None, None, None, None],
-          ["I'm trying to learn French. Can you provide some common phrases that would be useful for a beginner, along with their pronunciations?", None, None, None, None, None, None],
-          ["I have chicken, rice, and bell peppers in my kitchen. Can you suggest an easy recipe I can make with these ingredients?", None, None, None, None, None, None],
-          ["Can you explain how the QuickSort algorithm works and provide a Python implementation?", None, None, None, None, None, None],
-          ["What are some unique features of Rust that make it stand out compared to other systems programming languages like C++?", None, None, None, None, None, None],
+examples=[["I'm planning a vacation to Japan. Can you suggest a one-week itinerary including must-visit places and local cuisines to try?", None, None, None, None, None],
+          ["Can you write a short story about a time-traveling detective who solves historical mysteries?", None, None, None, None, None],
+          ["I'm trying to learn French. Can you provide some common phrases that would be useful for a beginner, along with their pronunciations?", None, None, None, None, None],
+          ["I have chicken, rice, and bell peppers in my kitchen. Can you suggest an easy recipe I can make with these ingredients?", None, None, None, None, None],
+          ["Can you explain how the QuickSort algorithm works and provide a Python implementation?", None, None, None, None, None],
+          ["What are some unique features of Rust that make it stand out compared to other systems programming languages like C++?", None, None, None, None, None],
          ]
 
 gr.ChatInterface(
     fn=generate,
     chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
     additional_inputs=additional_inputs,
-    title="Mixtral 8x7b Instruct v0.1 Chatbot",
-    description="Chatbot space with costumizable options for model: https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1 \nSpace made by [Nick088](https://linktr.ee/Nick088) \nIf you get an erorr, you putted a too much high Max_New_Tokens or your system prompt+prompt is too long, shorten up one of these",
+    title="Mixtral-8x7B-Instruct-v0.1",
+    description="If you get an error, Max_New_Tokens is set too high or your system prompt + prompt is too long; reduce one of them",
     examples=examples,
     concurrency_limit=20,
 ).launch(show_api=False)
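For reference, only the tail of the surviving format_prompt is visible as context in the second hunk. A minimal sketch of the presumed full function, assuming it follows the conventional Mixtral-Instruct chat template (the `"<s>"` initializer is an assumption, not shown in the diff):

```python
# Sketch of the kept format_prompt; only the last three lines appear
# as context in the diff. Assumes the standard Mixtral-Instruct template:
# <s>[INST] user [/INST] reply</s> [INST] user [/INST] ...
def format_prompt(message, history):
    prompt = "<s>"  # assumed opening token (not visible in the diff)
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "  # matches the diff's context line
    prompt += f"[INST] {message} [/INST]"
    return prompt
```

The removed duplicate had drifted from this template: its `prompt = "ydney"` initializer and missing `</s>` terminators would corrupt the conversation encoding, and because it was defined after the first version, it silently shadowed it. Deleting it is presumably the point of that hunk.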
 
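The diff also elides the body of generate between generate_kwargs and the yield. A minimal sketch of how the simplified signature plausibly threads the single seed into InferenceClient.text_generation, reusing format_prompt from the sketch above; the do_sample flag, the streaming loop, and the system-prompt wiring are assumptions rather than lines from the commit:

```python
from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

def generate(prompt, system_prompt, history, max_new_tokens,
             repetition_penalty, temperature, top_p, top_k, seed):
    # Clamp temperature exactly as the committed code does; the backend
    # rejects values at or near zero.
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=seed,  # one user-supplied seed replaces the random/manual pair
    )

    # Assumed wiring: prepend the system prompt, then stream tokens and
    # yield the growing reply so gr.ChatInterface can render incrementally.
    formatted_prompt = format_prompt(f"{system_prompt} {prompt}", history)
    stream = client.text_generation(
        formatted_prompt, **generate_kwargs,
        stream=True, details=True, return_full_text=False,
    )
    output = ""
    for response in stream:
        output += response.token.text
        yield output
    return output
```

With a fixed seed and do_sample=True, repeated calls on identical inputs should reproduce the same completion, which is the practical effect of exposing a single gr.Number seed instead of the old checkbox-plus-number pair.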