Leri777 commited on
Commit
fc6c66a
·
verified ·
1 Parent(s): fc338f3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +72 -24
app.py CHANGED
@@ -1,52 +1,100 @@
1
  from huggingface_hub import InferenceClient
2
  import gradio as gr
 
3
 
4
  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
5
 
6
def format_prompt(message, history):
    """Build a Mixtral-instruct prompt from prior chat turns plus the new message.

    Each (user, assistant) pair in *history* becomes an ``[INST] ... [/INST]``
    segment followed by the assistant reply, and the new *message* is appended
    as the final open instruction.
    """
    pieces = ["<s>"]
    for user_turn, assistant_turn in history:
        pieces.append(f"[INST] {user_turn} [/INST]")
        pieces.append(f" {assistant_turn}</s> ")
    pieces.append(f"[INST] {message} [/INST]")
    return "".join(pieces)
 
14
def generate(
    prompt, history, temperature=0.2, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
):
    """Stream a completion for *prompt*, yielding the accumulated text so far.

    The prompt is rendered with ``format_prompt`` and sent to the module-level
    ``client`` as a token stream; each received token extends the yielded string.
    """
    # Clamp temperature to a small positive floor (same guard as before,
    # expressed with max() instead of an if-statement).
    temperature = max(float(temperature), 1e-2)
    top_p = float(top_p)

    sampling_options = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,  # fixed seed keeps sampling reproducible
    )

    rendered = format_prompt(prompt, history)
    stream = client.text_generation(
        rendered,
        **sampling_options,
        stream=True,
        details=True,
        return_full_text=False,
    )

    accumulated = ""
    for event in stream:
        accumulated += event.token.text
        yield accumulated
    return accumulated
 
41
# Chat widget with custom avatars; copy and like buttons enabled.
mychatbot = gr.Chatbot(
    avatar_images=["./user.png", "./botm.png"],
    bubble_full_width=False,
    show_label=False,
    show_copy_button=True,
    likeable=True,
)

# Simple chat UI around the streaming generator; retry/undo buttons removed.
demo = gr.ChatInterface(
    fn=generate,
    chatbot=mychatbot,
    # title="WebpyGPT",
    retry_btn=None,
    undo_btn=None,
)

demo.queue().launch(show_api=False)
1
  from huggingface_hub import InferenceClient
2
  import gradio as gr
3
+ import datetime
4
 
5
  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
6
 
7
def format_prompt(message, history):
    """Serialize chat *history* and the new *message* into Mixtral [INST] format."""
    past_turns = "".join(
        f"[INST] {user_msg} [/INST] {bot_msg}</s> " for user_msg, bot_msg in history
    )
    return f"<s>{past_turns}[INST] {message} [/INST]"
14
 
15
def generate(
    message, history, temperature=0.2, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
):
    """Stream a model reply for *message* given chat *history*.

    Yields the accumulated response text after each streamed token. The
    rendered prompt and the final response are appended to
    ``conversation_log.txt``.

    Parameters mirror the text-generation endpoint: sampling temperature
    (clamped to a small positive floor), token budget, nucleus-sampling
    cutoff, and repetition penalty.
    """
    # Clamp temperature away from zero so sampling stays well-defined.
    temperature = max(float(temperature), 1e-2)
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=int(max_new_tokens),
        top_p=top_p,
        # Coerce to float for consistency with top_p/temperature handling.
        repetition_penalty=float(repetition_penalty),
        do_sample=True,
        seed=42,  # fixed seed keeps outputs reproducible
    )

    formatted_prompt = format_prompt(message, history)

    # Log the outgoing prompt with a timestamp.
    with open("conversation_log.txt", "a", encoding="utf-8") as f:
        f.write(f"{datetime.datetime.now()}\n")
        f.write(f"Промпт: {formatted_prompt}\n")

    stream = client.text_generation(
        formatted_prompt,
        **generate_kwargs,
        stream=True,
        details=True,
        return_full_text=False,
    )
    output = ""
    for response in stream:
        output += response.token.text
        yield output

    # Log the completed response.
    with open("conversation_log.txt", "a", encoding="utf-8") as f:
        f.write(f"Ответ: {output}\n\n")

    # FIX: no longer append (message, output) to *history* here. The UI
    # callback already records the pair, so the in-place mutation produced
    # duplicate turns in the chat state — and it only ran at all when the
    # generator happened to be drained to completion.
57
def update_history(instruction, model_answer):
    """Seed a fresh chat history with one (instruction, answer) pair.

    Returns an empty history when either field is blank/falsy, so the UI can
    safely call this with incomplete inputs.
    """
    if instruction and model_answer:
        return [(instruction, model_answer)]
    return []
62
 
63
with gr.Blocks() as demo:
    demo_title = gr.Markdown("# Чат с Mixtral-8x7B-Instruct-v0.1")

    # Inputs for seeding the conversation with a hand-written first exchange.
    instruction = gr.Textbox(label="Instruction", placeholder="Введите начальную инструкцию")
    model_answer = gr.Textbox(label="Model Answer", placeholder="Введите ответ модели на инструкцию")
    set_initial_btn = gr.Button("Установить начальный диалог")

    # Chat display with custom avatars; copy and like buttons enabled.
    chatbot = gr.Chatbot(
        avatar_images=["./user.png", "./botm.png"],
        bubble_full_width=False,
        show_label=False,
        show_copy_button=True,
        likeable=True,
    )
    follow_up_instruction = gr.Textbox(label="Follow-up Instruction", placeholder="Введите ваше сообщение")

    # Conversation state shared between callbacks.
    history_state = gr.State([])

    # Button writes the seeded exchange into the shared history state.
    set_initial_btn.click(
        fn=update_history,
        inputs=[instruction, model_answer],
        outputs=[history_state],
    )

    def respond(message, history):
        """Drain the streaming generator and return the updated chat state."""
        final_text = ""
        for partial in generate(message, history):
            final_text = partial
        return history + [(message, final_text)], history + [(message, final_text)]

    follow_up_instruction.submit(
        fn=respond,
        inputs=[follow_up_instruction, history_state],
        outputs=[chatbot, history_state],
    )

demo.queue().launch(show_api=False)