realitisoft committed
Commit 6df3345 · verified · 1 Parent(s): 8524a1d

Upload 3 files

Files changed (3)
  1. app.py +105 -44
  2. requirements.txt +9 -1
  3. style.css +16 -0
app.py CHANGED
@@ -1,64 +1,125 @@
-import gradio as gr
-from huggingface_hub import InferenceClient
-
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
 
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
 
-    messages.append({"role": "user", "content": message})
 
-    response = ""
 
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
         top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
 
-        response += token
-        yield response
 
 
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
     additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
         gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
             step=0.05,
             label="Top-p (nucleus sampling)",
         ),
     ],
 )
 
 
 if __name__ == "__main__":
-    demo.launch()
 
+#!/usr/bin/env python
 
+import os
+from threading import Thread
+from typing import Iterator
+import spaces
+import gradio as gr
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 
+MAX_MAX_NEW_TOKENS = 2048
+DEFAULT_MAX_NEW_TOKENS = 1024
+MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "8192"))
 
+model_id = "utter-project/EuroLLM-1.7B-Instruct"
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
 
+@spaces.GPU
+def generate(
+    message: str,
+    chat_history: list[tuple[str, str]],
+    max_new_tokens: int = 1024,
+    temperature: float = 0.06,
+    top_p: float = 0.95,
+    top_k: int = 40,
+    repetition_penalty: float = 1.2,
+) -> Iterator[str]:
+    conversation = []
+    for user, assistant in chat_history:
+        conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
+    conversation.append({"role": "user", "content": message})
 
+    input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt")
+    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
+        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
+        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
+    input_ids = input_ids.to(model.device)
 
+    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
+    generate_kwargs = dict(
+        {"input_ids": input_ids},
+        streamer=streamer,
+        max_new_tokens=max_new_tokens,
+        do_sample=True,
         top_p=top_p,
+        top_k=top_k,
+        temperature=temperature,
+        num_beams=1,
+        repetition_penalty=repetition_penalty,
+    )
+    t = Thread(target=model.generate, kwargs=generate_kwargs)
+    t.start()
 
+    outputs = []
+    for text in streamer:
+        outputs.append(text)
+        yield "".join(outputs)
 
 
+chat_interface = gr.ChatInterface(
+    fn=generate,
+    chatbot=gr.Chatbot(height=450,
+        label="utter-project/EuroLLM-1.7B-Instruct",
+        show_share_button=True,
+    ),
+    cache_examples=False,
     additional_inputs=[
         gr.Slider(
+            label="Max new tokens",
+            minimum=1,
+            maximum=MAX_MAX_NEW_TOKENS,
+            step=1,
+            value=DEFAULT_MAX_NEW_TOKENS,
+        ),
+        gr.Slider(
+            label="Temperature",
+            minimum=0.05,
+            maximum=1.2,
             step=0.05,
+            value=0.2,
+        ),
+        gr.Slider(
             label="Top-p (nucleus sampling)",
+            minimum=0.05,
+            maximum=1.0,
+            step=0.05,
+            value=0.9,
+        ),
+        gr.Slider(
+            label="Top-k",
+            minimum=1,
+            maximum=1000,
+            step=1,
+            value=50,
         ),
+        gr.Slider(
+            label="Repetition penalty",
+            minimum=1.0,
+            maximum=2.0,
+            step=0.05,
+            value=1.2,
+        ),
+    ],
+    examples=[
+        ["Describe the significance of the Eiffel Tower in French culture and history."],
+        ["Что такое 'загадочная русская душа' и как это понятие отражается в русской литературе?"],  # Russian: What is the "mysterious Russian soul" and how is this concept reflected in Russian literature?
+        ["Jakie są najbardziej znane polskie tradycje bożonarodzeniowe?"],  # Polish: What are the most well-known Polish Christmas traditions?
+        ["Welche Rolle spielte die Hanse im mittelalterlichen Europa?"],  # German: What role did the Hanseatic League play in medieval Europe?
+        ["日本の茶道の精神と作法について説明してください。"]  # Japanese: Please explain the spirit and etiquette of Japanese tea ceremony.
     ],
+    title="utter-project/EuroLLM-1.7B-Instruct",
+    description="""utter-project/EuroLLM-1.7B-Instruct quick demo""",
+    submit_btn="Generate",
+    stop_btn="Stop",
+    retry_btn="🔄 Retry",
+    undo_btn="↩️ Undo",
+    clear_btn="🗑️ Clear",
 )
 
+with gr.Blocks(css="style.css") as demo:
+    chat_interface.render()
 
 if __name__ == "__main__":
+    demo.queue(max_size=20).launch()
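For reference, the core of the new app.py is the threaded streaming pattern: model.generate() blocks until generation finishes, so it runs in a worker thread while TextIteratorStreamer hands decoded text back to the consumer as tokens arrive. A minimal standalone sketch of that pattern, using the same model id as the diff; the prompt and sampling values here are illustrative assumptions, not part of the commit:

# Standalone sketch of the streaming pattern in app.py above; the prompt
# and sampling values are illustrative assumptions.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_id = "utter-project/EuroLLM-1.7B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

chat = [{"role": "user", "content": "Name three EU capitals."}]
input_ids = tokenizer.apply_chat_template(
    chat, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

# generate() runs in a worker thread; the streamer yields decoded text
# chunks to the main thread as tokens are produced.
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
worker = Thread(
    target=model.generate,
    kwargs=dict(input_ids=input_ids, streamer=streamer,
                max_new_tokens=64, do_sample=True, top_p=0.9),
)
worker.start()

for chunk in streamer:
    print(chunk, end="", flush=True)
worker.join()

The Space-specific pieces (@spaces.GPU, gr.Warning, the ChatInterface wiring) sit on top of this loop unchanged.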
requirements.txt CHANGED
@@ -1 +1,9 @@
-huggingface_hub==0.25.2
+accelerate
+bitsandbytes
+gradio
+scipy
+sentencepiece
+spaces
+torch~=2.2.0
+transformers
+numpy<2
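Most of these requirements float (only torch is pinned with ~=2.2.0 and numpy is capped below 2), so the resolved versions can drift between Space rebuilds. A small convenience sketch, assuming nothing beyond the Python standard library, for logging what actually got installed:

# Convenience sketch: print the resolved version of each dependency
# listed in requirements.txt above.
from importlib.metadata import PackageNotFoundError, version

for pkg in ("accelerate", "bitsandbytes", "gradio", "scipy",
            "sentencepiece", "spaces", "torch", "transformers", "numpy"):
    try:
        print(f"{pkg}=={version(pkg)}")
    except PackageNotFoundError:
        print(f"{pkg}: not installed")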
style.css ADDED
@@ -0,0 +1,16 @@
+h1 {
+  text-align: center;
+}
+
+#duplicate-button {
+  margin: auto;
+  color: white;
+  background: #1565c0;
+  border-radius: 100vh;
+}
+
+.contain {
+  max-width: 900px;
+  margin: auto;
+  padding-top: 1.5rem;
+}
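Note that the #duplicate-button rule only matches if the app renders an element with that id, which this commit's app.py does not. A sketch of how it could be wired up with Gradio's stock gr.DuplicateButton; this is a hypothetical addition, not part of the commit:

import gradio as gr

with gr.Blocks(css="style.css") as demo:
    # Hypothetical addition: the stock Duplicate button, given the elem_id
    # that the #duplicate-button rule in style.css targets.
    gr.DuplicateButton(elem_id="duplicate-button")

if __name__ == "__main__":
    demo.launch()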