mrfakename committed on
Commit 5bca4fe · verified · 1 Parent(s): 71b86f3
Files changed (1)
  1. app.py +135 -0
app.py ADDED
@@ -0,0 +1,135 @@
+ # From https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat
+ import os
+ from threading import Thread
+ from typing import Iterator
+
+ import gradio as gr
+ import spaces
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+
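+ # Generation limits for the demo; MAX_INPUT_TOKEN_LENGTH can be overridden via an environment variable.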
+ MAX_MAX_NEW_TOKENS = 2048
+ DEFAULT_MAX_NEW_TOKENS = 1024
+ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "8192"))
+
+ DESCRIPTION = """
+ # Chat with Failure 2B Base
+
+ Chat with [Failure 2B Base](https://huggingface.co/mrfakename/failure-2b-base).
+
+ ---
+
+ A quick failed experiment at creating an SLM that can code. Based on [Danube](https://huggingface.co/h2oai/h2o-danube-1.8b-base).
+
+ Scored 14.8% on HumanEval (FWIW, I personally recommend using a quantized 7B model for coding instead of an SLM). Open-sourcing for transparency.
+ """
+ if not torch.cuda.is_available():
+     DESCRIPTION += "\n\nRunning on CPU 🥶 This demo does not work on CPU."
+
+ if torch.cuda.is_available():
+     model_id = "mrfakename/failure-2b-base"
+     model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
+     tokenizer = AutoTokenizer.from_pretrained(model_id)
+     tokenizer.use_default_system_prompt = False
+
+
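+ # On Hugging Face Spaces, @spaces.GPU allocates a ZeroGPU device for the duration of each call.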
+ @spaces.GPU
+ def generate(
+     message: str,
+     chat_history: list[tuple[str, str]],
+     system_prompt: str,
+     max_new_tokens: int = 1024,
+     temperature: float = 0.2,
+     top_p: float = 0.9,
+     top_k: int = 50,
+     repetition_penalty: float = 1.2,
+ ) -> Iterator[str]:
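+     # Rebuild the running chat as a list of chat-template messages.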
+     conversation = []
+     if system_prompt:
+         conversation.append({"role": "system", "content": system_prompt})
+     for user, assistant in chat_history:
+         conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
+     conversation.append({"role": "user", "content": message})
+
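+     # Left-truncate the prompt so the most recent context survives when it exceeds the limit.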
+     input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
+     if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
+         input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
+         gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
+     input_ids = input_ids.to(model.device)
+
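+     # Run generation on a background thread; TextIteratorStreamer exposes tokens as they are produced.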
+     streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
+     generate_kwargs = dict(
+         {"input_ids": input_ids},
+         streamer=streamer,
+         max_new_tokens=max_new_tokens,
+         do_sample=True,
+         top_p=top_p,
+         top_k=top_k,
+         temperature=temperature,
+         num_beams=1,
+         repetition_penalty=repetition_penalty,
+     )
+     t = Thread(target=model.generate, kwargs=generate_kwargs)
+     t.start()
+
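+     # Accumulate the streamed chunks and yield the growing response so Gradio renders it incrementally.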
+     outputs = []
+     for text in streamer:
+         outputs.append(text)
+         yield "".join(outputs)
+
+
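+ # Chat UI with the sampling parameters exposed as additional inputs.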
+ chat_interface = gr.ChatInterface(
+     fn=generate,
+     additional_inputs=[
+         gr.Textbox(label="System prompt", lines=6),
+         gr.Slider(
+             label="Max new tokens",
+             minimum=1,
+             maximum=MAX_MAX_NEW_TOKENS,
+             step=1,
+             value=DEFAULT_MAX_NEW_TOKENS,
+         ),
+         gr.Slider(
+             label="Temperature",
+             minimum=0.1,
+             maximum=4.0,
+             step=0.1,
+             value=0.2,
+         ),
+         gr.Slider(
+             label="Top-p (nucleus sampling)",
+             minimum=0.05,
+             maximum=1.0,
+             step=0.05,
+             value=0.9,
+         ),
+         gr.Slider(
+             label="Top-k",
+             minimum=1,
+             maximum=1000,
+             step=1,
+             value=50,
+         ),
+         gr.Slider(
+             label="Repetition penalty",
+             minimum=1.0,
+             maximum=2.0,
+             step=0.05,
+             value=1.2,
+         ),
+     ],
+     stop_btn=None,
+     examples=[
+         ["Hello there! How are you doing?"],
+         ["Please explain the Python programming language to me."],
+         ["Please write a function in Python to calculate the Fibonacci sequence."],
+         ["Write a 100-word article on 'Benefits of Open-Source in AI research'"],
+     ],
+ )
+
+ with gr.Blocks() as demo:
+     gr.Markdown(DESCRIPTION)
+     chat_interface.render()
+
+ if __name__ == "__main__":
+     demo.queue(max_size=20).launch()
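
To try this file locally (a sketch, assuming the `spaces` helper package from PyPI and a CUDA-capable GPU, since the model is only loaded when `torch.cuda.is_available()` returns True): install gradio, spaces, torch, and transformers, then run `python app.py`.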