PingVortex committed
Commit 776e30f · verified · 1 Parent(s): 1ccc3dc

Update app.py

Files changed (1)
  1. app.py +48 -53
app.py CHANGED
@@ -1,82 +1,77 @@
  import gradio as gr
- from transformers import AutoModelForCausalLM, AutoTokenizer
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
  import torch
+ from threading import Thread

- # Load model and tokenizer locally
+ # Load model and tokenizer
  model_name = "GoofyLM/gonzalez-v1"
- tokenizer = AutoTokenizer.from_pretrained(model_name)
  model = AutoModelForCausalLM.from_pretrained(
      model_name,
-     torch_dtype=torch.float16,  # Use float16 for efficiency
-     device_map="auto"  # Automatically distribute across available GPUs/devices
+     device_map="auto",
+     torch_dtype=torch.float16
  )
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+ # Set pad token if missing
+ if tokenizer.pad_token is None:
+     tokenizer.pad_token = tokenizer.eos_token

  def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
+     message,
+     history: list[tuple[str, str]],
+     system_message,
+     max_tokens,
+     temperature,
      top_p,
  ):
-     # Format messages for the model
+     # Build conversation messages
      messages = [{"role": "system", "content": system_message}]
+
      for user_msg, assistant_msg in history:
          if user_msg:
              messages.append({"role": "user", "content": user_msg})
          if assistant_msg:
              messages.append({"role": "assistant", "content": assistant_msg})
+
      messages.append({"role": "user", "content": message})

-     # Convert messages to model input format
-     chat_template = tokenizer.apply_chat_template(
-         messages,
-         tokenize=False,
-         add_generation_prompt=True
-     )
-
-     # Tokenize the input
-     inputs = tokenizer(chat_template, return_tensors="pt").to(model.device)
-
-     # Generate response with streaming
-     input_length = inputs.input_ids.shape[1]
-     generated_tokens = []
-
-     # Set up generation parameters
-     gen_kwargs = {
-         "max_new_tokens": max_tokens,
-         "temperature": temperature,
-         "top_p": top_p,
-         "do_sample": temperature > 0,
-         "pad_token_id": tokenizer.eos_token_id,
-     }
+     # Format prompt using chat template
+     prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+
+     # Set up streaming
+     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

-     # Stream the generation
-     response = ""
-     for output in model.generate(
+     # Configure generation parameters
+     do_sample = temperature > 0 or top_p < 1.0
+     generation_kwargs = dict(
          **inputs,
-         **gen_kwargs,
-         streamer=transformers.TextStreamer(tokenizer, skip_prompt=True),
-     ):
-         # Skip input tokens
-         if len(output) <= input_length:
-             continue
-
-         # Get new tokens
-         new_tokens = output[input_length:]
-         decoded = tokenizer.decode(new_tokens, skip_special_tokens=True)
-         response = decoded
+         streamer=streamer,
+         max_new_tokens=max_tokens,
+         temperature=temperature,
+         top_p=top_p,
+         do_sample=do_sample,
+         pad_token_id=tokenizer.pad_token_id
+     )
+
+     # Start generation in separate thread
+     thread = Thread(target=model.generate, kwargs=generation_kwargs)
+     thread.start()
+
+     # Stream response
+     response = ""
+     for token in streamer:
+         response += token
          yield response

+ # Create Gradio interface
  demo = gr.ChatInterface(
      respond,
      additional_inputs=[
-         gr.Textbox(value="You are a Gonzalez-v1.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"
-         ),
+         gr.Textbox(value="You are Gonzalez.", label="System message"),
+         gr.Slider(1, 2048, value=72, label="Max new tokens"),
+         gr.Slider(0.1, 4.0, value=0.7, label="Temperature"),
+         gr.Slider(0.1, 1.0, value=0.95, label="Top-p (nucleus sampling)"),
      ],
  )
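
For reference, a minimal standalone sketch of the TextIteratorStreamer pattern the updated app.py relies on: model.generate() blocks until decoding finishes, so it runs in a worker thread while the main thread iterates the streamer for decoded text chunks. The checkpoint name is taken from the file; the prompt and max_new_tokens value here are illustrative assumptions, not part of the commit.

# Sketch: stream tokens from the same checkpoint outside Gradio (illustrative prompt/settings).
from threading import Thread

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_name = "GoofyLM/gonzalez-v1"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype=torch.float16)

inputs = tokenizer("Hello, who are you?", return_tensors="pt").to(model.device)
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

# generate() runs in a background thread; the streamer feeds the main thread as tokens decode.
thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=64))
thread.start()

for text_chunk in streamer:
    print(text_chunk, end="", flush=True)
thread.join()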