ford442 committed
Commit 72d4b61 · verified · 1 Parent(s): b83785f

Create app.py

Files changed (1)
  1. app.py +176 -0
app.py ADDED
@@ -0,0 +1,176 @@
+ import spaces  # If using Hugging Face Spaces
+
+ import os
+
+ # Use os.environ (not os.putenv) so the settings are visible to libraries imported
+ # later in this same process; os.putenv does not update os.environ.
+ os.environ['PYTORCH_NVML_BASED_CUDA_CHECK'] = '1'
+ os.environ['TORCH_LINALG_PREFER_CUSOLVER'] = '1'
+ alloc_conf_parts = [
+     'expandable_segments:True',
+     'pinned_use_background_threads:True'  # Specific to pinned memory.
+ ]
+ os.environ['PYTORCH_CUDA_ALLOC_CONF'] = ','.join(alloc_conf_parts)
+ os.environ["SAFETENSORS_FAST_GPU"] = "1"
+ os.environ['HF_HUB_ENABLE_HF_TRANSFER'] = '1'
+
+ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
+ import torch
+ import gradio as gr
+
+ torch.backends.cuda.matmul.allow_tf32 = False
+ torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
+ torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
+ torch.backends.cudnn.allow_tf32 = False
+ torch.backends.cudnn.deterministic = False
+ torch.backends.cudnn.benchmark = False
+ # These are functions, not attributes; assigning a string to them would silently
+ # overwrite the function instead of selecting a backend.
+ torch.backends.cuda.preferred_blas_library("cublas")
+ torch.backends.cuda.preferred_linalg_library("cusolver")
+ torch.set_float32_matmul_precision("highest")
+
+ # --- Model and Tokenizer Configuration ---
+ #model_name = "FelixChao/vicuna-33b-coder"
+ #model_name = "mradermacher/Wizard-Vicuna-30B-Uncensored-GGUF"
+ #model_name = "cognitivecomputations/Wizard-Vicuna-30B-Uncensored"
+ #model_name = "TheBloke/Wizard-Vicuna-13B-Uncensored-GGUF"
+ #model_name = "cognitivecomputations/Wizard-Vicuna-13B-Uncensored"
+ model_name = "cognitivecomputations/Wizard-Vicuna-7B-Uncensored"
+
+ # --- Quantization Configuration (Example: 4-bit) ---
+ # Currently disabled. Uncomment this block (and the `quantization_config`
+ # argument below) to load the model in 4-bit with bitsandbytes.
+ '''
+ print("Setting up 4-bit quantization config...")
+ quantization_config_4bit = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_use_double_quant=True,
+     bnb_4bit_quant_type="nf4",
+     bnb_4bit_compute_dtype=torch.bfloat16
+ )
+
+ '''
+ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+ print(f"Loading model: {model_name} in bfloat16 (primary device: {device})")
+
+ model = AutoModelForCausalLM.from_pretrained(
+     model_name,
+     # quantization_config=quantization_config_4bit,  # Uncomment to use the 4-bit config above
+     device_map="auto",
+     offload_folder='./',
+     torch_dtype=torch.bfloat16,  # Load weights directly in bfloat16 rather than casting afterwards
+ )
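+ # Note: with device_map="auto" (plus the offload folder) the weights may be spread
+ # across devices, so avoid moving the whole model afterwards with .to(...); if the
+ # 4-bit quantization is enabled instead, bitsandbytes manages dtypes on its own.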
+
+ print(f"Loading tokenizer: {model_name}")
+ tokenizer = AutoTokenizer.from_pretrained(
+     model_name,
+     use_fast=True
+ )
+
+ # ** MODIFICATION: Define and set the Vicuna chat template **
+ # ** DOCUMENTATION: Chat Template **
+ # Vicuna models expect a specific chat format. If the tokenizer doesn't have one
+ # built-in, we need to set it manually.
+ # This template handles a system prompt, user messages, and assistant responses.
+ # It will also add the "ASSISTANT:" prompt for generation if needed.
+ VICUNA_CHAT_TEMPLATE = (
+     "{% if messages[0]['role'] == 'system' %}"      # Check if the first message is a system prompt
+     "{{ messages[0]['content'] + '\\n\\n' }}"        # Add system prompt with two newlines
+     "{% set loop_messages = messages[1:] %}"         # Slice to loop over remaining messages
+     "{% else %}"
+     "{% set loop_messages = messages %}"             # No system prompt, loop over all messages
+     "{% endif %}"
+     "{% for message in loop_messages %}"             # Loop through user and assistant messages
+     "{% if message['role'] == 'user' %}"
+     "{{ 'USER: ' + message['content'].strip() + '\\n' }}"
+     "{% elif message['role'] == 'assistant' %}"
+     "{{ 'ASSISTANT: ' + message['content'].strip() + eos_token + '\\n' }}"
+     "{% endif %}"
+     "{% endfor %}"
+     "{% if add_generation_prompt %}"                 # If we need to prompt the model for a response
+     "{% if messages[-1]['role'] != 'assistant' %}"   # and the last message wasn't from the assistant,
+     "{{ 'ASSISTANT:' }}"                             # add the assistant prompt.
+     "{% endif %}"
+     "{% endif %}"
+ )
+ tokenizer.chat_template = VICUNA_CHAT_TEMPLATE
+ print("Manually set Vicuna chat template on the tokenizer.")
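+ # Illustrative example (not in the original file): with a system and a user message,
+ # apply_chat_template(..., add_generation_prompt=True) renders:
+ #   "<system prompt>\n\nUSER: <user message>\nASSISTANT:"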
+
+
+ if tokenizer.pad_token is None:
+     tokenizer.pad_token = tokenizer.eos_token
+     # Also update the model config's pad_token_id if you are setting tokenizer.pad_token.
+     # This is crucial if the model's config doesn't get updated automatically.
+     if model.config.pad_token_id is None:
+         model.config.pad_token_id = tokenizer.pad_token_id
+     print(f"Tokenizer `pad_token` was None, set to `eos_token`: {tokenizer.eos_token}")
+
+ @spaces.GPU(required=True)
+ def generate_code(prompt: str) -> str:
+     messages = [
+         {"role": "system", "content": "You are a helpful and proficient text-to-image prompt expanding assistant. You should return an imaginative, expanded scene description suitable for text-to-image generation."},
+         {"role": "user", "content": prompt}
+     ]
+     try:
+         # ** DOCUMENTATION: Applying Chat Template **
+         # Now that tokenizer.chat_template is set, this should work.
+         text = tokenizer.apply_chat_template(
+             messages,
+             tokenize=False,
+             add_generation_prompt=True  # Important to append "ASSISTANT:"
+         )
+         print(f"Formatted prompt using chat template:\n{text}")  # For debugging
+     except Exception as e:
+         print(f"Error applying chat template: {e}")
+         # Provide a more informative error or fallback if needed.
+         return f"Error: Could not apply chat template. Details: {e}. Ensure the tokenizer has a valid `chat_template` attribute."
+
+     # Determine the device for inputs if the model is spread over multiple devices.
+     # With device_map="auto", input tensors should go to the device of the first model block.
+     input_device = model.hf_device_map.get("", next(iter(model.hf_device_map.values()))) if hasattr(model, "hf_device_map") else model.device
+
+     model_inputs = tokenizer([text], return_tensors="pt").to(input_device)
+
+     with torch.no_grad():
+         generated_ids = model.generate(
+             **model_inputs,  # Pass the tokenized inputs
+             max_new_tokens=192,
+             min_new_tokens=128,
+             do_sample=True,
+             temperature=0.75,
+             top_p=0.85,
+             pad_token_id=tokenizer.eos_token_id  # Use the EOS token for padding
+         )
+
+     # Decode only the newly generated tokens (everything after the prompt).
+     response_ids = generated_ids[0][len(model_inputs.input_ids[0]):]
+     response = tokenizer.decode(response_ids, skip_special_tokens=True)
+     return response.strip()
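+ # Usage note (illustrative): generate_code("a lighthouse at dusk") should return an
+ # expanded scene description of roughly 128-192 newly generated tokens, per the
+ # min/max_new_tokens settings above.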
+
+ # --- Gradio Interface ---
+ with gr.Blocks(title="Vicuna 33B Coder") as demo:
+     with gr.Tab("Code Chat"):
+         gr.Markdown("# Vicuna 33B Coder\nProvide a prompt to generate code.")
+         with gr.Row():
+             prompt_input = gr.Textbox(  # Renamed to avoid conflict with 'prompt' variable in function scope
+                 label="Prompt",
+                 show_label=True,
+                 lines=3,
+                 placeholder="Enter your coding prompt here...",
+             )
+         run_button = gr.Button("Generate Code", variant="primary")
+         with gr.Row():
+             result_output = gr.Code(  # Renamed
+                 label="Generated Code",
+                 show_label=True,
+                 language="python",
+                 lines=20,
+             )
+         gr.on(
+             triggers=[
+                 run_button.click,
+                 prompt_input.submit
+             ],
+             fn=generate_code,
+             inputs=[prompt_input],
+             outputs=[result_output],
+         )
+
+ if __name__ == "__main__":
+     demo.launch(share=False, debug=True)