Update app.py
app.py (CHANGED)
@@ -1,9 +1,6 @@
 import streamlit as st
-import requests
-import json
 import datetime
 import tempfile
-import subprocess
 import black
 from streamlit_ace import st_ace
 from streamlit_extras.colored_header import colored_header
@@ -11,18 +8,17 @@ from streamlit_extras.add_vertical_space import add_vertical_space
 import re
 from typing import Optional, Dict, List
 import ast
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
 
-    ...
-    except Exception as e:
-        st.error(f"Error fetching models: {str(e)}")
-        return []
+# Initialize model and tokenizer globally
+@st.cache_resource
+def load_model_and_tokenizer():
+    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-32B-Instruct")
+    model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-Coder-32B-Instruct")
+    if torch.cuda.is_available():
+        model = model.to("cuda")
+    return model, tokenizer
 
 def clear_chat():
     """Clear the chat history"""
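Note on the loader added above: `from_pretrained` with no extra arguments pulls the 32B checkpoint at full precision and only then moves it to a single GPU, which is a lot of memory for typical Space hardware. A minimal alternative sketch, assuming the `accelerate` package is available; the `torch_dtype` and `device_map` arguments are standard transformers options and are not part of this commit:

# Sketch only (not part of this commit): load the 32B checkpoint in 16-bit and
# let accelerate place the layers, instead of full precision plus .to("cuda").
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

MODEL_ID = "Qwen/Qwen2.5-Coder-32B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.bfloat16,  # roughly halves memory vs. the float32 default
    device_map="auto",           # requires the accelerate package
)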
@@ -50,34 +46,33 @@ def handle_file_upload():
         return "Binary file uploaded"
     return None
 
-def generate_response(prompt
-    """Generate response
-    ...
-        "system": system_prompt,
-        "temperature": temperature,
-        "max_tokens": max_tokens,
-        "stream": stream
-    }
+def generate_response(prompt: str, temperature: float, max_tokens: int, system_prompt: str) -> str:
+    """Generate response using the Qwen model"""
+    model, tokenizer = load_model_and_tokenizer()
+
+    # Format the input with system prompt
+    full_prompt = f"System: {system_prompt}\n\nUser: {prompt}\n\nAssistant:"
 
     try:
-        ...
+        inputs = tokenizer(full_prompt, return_tensors="pt", padding=True)
+        if torch.cuda.is_available():
+            inputs = {k: v.to("cuda") for k, v in inputs.items()}
+
+        # Generate response
+        outputs = model.generate(
+            **inputs,
+            max_new_tokens=max_tokens,
+            temperature=temperature,
+            do_sample=True,
+            pad_token_id=tokenizer.pad_token_id
+        )
+
+        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+        # Extract assistant's response
+        response = response.split("Assistant:")[-1].strip()
+        return response
+
     except Exception as e:
         st.error(f"Error generating response: {str(e)}")
         return f"Error: {str(e)}"
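Note on `generate_response` above: the prompt is assembled as a plain "System:/User:/Assistant:" string. Qwen2.5 instruct checkpoints also ship a chat template, so the same request could be encoded with `tokenizer.apply_chat_template` instead. A sketch reusing the `model`, `tokenizer`, `prompt`, `system_prompt`, `temperature`, and `max_tokens` names from this file; it is an alternative, not what this commit does:

# Sketch only (not part of this commit): encode the request with the tokenizer's
# built-in chat template instead of a hand-assembled prompt string.
messages = [
    {"role": "system", "content": system_prompt},
    {"role": "user", "content": prompt},
]
input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,  # append the assistant turn marker
    return_tensors="pt",
).to(model.device)
outputs = model.generate(input_ids, max_new_tokens=max_tokens, temperature=temperature, do_sample=True)
# Decode only the newly generated tokens, skipping the prompt.
response = tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)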
@@ -122,8 +117,8 @@ class CodeAnalyzer:
         return context
 
 class CodeCompletion:
-    def __init__(self
-        ...
+    def __init__(self):
+        pass
 
     def get_completion_suggestions(self, code: str, context: Dict) -> str:
         """Generate code completion suggestions based on context"""
@@ -138,20 +133,16 @@ Classes: {', '.join(context['classes'])}
 
 Please complete or continue this code in a natural way."""
 
-        ...
-            "You are a Python coding assistant. Provide only code completion, no explanations.")
-        return response
+        return generate_response(prompt, 0.3, 500, "You are a Python coding assistant. Provide only code completion, no explanations.")
 
-def handle_code_continuation(incomplete_code: str
+def handle_code_continuation(incomplete_code: str) -> str:
     """Handle continuation of incomplete code"""
     prompt = f"""Complete the following Python code:
 {incomplete_code}
 
 Provide only the completion part that would make this code syntactically complete and logical."""
 
-    ...
-        "You are a Python coding assistant. Complete the code naturally.")
-    return response
+    return generate_response(prompt, 0.3, 500, "You are a Python coding assistant. Complete the code naturally.")
 
 def format_code(code: str) -> str:
     """Format Python code using black"""
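Both completion helpers now route through the local model via `generate_response`, pinned to a low temperature (0.3) and a 500-token budget. A hypothetical usage sketch of the function defined above; the snippet passed in is made up:

# Hypothetical usage of handle_code_continuation (not part of this commit).
snippet = "def add(a, b):\n    return"
completion = handle_code_continuation(snippet)
print(completion)  # the model's suggested continuation, code only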
@@ -184,7 +175,7 @@ def init_session_state():
 def setup_page_config():
     """Setup page configuration and styling"""
     st.set_page_config(
-        page_title="
+        page_title="Qwen Coder Chat",
         page_icon="🤖",
         layout="wide",
         initial_sidebar_state="expanded"
@@ -192,14 +183,12 @@ def setup_page_config():
 
     st.markdown("""
     <style>
-    /* Main container styling */
     .main {
         max-width: 1200px;
         margin: 0 auto;
         padding: 2rem;
     }
 
-    /* Message container styling */
     .stChatMessage {
         background-color: #ffffff;
         border-radius: 8px;
@@ -208,7 +197,6 @@ def setup_page_config():
         box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
     }
 
-    /* Chat input styling */
     .stChatInputContainer {
         border-radius: 8px;
         border: 1px solid #e0e0e0;
@@ -216,14 +204,12 @@ def setup_page_config():
         background-color: #ffffff;
     }
 
-    /* Code editor styling */
     .code-editor {
         border-radius: 8px;
         margin: 1rem 0;
         border: 1px solid #e0e0e0;
     }
 
-    /* Code snippet container */
     .code-snippet {
         background-color: #f8fafc;
         padding: 1rem;
@@ -231,7 +217,6 @@ def setup_page_config():
         margin: 0.5rem 0;
     }
 
-    /* Code completion suggestions */
     .completion-suggestion {
         background-color: #f1f5f9;
         padding: 0.5rem;
@@ -267,7 +252,7 @@ def code_editor_section():
     if code_content:
         code_analyzer = CodeAnalyzer()
         context = code_analyzer.get_context(code_content)
-        completion = CodeCompletion(
+        completion = CodeCompletion()
         suggestions = completion.get_completion_suggestions(code_content, context)
         st.code(suggestions, language="python")
 
@@ -276,29 +261,24 @@ def main():
     setup_page_config()
     init_session_state()
 
+    # Initialize model
+    with st.spinner("Loading Qwen2.5-Coder model..."):
+        load_model_and_tokenizer()
+
     # Sidebar configuration
     with st.sidebar:
         colored_header(label="Model Settings", description="Configure your chat parameters", color_name="blue-70")
 
-        available_models = get_ollama_models()
-        if not available_models:
-            st.error("⚠️ No Ollama models found. Please make sure Ollama is running and models are installed.")
-            st.stop()
-
-        selected_model = st.selectbox("Choose a model", available_models, index=0 if available_models else None)
-        st.session_state.selected_model = selected_model
-
         with st.expander("Advanced Settings", expanded=False):
             temperature = st.slider("Temperature", 0.0, 2.0, 0.7, 0.1)
             max_tokens = st.number_input("Max Tokens", 50, 4096, 2048)
             system_prompt = st.text_area("System Prompt", st.session_state.system_prompt)
-            stream_output = st.checkbox("Stream Output", value=True)
 
         if st.button("Clear Chat"):
             clear_chat()
 
-    st.title("🤖
-    st.caption(
+    st.title("🤖 Qwen2.5-Coder Chat")
+    st.caption("Powered by Qwen2.5-Coder-32B-Instruct")
 
     # Main interface tabs
     tab1, tab2 = st.tabs(["Chat", "Code Editor"])
@@ -324,32 +304,26 @@ def main():
                     if not CodeAnalyzer.is_code_complete(code):
                         st.info("This code block appears to be incomplete. Would you like to complete it?")
                         if st.button("Complete Code", key=f"complete_{len(code)}"):
-                            completion = handle_code_continuation(code
+                            completion = handle_code_continuation(code)
                             st.code(completion, language="python")
 
         # Chat input
-        if prompt := st.chat_input("Message (use @ to attach a file
+        if prompt := st.chat_input("Message (use @ to attach a file)"):
            with st.chat_message("user"):
                st.markdown(prompt)
            st.session_state.messages.append({"role": "user", "content": prompt})
 
            with st.chat_message("assistant"):
-                ...
-                    message_placeholder.markdown(response)
-                    final_response = response
-                else:
-                    with st.spinner("Thinking..."):
-                        final_response = generate_response(prompt, selected_model, temperature, max_tokens, system_prompt, stream=False)
-                    st.markdown(final_response)
+                with st.spinner("Thinking..."):
+                    response = generate_response(prompt, temperature, max_tokens, system_prompt)
+                    st.markdown(response)
 
                # Store code blocks in context
-                code_blocks = CodeAnalyzer.extract_code_blocks(
+                code_blocks = CodeAnalyzer.extract_code_blocks(response)
                if code_blocks:
                    st.session_state.last_code_state = code_blocks[-1]
 
-            st.session_state.messages.append({"role": "assistant", "content":
+            st.session_state.messages.append({"role": "assistant", "content": response})
 
    with tab2:
        code_editor_section()
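Note on the chat handler above: only the newest user message is passed to `generate_response`, so earlier turns stored in `st.session_state.messages` do not reach the model. If multi-turn context is wanted, one option is to fold the stored history into the prompt before generation. A hypothetical sketch reusing this commit's prompt format and the names defined in this file; it is not part of the commit:

# Sketch only (not part of this commit): include prior turns from session state
# so the model sees the conversation history, not just the latest message.
history = "\n\n".join(
    f"{m['role'].capitalize()}: {m['content']}" for m in st.session_state.messages
)
full_prompt = f"System: {system_prompt}\n\n{history}\n\nAssistant:"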
@@ -357,7 +331,7 @@ def main():
     # Footer
     add_vertical_space(2)
     st.markdown("---")
-    st.markdown("Made with ❤️ using Streamlit and
+    st.markdown("Made with ❤️ using Streamlit and Qwen2.5-Coder")
 
 if __name__ == "__main__":
     main()