Update app.py
app.py
CHANGED
@@ -1,119 +1,148 @@
-import difflib
 import streamlit as st
-
 import os

-# ---
-st.set_page_config(page_title="AI
-
-# --- Custom CSS for Professional Look ---
-st.markdown("""
-<style>
-@import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;600&display=swap');
-html, body, [class*="css"] {
-    font-family: 'Inter', sans-serif;
-    background-color: #f7f9fb;
-}
-.stApp {
-    background-color: #f7f9fb;
-}
-.stSidebar {
-    background-color: #22304a !important;
-}
-.stButton>button {
-    background-color: #22304a;
-    color: #fff;
-    border-radius: 6px;
-    border: none;
-    font-weight: 600;
-    padding: 0.5rem 1.5rem;
-    margin-top: 0.5rem;
-    margin-bottom: 0.5rem;
-    transition: background 0.2s;
-}
-.stButton>button:hover {
-    background-color: #1a2333;
-    color: #fff;
-}
-.stTextInput>div>div>input, .stTextArea>div>textarea {
-    background: #fff;
-    border: 1px solid #d1d5db;
-    border-radius: 6px;
-    color: #22304a;
-    font-size: 1rem;
-}
-.stDownloadButton>button {
-    background-color: #22304a;
-    color: #fff;
-    border-radius: 6px;
-    border: none;
-    font-weight: 600;
-    padding: 0.5rem 1.5rem;
-    margin-top: 0.5rem;
-    margin-bottom: 0.5rem;
-    transition: background 0.2s;
-}
-.stDownloadButton>button:hover {
-    background-color: #1a2333;
-    color: #fff;
-}
-.stExpanderHeader {
-    font-weight: 600;
-    color: #22304a;
-    font-size: 1.1rem;
-}
-.stMarkdown {
-    color: #22304a;
-}
-.stAlert {
-    border-radius: 6px;
-}
-</style>
-""", unsafe_allow_html=True)

 # --- Groq API Setup ---
 GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
 if not GROQ_API_KEY:
-    st.error("
     st.stop()
 client = Groq(api_key=GROQ_API_KEY)

-# ---
-
-
-
-
-
-
-
-

-def
-
         messages=[{"role": "user", "content": prompt}],
         model="llama3-70b-8192",
     )
-    return
-
-def
-
-
-
-
-
-
-
-
-
-
-

-def
-
-
-
-
-    return

 def detect_code_type(code, programming_language):
     backend_keywords = [
@@ -143,258 +172,179 @@ def detect_code_type(code, programming_language):
         return 'frontend'
     return 'unknown'

 def code_matches_language(code: str, language: str) -> bool:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-

-
-
-

-
-
-
-    )
-    explanation = groq_api_call(explain_prompt)
-    timeline.append({
-        "step": "Explain",
-        "description": "Step-by-step explanation of your code.",
-        "output": explanation,
-        "code": code
-    })
-    suggestions.append("Refactor your code for better readability and performance.")

-
-
-
-
-
-    if "```" in refactor_response:
-        parts = refactor_response.split("```")
-        refactor_explanation = parts[0].strip()
-        refactored_code = ""
-        for i in range(1, len(parts)):
-            if parts[i].strip().startswith(programming_language.lower()):
-                refactored_code = parts[i].strip().split('\n', 1)[1] if '\n' in parts[i] else ""
-                break
-            elif i == 1:
-                refactored_code = parts[i].strip().split('\n', 1)[1] if '\n' in parts[i] else ""
-        if not refactored_code:
-            refactored_code = refactor_response.strip()
-    else:
-        refactor_explanation = "Refactored code below."
-        refactored_code = refactor_response.strip()
-    timeline.append({
-        "step": "Refactor",
-        "description": refactor_explanation,
-        "output": refactored_code,
-        "code": refactored_code
-    })
-    suggestions.append("Review the refactored code for best practices and improvements.")
-
-    review_prompt = (
-        f"Provide a code review for the following {programming_language} code. "
-        f"Include feedback on best practices, code smells, optimization, and security issues, for a {skill_level.lower()} {user_role} in {explanation_language}:\n{refactored_code}"
-    )
-    review_feedback = groq_api_call(review_prompt)
-    timeline.append({
-        "step": "Review",
-        "description": "AI code review and feedback.",
-        "output": review_feedback,
-        "code": refactored_code
-    })
-    suggestions.append("Generate unit tests for your code.")
-
-    test_prompt = (
-        f"Write unit tests for the following {programming_language} code. "
-        f"Use pytest style and cover all functions. For a {skill_level.lower()} {user_role} in {explanation_language}:\n{refactored_code}"
-    )
-    test_code = groq_api_call(test_prompt)
-    timeline.append({
-        "step": "Test Generation",
-        "description": "AI-generated unit tests for your code.",
-        "output": test_code,
-        "code": test_code
     })
-    suggestions.append("Run the generated tests in your local environment.")

-
-
-st.
-
-
-

-
-
-
-    "Programming Language",
-    ["Python", "C++", "Java", "C#", "JavaScript", "TypeScript", "HTML"]
-)
-explanation_language = st.selectbox(
-    "Explanation Language",
-    ["English", "Urdu", "Chinese", "Spanish"]
-)
-skill_level = st.selectbox("Skill Level", ["Beginner", "Intermediate", "Expert"])
-user_role = st.selectbox(
-    "Choose Role",
-    ["Data Scientist", "Backend Developer", "Frontend Developer", "Student"]
 )
-
-
-
-if "code" not in st.session_state:
-    st.session_state.code = ""
-if "timeline" not in st.session_state:
-    st.session_state.timeline = []
-if "suggestions" not in st.session_state:
-    st.session_state.suggestions = []
-if "chat_history" not in st.session_state:
-    st.session_state.chat_history = []
-
-col1, col2 = st.columns([2, 3], gap="large")
-
-with col1:
-    st.subheader(f"{programming_language} Code")
-    uploaded_file = st.file_uploader(f"Upload .{programming_language.lower()} file", type=[programming_language.lower()])
-    code_input = st.text_area(
-        "Paste or edit your code here:",
-        height=300,
-        value=st.session_state.code,
-        key="main_code_input"
-    )
-    if uploaded_file is not None:
-        code = uploaded_file.read().decode("utf-8")
-        st.session_state.code = code
-        st.success("File uploaded successfully.")
-    elif code_input:
-        st.session_state.code = code_input
-
-    st.markdown(f"<b>Complexity:</b> {code_complexity(st.session_state.code)}", unsafe_allow_html=True)
-
-    st.markdown("---")
-    st.markdown("#### Agent Suggestions")
-    for suggestion in st.session_state.suggestions[-3:]:
-        st.markdown(f"- {suggestion}")
-
-    st.markdown("---")
-    st.markdown("#### Download Full Report")
-    if st.session_state.timeline:
-        report = ""
-        for step in st.session_state.timeline:
-            report += f"## {step['step']}\n{step['description']}\n\n{step['output']}\n\n"
-        st.download_button("Download Report", report, file_name="ai_code_assistant_report.txt")
-
-with col2:
-    st.subheader("Agentic Workflow")
-    if st.button("Run Full AI Agent Workflow"):
-        if not st.session_state.code.strip():
-            st.warning("Please enter or upload code first.")
-        else:
-            # Language check
-            if not code_matches_language(st.session_state.code, programming_language):
-                st.error(f"It looks like you provided code in a different language. Please provide {programming_language} code.")
-            else:
-                code_type = detect_code_type(st.session_state.code, programming_language)
-                # Role/code type enforcement
-                if code_type == "data_science" and user_role != "Data Scientist":
-                    st.error("It looks like you provided data science code. Please select 'Data Scientist' as your role.")
-                elif code_type == "frontend" and user_role != "Frontend Developer":
-                    st.error("It looks like you provided frontend code. Please select 'Frontend Developer' as your role.")
-                elif code_type == "backend" and user_role != "Backend Developer":
-                    st.error("It looks like you provided backend code. Please select 'Backend Developer' as your role.")
-                elif code_type == "unknown":
-                    st.warning("Could not determine the code type. Please make sure your code is complete and clear.")
-                else:
-                    with st.spinner("AI Agent is working through all steps..."):
-                        timeline, suggestions = agentic_workflow(
-                            st.session_state.code,
-                            skill_level,
-                            programming_language,
-                            explanation_language,
-                            user_role
-                        )
-                        st.session_state.timeline = timeline
-                        st.session_state.suggestions = suggestions
-                        st.success("Agentic workflow complete. See timeline below.")
-
-    # Chatbox with history using Blackbox AI agent
-    st.subheader("Chat with Blackbox AI Agent")
-    user_input = st.text_input("Enter your message:", key="chat_input")
-    if user_input:
-        st.session_state.chat_history.append({"role": "user", "content": user_input})
-        response = blackbox_ai_call(st.session_state.chat_history)
-        st.session_state.chat_history.append({"role": "assistant", "content": response})
-
-    for chat in st.session_state.chat_history:
-        if chat["role"] == "user":
-            st.markdown(f"**You:** {chat['content']}")
-        else:
-            st.markdown(f"**Blackbox AI:** {chat['content']}")
-
-    # --- Semantic Search with history ---
-    st.markdown("---")
-    st.subheader("Semantic Search with Contextual History")
-
-    if "semantic_search_history" not in st.session_state:
-        st.session_state.semantic_search_history = []
-
-    sem_code = st.text_area("Your Code for Semantic Search", height=300, placeholder="Paste your code here...")
-    sem_question = st.text_input("Ask a question about your code:")
-
-    if st.button("Ask Semantic Search"):
-        if not sem_code.strip() or not sem_question.strip():
-            st.warning("Please provide both code and a question.")
         else:
-
-
-
-
-
-
-
-
             else:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
 import streamlit as st
+import difflib
 import os
+import re
+import hashlib
+from groq import Groq

+# --- Page config ---
+st.set_page_config(page_title="AI Assistant with Workflow + Semantic Search", layout="wide")

 # --- Groq API Setup ---
 GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
 if not GROQ_API_KEY:
+    st.error("Please set your GROQ_API_KEY environment variable.")
     st.stop()
 client = Groq(api_key=GROQ_API_KEY)

+# --- Cache for embeddings ---
+embedding_cache = {}
+
+def get_embedding(text):
+    key = hashlib.sha256(text.encode()).hexdigest()
+    if key in embedding_cache:
+        return embedding_cache[key]
+    embedding = [ord(c) % 100 / 100 for c in text[:512]]
+    embedding_cache[key] = embedding
+    return embedding
+
+def cosine_similarity(vec1, vec2):
+    dot = sum(a*b for a, b in zip(vec1, vec2))
+    norm1 = sum(a*a for a in vec1) ** 0.5
+    norm2 = sum(b*b for b in vec2) ** 0.5
+    return dot / (norm1 * norm2 + 1e-8)
+
+def split_code_into_chunks(code, lang):
+    if lang.lower() == "python":
+        pattern = r'(def\s+\w+\(.*?\):|class\s+\w+\(?.*?\)?:)'
+        splits = re.split(pattern, code)
+        chunks = []
+        for i in range(1, len(splits), 2):
+            header = splits[i]
+            body = splits[i+1] if (i+1) < len(splits) else ""
+            chunks.append(header + body)
+        return chunks if chunks else [code]
+    else:
+        return [code]

+def groq_call(prompt):
+    resp = client.chat.completions.create(
         messages=[{"role": "user", "content": prompt}],
         model="llama3-70b-8192",
     )
+    return resp.choices[0].message.content
+
+def semantic_search_improved(code, question, lang, skill, role, explain_lang):
+    chunks = split_code_into_chunks(code, lang)
+    question_emb = get_embedding(question)
+    scored_chunks = []
+    for chunk in chunks:
+        emb = get_embedding(chunk)
+        score = cosine_similarity(question_emb, emb)
+        scored_chunks.append((score, chunk))
+    scored_chunks.sort(key=lambda x: x[0], reverse=True)
+    top_chunks = [c for _, c in scored_chunks[:3]]
+    combined_code = "\n\n".join(top_chunks)
+    prompt = (
+        f"You are a friendly and insightful {lang} expert helping a {skill} {role}.\n"
+        f"Based on these relevant code snippets:\n{combined_code}\n"
+        f"Answer this question in {explain_lang}:\n{question}\n"
+        f"Explain which parts handle the question and how to modify them if needed."
+    )
+    return groq_call(prompt)

+def error_detection_and_fixes(refactored_code, lang, skill, role, explain_lang):
+    prompt = (
+        f"You are a senior {lang} developer. Analyze this code for bugs, security flaws, "
+        f"and performance issues. Suggest fixes with explanations in {explain_lang}:\n\n{refactored_code}"
+    )
+    return groq_call(prompt)
+
+def agentic_workflow(code, skill_level, programming_language, explanation_language, user_role):
+    timeline = []
+    suggestions = []
+
+    # Explanation
+    explain_prompt = (
+        f"You are a friendly and insightful {programming_language} expert helping a {skill_level} {user_role}. "
+        f"Explain this code in {explanation_language} with clear examples, analogies, and why each part matters:\n\n{code}"
+    )
+    explanation = groq_call(explain_prompt)
+    timeline.append({"step": "Explain", "description": "Detailed explanation", "output": explanation, "code": code})
+    suggestions.append("Consider refactoring your code to improve readability and performance.")
+
+    # Refactor
+    refactor_prompt = (
+        f"Refactor this {programming_language} code. Explain the changes like a mentor helping a {skill_level} {user_role}. "
+        f"Include best practices and improvements:\n\n{code}"
+    )
+    refactor_response = groq_call(refactor_prompt)
+    if "```" in refactor_response:
+        parts = refactor_response.split("```")
+        refactored_code = ""
+        for part in parts:
+            if part.strip().startswith(programming_language.lower()):
+                refactored_code = part.strip().split('\n', 1)[1] if '\n' in part else ""
+                break
+        if not refactored_code:
+            refactored_code = refactor_response
+    else:
+        refactored_code = refactor_response
+    timeline.append({"step": "Refactor", "description": "Refactored code with improvements", "output": refactored_code, "code": refactored_code})
+    suggestions.append("Review the refactored code and adapt it to your style or project needs.")
+
+    # Review
+    review_prompt = (
+        f"As a senior {programming_language} developer, review the refactored code. "
+        f"Give constructive feedback on strengths, weaknesses, performance, security, and improvements in {explanation_language}:\n\n{refactored_code}"
+    )
+    review = groq_call(review_prompt)
+    timeline.append({"step": "Review", "description": "Code review and suggestions", "output": review, "code": refactored_code})
+    suggestions.append("Incorporate review feedback for cleaner, robust code.")
+
+    # Error detection & fixes
+    errors = error_detection_and_fixes(refactored_code, programming_language, skill_level, user_role, explanation_language)
+    timeline.append({"step": "Error Detection", "description": "Bugs, security, performance suggestions", "output": errors, "code": refactored_code})
+    suggestions.append("Apply fixes to improve code safety and performance.")
+
+    # Test generation
+    test_prompt = (
+        f"Write clear, effective unit tests for this {programming_language} code. "
+        f"Explain what each test does in {explanation_language}, for a {skill_level} {user_role}:\n\n{refactored_code}"
+    )
+    tests = groq_call(test_prompt)
+    timeline.append({"step": "Test Generation", "description": "Generated unit tests", "output": tests, "code": tests})
+    suggestions.append("Run generated tests locally to validate changes.")
+
+    return timeline, suggestions
+
+def get_inline_diff_html(original, modified):
+    differ = difflib.HtmlDiff(tabsize=4, wrapcolumn=80)
+    html = differ.make_table(
+        original.splitlines(), modified.splitlines(),
+        "Original", "Refactored", context=True, numlines=2
+    )
+    return f'<div style="overflow-x:auto; max-height:400px;">{html}</div>'

 def detect_code_type(code, programming_language):
     backend_keywords = [

         return 'frontend'
     return 'unknown'

+def code_complexity(code):
+    lines = code.count('\n') + 1
+    functions = code.count('def ')
+    classes = code.count('class ')
+    comments = code.count('#')
+    return f"Lines: {lines}, Functions: {functions}, Classes: {classes}, Comments: {comments}"
+
 def code_matches_language(code: str, language: str) -> bool:
+    code_lower = code.strip().lower()
+    language = language.lower()
+
+    patterns = {
+        "python": [
+            "def ", "class ", "import ", "from ", "try:", "except", "raise", "lambda",
+            "with ", "yield", "async ", "await", "print(", "self.", "__init__", "__name__",
+            "if __name__ == '__main__':", "#!",  # shebang for executable scripts
+        ],
+        "c++": [
+            "#include", "int main(", "std::", "::", "cout <<", "cin >>", "new ", "delete ",
+            "try {", "catch(", "template<", "using namespace", "class ", "struct ", "#define",
+        ],
+        "java": [
+            "package ", "import java.", "public class", "private ", "protected ", "public static void main",
+            "System.out.println", "try {", "catch(", "throw new ", "implements ", "extends ",
+            "@Override", "interface ", "enum ", "synchronized ", "final ",
+        ],
+        "c#": [
+            "using System", "namespace ", "class ", "interface ", "public static void Main",
+            "Console.WriteLine", "try {", "catch(", "throw ", "async ", "await ", "get;", "set;",
+            "List<", "Dictionary<", "[Serializable]", "[Obsolete]",
+        ],
+        "javascript": [
+            "function ", "const ", "let ", "var ", "document.", "window.", "console.log",
+            "if(", "for(", "while(", "switch(", "try {", "catch(", "export ", "import ", "async ",
+            "await ", "=>", "this.", "class ", "prototype", "new ", "$(",
+        ],
+        "typescript": [
+            "function ", "const ", "let ", "interface ", "type ", ": string", ": number", ": boolean",
+            "implements ", "extends ", "enum ", "public ", "private ", "protected ", "readonly ",
+            "import ", "export ", "console.log", "async ", "await ", "=>", "this.",
+        ],
+        "html": [
+            "<!doctype html", "<html", "<head>", "<body>", "<script", "<style", "<meta ", "<link ",
+            "<title>", "<div", "<span", "<p>", "<h1>", "<ul>", "<li>", "<form", "<input", "<button",
+            "<table", "<footer", "<header", "<section", "<article", "<nav", "<img", "<a ", "</html>",
+        ],
+    }

+    match_patterns = patterns.get(language, [])
+    match_count = sum(1 for pattern in match_patterns if pattern in code_lower)
+    return match_count >= 1

+# --- Blackbox Agent Chat History (Session-only) ---
+if 'blackbox_chat_history' not in st.session_state:
+    st.session_state['blackbox_chat_history'] = []

+def add_to_blackbox_history(prompt, response, mode):
+    st.session_state['blackbox_chat_history'].append({
+        'mode': mode,  # 'workflow' or 'semantic_search'
+        'prompt': prompt,
+        'response': response
     })

+def show_blackbox_history():
+    st.sidebar.markdown('---')
+    st.sidebar.subheader('Blackbox Agent Chat History')
+    if not st.session_state['blackbox_chat_history']:
+        st.sidebar.info('No chat history this session.')
+    else:
+        for i, entry in enumerate(reversed(st.session_state['blackbox_chat_history'])):
+            with st.sidebar.expander(f"{entry['mode'].replace('_', ' ').title()} #{len(st.session_state['blackbox_chat_history'])-i}"):
+                st.markdown(f"**Prompt:**\n{entry['prompt']}")
+                st.markdown(f"**Response:**\n{entry['response']}")
+
+# Show chat history in sidebar
+show_blackbox_history()
+
+# --- Sidebar ---
+st.sidebar.title("Configuration")
+lang = st.sidebar.selectbox("Programming Language", ["Python", "JavaScript", "C++", "Java", "C#", "TypeScript"])
+skill = st.sidebar.selectbox("Skill Level", ["Beginner", "Intermediate", "Expert"])
+role = st.sidebar.selectbox("Your Role", ["Student", "Frontend Developer", "Backend Developer", "Data Scientist"])
+explain_lang = st.sidebar.selectbox("Explanation Language", ["English", "Spanish", "Chinese", "Urdu"])
+st.sidebar.markdown("---")
+st.sidebar.markdown("<span style='color:#fff;'>Powered by <b>BLACKBOX.AI</b></span>", unsafe_allow_html=True)
+
+tabs = st.tabs(["Full AI Workflow", "Semantic Search"])
+
+# --- Tab 1: Full AI Workflow ---
+with tabs[0]:
+    st.title("Full AI Workflow")
+    file_types = {
+        "Python": ["py"],
+        "JavaScript": ["js"],
+        "C++": ["cpp", "h", "hpp"],
+        "Java": ["java"],
+        "C#": ["cs"],
+        "TypeScript": ["ts"],
+    }

+    uploaded_file = st.file_uploader(
+        f"Upload {', '.join(file_types.get(lang, []))} file(s)",
+        type=file_types.get(lang, None)
     )
+    if uploaded_file:
+        code_input = uploaded_file.read().decode("utf-8")
     else:
+        code_input = st.text_area("Your Code", height=300, placeholder="Paste your code here...")
+
+    if code_input:
+        st.markdown(f"<b>Complexity:</b> {code_complexity(code_input)}", unsafe_allow_html=True)
+
+    if st.button("Run AI Workflow"):
+        if not code_input.strip():
+            st.warning("Please paste or upload your code.")
+        elif not code_matches_language(code_input, lang):
+            st.error(f"The pasted code doesn't look like valid {lang} code. Please check your code or select the correct language.")
+        else:
+            code_type = detect_code_type(code_input, lang)
+            if code_type == "data_science" and role != "Data Scientist":
+                st.error("Data science code detected. Please select 'Data Scientist' role.")
+            elif code_type == "frontend" and role != "Frontend Developer":
+                st.error("Frontend code detected. Please select 'Frontend Developer' role.")
+            elif code_type == "backend" and role != "Backend Developer":
+                st.error("Backend code detected. Please select 'Backend Developer' role.")
             else:
+                with st.spinner("Running agentic workflow..."):
+                    timeline, suggestions = agentic_workflow(code_input, skill, lang, explain_lang, role)
+                    # Log to Blackbox chat history
+                    add_to_blackbox_history(f"[Full Workflow] Code:\n{code_input}", f"{timeline[-1]['output'] if timeline else ''}", mode='workflow')
+                    # Show each step in an expander
+                    for step in timeline:
+                        with st.expander(f"{step['step']} - {step['description']}"):
+                            if step['step'] == "Refactor":
+                                diff_html = get_inline_diff_html(code_input, step['code'])
+                                st.markdown(diff_html, unsafe_allow_html=True)
+                                st.code(step['output'], language=lang.lower())
+                            else:
+                                st.markdown(step['output'])
+
+                    st.markdown("#### Agent Suggestions")
+                    for s in suggestions:
+                        st.markdown(f"- {s}")
+
+                    # Download buttons after suggestions
+                    st.markdown("---")
+                    st.markdown("### Download Results")
+
+                    report_text = ""
+                    for step in timeline:
+                        report_text += f"## {step['step']}\n{step['description']}\n\n{step['output']}\n\n"
+
+                    st.download_button(
+                        label="Download Full Workflow Report",
+                        data=report_text,
+                        file_name="ai_workflow_report.txt",
+                        mime="text/plain",
+                    )
+
+# --- Tab 2: Semantic Search ---
+with tabs[1]:
+    st.title("Semantic Search")
+    sem_code = st.text_area("Your Code", height=300, placeholder="Paste your code...")
+    sem_q = st.text_input("Your Question", placeholder="E.g., What does this function do?")
+    if st.button("Run Semantic Search"):
+        if not sem_code.strip() or not sem_q.strip():
+            st.warning("Code and question required.")
+        else:
+            with st.spinner("Running semantic search..."):
+                answer = semantic_search_improved(sem_code, sem_q, lang, skill, role, explain_lang)
+                # Log to Blackbox chat history
+                add_to_blackbox_history(f"[Semantic Search] Q: {sem_q}\nCode:\n{sem_code}", answer, mode='semantic_search')
+                st.markdown("### Answer")
+                st.markdown(answer)
+
+st.markdown("---")
|