added more model provider and models and increase context

Files changed:
- SDLC.py +28 -20
- app.py +34 -3
- requirements.txt +33 -32

SDLC.py
CHANGED
@@ -1,4 +1,4 @@
-#
+# SDLC.py
 import os
 import sys
 import shutil
@@ -10,8 +10,8 @@ from langchain_core.language_models.base import BaseLanguageModel # Correct impo
 from langchain_groq import ChatGroq
 from langchain_openai import ChatOpenAI
 # Add imports for other potential providers if needed
-
-
+from langchain_google_genai import ChatGoogleGenerativeAI
+from langchain_anthropic import ChatAnthropic
 from tavily import TavilyClient
 from dotenv import load_dotenv
 import operator
@@ -198,10 +198,18 @@ def initialize_llm_clients(provider: str, model_name: str, llm_api_key: str, tav
     elif provider_lower == "groq":
         llm_instance = ChatGroq(model=model_name, temperature=0.5, api_key=llm_api_key)
     # Add elif blocks for other providers here
-
-
-
-
+    elif provider_lower == "google":
+        llm_instance = ChatGoogleGenerativeAI(model=model_name, google_api_key=llm_api_key, temperature=0.5)
+    elif provider_lower == "anthropic":
+        llm_instance = ChatAnthropic(model=model_name, anthropic_api_key=llm_api_key, temperature=0.5)
+    elif provider_lower == "xai":
+        # Adding support for xAI's Grok, assuming an OpenAI-compatible API
+        llm_instance = ChatOpenAI(
+            model=model_name,
+            temperature=0.5,
+            api_key=llm_api_key,
+            base_url="https://api.x.ai"  # Replace with the actual xAI API endpoint
+        )
     else:
         raise ValueError(f"Unsupported LLM provider: {provider}")
 
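For reference, a minimal standalone sketch of the dispatch this hunk adds; the helper name make_llm is hypothetical, and note that xAI's OpenAI-compatible endpoint is served under a /v1 path (as of this writing, https://api.x.ai/v1), which the bare base_url above omits:

    # Hypothetical standalone version of the provider dispatch above.
    from langchain_openai import ChatOpenAI
    from langchain_groq import ChatGroq
    from langchain_google_genai import ChatGoogleGenerativeAI
    from langchain_anthropic import ChatAnthropic

    def make_llm(provider: str, model_name: str, api_key: str, temperature: float = 0.5):
        p = provider.lower()
        if p == "openai":
            return ChatOpenAI(model=model_name, temperature=temperature, api_key=api_key)
        if p == "groq":
            return ChatGroq(model=model_name, temperature=temperature, api_key=api_key)
        if p == "google":
            return ChatGoogleGenerativeAI(model=model_name, google_api_key=api_key, temperature=temperature)
        if p == "anthropic":
            return ChatAnthropic(model=model_name, anthropic_api_key=api_key, temperature=temperature)
        if p == "xai":
            # Grok speaks the OpenAI wire protocol; note the /v1 path segment.
            return ChatOpenAI(model=model_name, temperature=temperature,
                              api_key=api_key, base_url="https://api.x.ai/v1")
        raise ValueError(f"Unsupported LLM provider: {provider}")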
@@ -677,7 +685,7 @@ def generate_initial_code(state: MainState) -> MainState:
     if not llm: raise ConnectionError("LLM instance not found in state.")
     if 'messages' not in state: state['messages'] = []
     uml_types = ', '.join([c.diagram_type for c in state.get('final_uml_codes', [])])
-    prompt = f"Generate complete, runnable '{state['coding_language']}' project for '{state['project']}'. Base on Design Doc, User Stories, and UML ({uml_types}). Include main scripts, modules, requirements, basic README, comments.\nDesign:\n{state.get('final_design_document', 'N/A')}\nStories (Context):\n{state.get('final_user_story', 'N/A')
+    prompt = f"Generate complete, runnable '{state['coding_language']}' project for '{state['project']}'. Base on Design Doc, User Stories, and UML ({uml_types}). Include main scripts, modules, requirements, basic README, comments.\nDesign:\n{state.get('final_design_document', 'N/A')}\nStories (Context):\n{state.get('final_user_story', 'N/A')}...\n---\nOutput ONLY JSON (GeneratedCode model)."
     structured_llm = llm.with_structured_output(GeneratedCode) # Use LLM from state
     response = structured_llm.invoke(prompt)
     if not response or not isinstance(response, GeneratedCode) or not response.files:
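The fields of the GeneratedCode Pydantic model are not shown in this diff; a plausible shape, inferred only from the attribute accesses visible elsewhere in the file (response.files, GeneratedCode(files=[], instructions="")), would be:

    # Assumed shape only; the real model is defined elsewhere in SDLC.py.
    from typing import List
    from pydantic import BaseModel

    class CodeFile(BaseModel):   # field names are assumptions
        filename: str
        code: str

    class GeneratedCode(BaseModel):
        files: List[CodeFile]
        instructions: str = ""

    # structured_llm = llm.with_structured_output(GeneratedCode)
    # response = structured_llm.invoke(prompt)  # -> GeneratedCode instance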
@@ -722,7 +730,7 @@ def generate_code_feedback(state: MainState) -> MainState:
     func_name = "generate_code_feedback"
     code_c = state.get("code_current"); instructions = ""
     # --- CORRECTED LOOP ---
-    code_str_parts = []; total_len = 0; max_code_len =
+    code_str_parts = []; total_len = 0; max_code_len = 250000
     files_to_process = code_c.files if code_c and isinstance(code_c, GeneratedCode) else []
     if not files_to_process: logger.warning(f"No files in code_current for {func_name}"); code_content = "No code files provided."; instructions = "N/A"
     else:
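This code_str_parts/max_code_len line recurs in eleven functions below, but the loop body it feeds sits outside every hunk. (Only generate_code_feedback's budget is raised to 250,000 characters here; the other ten use 25,000.) A plausible expansion, assuming each file object exposes filename and code attributes (names assumed), is:

    # Concatenate file contents until the character budget is exhausted.
    code_str_parts = []; total_len = 0; max_code_len = 25000
    for file in files_to_process:
        part = f"### File: {file.filename}\n{file.code}\n"
        if total_len + len(part) > max_code_len:
            code_str_parts.append("... [truncated: context limit reached] ...")
            break
        code_str_parts.append(part)
        total_len += len(part)
    code_content = "\n".join(code_str_parts)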
@@ -754,7 +762,7 @@ def refine_code(state: MainState) -> MainState:
     func_name = "refine_code"
     code_c = state.get("code_current"); instructions = ""
     # --- CORRECTED LOOP ---
-    code_str_parts = []; total_len = 0; max_code_len =
+    code_str_parts = []; total_len = 0; max_code_len = 25000
     files_to_process = code_c.files if code_c and isinstance(code_c, GeneratedCode) else []
     if not files_to_process: logger.warning(f"No files in code_current for {func_name}"); code_content = "No previous code."; instructions = state.get("code_current", GeneratedCode(files=[], instructions="")).instructions
     else:
@@ -791,7 +799,7 @@ def code_review(state: MainState) -> MainState:
     code_files_to_review = state.get("final_code_files", [])
     if not code_files_to_review: logger.warning(f"No files in final_code_files for {func_name}"); state["code_review_current_feedback"] = "No code available."; state["messages"].append(AIMessage(content="Code Review: No code.")); return state
     # --- CORRECTED LOOP ---
-    code_str_parts = []; total_len = 0; max_code_len =
+    code_str_parts = []; total_len = 0; max_code_len = 25000
     instructions = state.get("code_current", GeneratedCode(files=[], instructions="")).instructions
     files_to_process = code_files_to_review
     for file in files_to_process:
@@ -822,7 +830,7 @@ def security_check(state: MainState) -> MainState:
     code_files_to_check = state.get("final_code_files", [])
     if not code_files_to_check: logger.warning(f"No files in final_code_files for {func_name}"); state["security_current_feedback"] = "No code available."; state["messages"].append(AIMessage(content="Security Check: No code.")); return state
     # --- CORRECTED LOOP ---
-    code_str_parts = []; total_len = 0; max_code_len =
+    code_str_parts = []; total_len = 0; max_code_len = 25000
     instructions = state.get("code_current", GeneratedCode(files=[], instructions="")).instructions
     files_to_process = code_files_to_check
     for file in files_to_process:
@@ -854,7 +862,7 @@ def refine_code_with_reviews(state: MainState) -> MainState:
     if not code_files_to_refine: logger.error(f"No files in final_code_files for {func_name}"); raise ValueError("No code available.")
     instructions = state.get("code_current", GeneratedCode(files=[], instructions="")).instructions
     # --- CORRECTED LOOP ---
-    code_str_parts = []; total_len = 0; max_code_len =
+    code_str_parts = []; total_len = 0; max_code_len = 25000
     files_to_process = code_files_to_refine
     if not files_to_process: logger.warning(f"No files for {func_name}"); code_content = "No previous code."
     else:
@@ -941,7 +949,7 @@ def generate_initial_test_cases(state: MainState) -> MainState:
     if 'messages' not in state: state['messages'] = []
     func_name = "generate_initial_test_cases"
     # --- RECOMMENDED: Use corrected loop ---
-    code_str_parts = []; total_len = 0; max_code_len =
+    code_str_parts = []; total_len = 0; max_code_len = 25000
     files_to_process = state.get("final_code_files", [])
     if not files_to_process: logger.warning(f"No files for {func_name}"); code_str = "No code files provided."
     else:
@@ -997,7 +1005,7 @@ def refine_test_cases_and_code(state: MainState) -> MainState:
     if not current_tests or not current_code_files: logger.error(f"Missing tests or code for {func_name}"); raise ValueError("Missing data.")
     tests_str = "\n".join([f"- {tc.description}: Input={tc.input_data}, Expected={tc.expected_output}" for tc in current_tests])
     # --- CORRECTED LOOP ---
-    code_str_parts = []; total_len = 0; max_code_len =
+    code_str_parts = []; total_len = 0; max_code_len = 25000
     files_to_process = current_code_files
     if not files_to_process: logger.warning(f"No files for {func_name}"); code_str = "No code."
     else:
@@ -1082,7 +1090,7 @@ def generate_initial_quality_analysis(state: MainState) -> MainState:
     instructions = state.get("code_current", GeneratedCode(files=[], instructions="")).instructions
     if not code_files_passed: logger.warning(f"No tested code for {func_name}."); state["quality_current_analysis"] = "No passed code available."; return state
     # --- CORRECTED LOOP ---
-    code_str_parts = []; total_len = 0; max_code_len =
+    code_str_parts = []; total_len = 0; max_code_len = 25000
     files_to_process = code_files_passed
     if not files_to_process: logger.error(f"Logic error: files_to_process empty in {func_name}"); code_str = "Error retrieving code."
     else:
@@ -1131,7 +1139,7 @@ def refine_quality_and_code(state: MainState) -> MainState:
     code_files_base = state.get("final_test_code_files", [])
     instructions = state.get("code_current", GeneratedCode(files=[], instructions="")).instructions
     # --- CORRECTED LOOP ---
-    code_str_parts = []; total_len = 0; max_code_len =
+    code_str_parts = []; total_len = 0; max_code_len = 25000
     files_to_process = code_files_base
     if not files_to_process: logger.warning(f"No tested code for {func_name}"); code_content = "N/A"
     else:
@@ -1209,7 +1217,7 @@ def generate_initial_deployment(state: MainState, prefs: str) -> MainState:
     if not final_code: logger.error(f"No final code for {func_name}"); raise ValueError("Final code missing.")
     instructions = state.get("code_current", GeneratedCode(files=[], instructions="")).instructions
     # --- CORRECTED LOOP ---
-    code_str_parts = []; total_len = 0; max_code_len =
+    code_str_parts = []; total_len = 0; max_code_len = 25000
     files_to_process = final_code
     if not files_to_process: logger.warning(f"No files for {func_name}"); code_context = "No code files."
     else:
@@ -1258,7 +1266,7 @@ def refine_deployment(state: MainState) -> MainState:
     func_name = "refine_deployment"
     current_plan = state.get('deployment_current_process', 'N/A'); ai_feedback = state.get('deployment_feedback', 'N/A'); human_feedback = state.get('deployment_human_feedback', 'N/A')
     # --- ADDED LOOP ---
-    code_str_parts = []; total_len = 0; max_code_len =
+    code_str_parts = []; total_len = 0; max_code_len = 25000
     final_code = state.get("final_code_files", []); instructions = state.get("code_current", GeneratedCode(files=[], instructions="")).instructions
     files_to_process = final_code
     if not files_to_process: logger.warning(f"No files for {func_name}"); code_context = "No code files."
@@ -1298,4 +1306,4 @@ def save_final_deployment_plan(state: MainState) -> MainState:
     state["final_deployment_path"] = filepath
     return state
 
-# --- END OF
+# --- END OF SDLC.py ---
app.py
CHANGED
@@ -54,10 +54,41 @@ if not logger.handlers:
     logger.info("Streamlit app logger configured.")
 
 # --- Constants for Configuration ---
+# Define available providers and their models
 AVAILABLE_MODELS = {
-    "OpenAI": [
-
-
+    "OpenAI": [
+        "gpt-4o-mini", "gpt-4o-mini-2024-07-18",
+        "gpt-4o", "gpt-4o-2024-08-06",
+        "o1-mini", "o1-mini-2024-09-12",
+        "o3-mini", "o3-mini-2025-01-31",
+    ],
+    "Groq": [
+        "llama3-8b-8192", "llama3-70b-8192", "llama-3.1-8b-instant",
+        "llama-3.2-1b-preview", "llama-3.2-3b-preview", "llama-3.3-70b-specdec",
+        "llama-3.3-70b-versatile", "mistral-saba-24b", "gemma2-9b-it",
+        "deepseek-r1-distill-llama-70b", "deepseek-r1-distill-qwen-32b",
+        "qwen-2.5-32b", "qwen-2.5-coder-32b", "qwen-qwq-32b",
+        "mixtral-8x7b-32768",
+    ],
+    "Google": [
+        "gemini-1.5-pro-latest", "gemini-1.5-flash-latest",
+        "gemini-1.0-pro", "gemini-1.0-flash", "gemini-2.5-pro-exp-03-25", "gemini-2.0-flash",
+    ],
+    "Anthropic": [
+        # Use API Identifiers (usually include date)
+        "claude-3-opus-20240229",
+        "claude-3-sonnet-20240229",
+        "claude-3-haiku-20240307",
+        "claude-3-5-haiku-latest",
+        "claude-3-5-sonnet-latest",
+        "claude-3-7-sonnet-latest"
+    ],
+    "xAI": [
+        "grok-1",  # Primary model available via API
+        "grok-2-latest",
+        "grok-3",
+        "grok-3-mini"
+    ]
 }
 LLM_PROVIDERS = list(AVAILABLE_MODELS.keys())
 
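A minimal sketch of how this nested dict typically drives a two-step picker in the Streamlit sidebar (widget labels are assumptions, not taken from app.py):

    import streamlit as st

    # AVAILABLE_MODELS and LLM_PROVIDERS as defined above.
    provider = st.sidebar.selectbox("LLM Provider", LLM_PROVIDERS)
    model_name = st.sidebar.selectbox("Model", AVAILABLE_MODELS[provider])
    llm_api_key = st.sidebar.text_input(f"{provider} API Key", type="password")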
requirements.txt
CHANGED
@@ -1,32 +1,33 @@
-# Core Streamlit UI
-streamlit
-
-# LangChain Framework & Integrations
-# NOTE: LangChain releases often, pin versions for stability
-langchain
-langchain-core
-langchain-openai
-langchain-groq
-[removed lines 10-32 not recoverable from the page rendering]
+# Core Streamlit UI
+streamlit>=1.30.0,<2.0.0
+
+# LangChain Framework & Integrations
+# NOTE: LangChain releases often, pin versions for stability
+langchain>=0.1.14,<0.2.0
+langchain-core>=0.1.40,<0.2.0
+langchain-openai>=0.1.1,<0.2.0
+langchain-groq>=0.1.2,<0.2.0
+langchain-google-genai
+langchain-anthropic
+
+# Web Search Client
+tavily-python>=0.3.3,<0.4.0
+
+# Data Validation
+pydantic>=2.0.0,<3.0.0
+
+# Retry Logic
+tenacity>=8.2.0,<9.0.0
+
+# UML Diagram Generation (Requires Java runtime)
+plantuml>=0.3.0,<0.4.0
+
+# Environment Variable Loading
+python-dotenv>=1.0.0,<2.0.0
+
+# Typing Helpers (Often a dependency, but good to include explicitly)
+typing-extensions>=4.8.0,<5.0.0
+
+# HTTP Clients (Often dependencies of LLM/API clients, pin for stability)
+httpx>=0.27.0,<0.28.0
+requests>=2.31.0,<3.0.0