shaikhmohammedmujammil commited on
Commit
c16b68d
·
verified ·
1 Parent(s): 31fdcf2

added all files via drag and drop

Browse files
Files changed (5) hide show
  1. .gitattributes +1 -0
  2. SDLC.py +1301 -0
  3. SDLC_Workflow_Graph_Diagram.png +3 -0
  4. app.py +653 -0
  5. requirements.txt +13 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ SDLC_Workflow_Graph_Diagram.png filter=lfs diff=lfs merge=lfs -text
SDLC.py ADDED
@@ -0,0 +1,1301 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # newSDLC.py
2
+ import os
3
+ import sys
4
+ import shutil
5
+ from typing import List, Union, Dict, Annotated, Any
6
+ from typing_extensions import TypedDict
7
+ from pydantic import BaseModel, Field
8
+ from langchain.schema import AIMessage, HumanMessage
9
+ from langchain_core.language_models.base import BaseLanguageModel # Correct import path
10
+ from langchain_groq import ChatGroq
11
+ from langchain_openai import ChatOpenAI
12
+ # Add imports for other potential providers if needed
13
+ # from langchain_google_genai import ChatGoogleGenerativeAI
14
+ # from langchain_anthropic import ChatAnthropic
15
+ from tavily import TavilyClient
16
+ from dotenv import load_dotenv
17
+ import operator
18
+ import logging
19
+ import ast
20
+ import time
21
+ from plantuml import PlantUML
22
+ from functools import wraps
23
+ from tenacity import retry, stop_after_attempt, wait_exponential, wait_fixed, retry_if_exception_type
24
+
25
# --- Basic logging setup ---
# Module-wide logger; every workflow function below logs through it.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# --- Load Environment Variables ---
# Keep load_dotenv() in case some functions still rely on other env vars,
# but LLM/Tavily keys will now come from function args.
load_dotenv()

# --- REMOVED LLM / Tavily Initialization Block ---
# GLOBAL_LLM, OPENAI_LLM, tavily_client will be initialized dynamically
# (see initialize_llm_clients below) and carried inside the workflow state.
36
+
37
+ # --- Pydantic Models ---
38
+ # (Keep all Pydantic models as they were)
39
class DiagramSelection(BaseModel):
    """Structured LLM output: five chosen UML/DFD diagram types plus reasons."""
    diagram_types: List[str] = Field(..., description="List of 5 selected UML/DFD diagram types")
    justifications: List[str] = Field(..., description="Brief justifications for each diagram type")
42
class PlantUMLCode(BaseModel):
    """One generated PlantUML diagram: its type and raw source code."""
    diagram_type: str = Field(..., description="Type of UML/DFD diagram")
    code: str = Field(..., description="PlantUML code for the diagram")
45
class CodeFile(BaseModel):
    """A single project file: relative path and full text content."""
    filename: str = Field(..., description="Name of the file, including path relative to project root")
    content: str = Field(..., description="Full content of the file")
48
class GeneratedCode(BaseModel):
    """Complete generated project: all files plus setup/run instructions."""
    files: List[CodeFile] = Field(..., description="List of all files in the project")
    instructions: str = Field(..., description="Beginner-friendly setup and run instructions")
51
class TestCase(BaseModel):
    """One synthetic test case with fake input and expected output dicts."""
    description: str = Field(..., description="Description of the test case")
    input_data: dict = Field(..., description="Fake input data, must be non-empty")
    expected_output: dict = Field(..., description="Expected fake output, must be non-empty")
55
class TestCases(BaseModel):
    """Container for a batch of TestCase items (structured LLM output)."""
    test_cases: List[TestCase] = Field(..., description="List of test cases")
57
+
58
+ # --- Main State Definition ---
59
class MainState(TypedDict, total=False):
    """Shared mutable state dict threaded through every workflow step.

    total=False makes every key optional, so steps read with .get() and
    must tolerate missing keys.
    """
    # --- ADDED instance storage ---
    llm_instance: BaseLanguageModel | None # Store the initialized LLM
    tavily_instance: TavilyClient | None # Store the initialized Tavily client
    # --- END ADDED ---

    # Core conversation history; the Annotated reducer concatenates message
    # lists, treating None on either side as an empty list.
    messages: Annotated[List[Union[HumanMessage, AIMessage]], lambda x, y: (x or []) + (y or [])]

    # Project definition
    project_folder: str # Base name/relative path used for saving files
    project: str
    category: str
    subcategory: str
    coding_language: str

    # User Input Cycle State
    user_input_questions: List[str]
    user_input_answers: List[str]
    user_input_iteration: int
    user_input_min_iterations: int
    user_input_done: bool

    # Core Artifacts
    user_query_with_qa: str
    refined_prompt: str
    final_user_story: str
    final_product_review: str
    final_design_document: str
    final_uml_codes: List[PlantUMLCode]
    final_code_files: List[CodeFile]
    final_code_review: str
    final_security_issues: str
    final_test_code_files: List[CodeFile]
    final_quality_analysis: str
    final_deployment_process: str

    # File Paths (save steps set these to None when writing to disk failed)
    final_user_story_path: str
    final_product_review_path: str
    final_design_document_path: str
    final_uml_diagram_folder: str
    final_uml_png_paths: List[str]
    final_review_security_folder: str
    review_code_snapshot_folder: str
    final_testing_folder: str
    testing_passed_code_folder: str
    final_quality_analysis_path: str
    final_code_folder: str
    final_deployment_path: str

    # Intermediate States: one line per review cycle — current draft,
    # AI feedback, human feedback, and a done/passed flag.
    user_story_current: str; user_story_feedback: str; user_story_human_feedback: str; user_story_done: bool;
    product_review_current: str; product_review_feedback: str; product_review_human_feedback: str; product_review_done: bool;
    design_doc_current: str; design_doc_feedback: str; design_doc_human_feedback: str; design_doc_done: bool;
    uml_selected_diagrams: List[str]; uml_current_codes: List[PlantUMLCode]; uml_feedback: Dict[str, str]; uml_human_feedback: Dict[str, str]; uml_done: bool;
    code_current: GeneratedCode;
    code_human_input: str; code_web_search_results: str; code_feedback: str; code_human_feedback: str; code_done: bool;
    code_review_current_feedback: str; security_current_feedback: str; review_security_human_feedback: str; review_security_done: bool;
    test_cases_current: List[TestCase]; test_cases_feedback: str; test_cases_human_feedback: str; test_cases_passed: bool;
    quality_current_analysis: str; quality_feedback: str; quality_human_feedback: str; quality_done: bool;
    deployment_current_process: str; deployment_feedback: str; deployment_human_feedback: str; deployment_done: bool;
121
+
122
+
123
+ # --- Constants and Helper Functions ---
124
# Per-diagram-type PlantUML guidance. Each entry maps a diagram name to:
#   "template"          - minimal valid PlantUML skeleton for that diagram type
#   "required_keywords" - substrings validate_plantuml_code() warns about if absent
#   "notes"             - brief syntax reminders for that diagram type
PLANTUML_SYNTAX_RULES = { # Keep the full dictionary
    # ... (plantuml rules dictionary remains unchanged) ...
    "Activity Diagram": {"template": "@startuml\nstart\nif (condition) then (yes)\n :action1;\nelse (no)\n :action2;\nendif\nwhile (condition)\n :action3;\nendwhile\nstop\n@enduml", "required_keywords": ["start", ":", "stop"], "notes": "Conditionals: if/else/endif. Loops: while/endwhile. Actions: :action;."},
    "Sequence Diagram": {"template": "@startuml\nparticipant A\nparticipant B\nA -> B : message\nalt condition\n B --> A : success\nelse\n B --> A : failure\nend\n@enduml", "required_keywords": ["participant", "->", "-->"], "notes": "-> solid line, --> dashed line. alt/else/end for alternatives."},
    "Use Case Diagram": {"template": "@startuml\nactor User\nusecase (UC1)\nUser --> (UC1)\n@enduml", "required_keywords": ["actor", "-->", "("], "notes": "Define actors and use cases, connect with -->."},
    "Class Diagram": {"template": "@startuml\nclass MyClass {\n +field: Type\n +method()\n}\nMyClass --> OtherClass\n@enduml", "required_keywords": ["class", "{", "}", "-->"], "notes": "Define classes, attributes, methods. --> association, <|-- inheritance."},
    "State Machine Diagram": {"template": "@startuml\n[*] --> State1\nState1 --> State2 : event [condition] / action\nState2 --> [*]\n@enduml", "required_keywords": ["[*]", "-->", ":"], "notes": "[*] start/end. --> transitions with event/condition/action."},
    "Object Diagram": {"template": "@startuml\nobject obj1: Class1\nobj1 : attr = val\nobj1 --> obj2\n@enduml", "required_keywords": ["object", ":", "-->"], "notes": "Define objects (instances), set attributes, link."},
    "Component Diagram": {"template": "@startuml\ncomponent Comp1\ninterface Iface\nComp1 ..> Iface\nComp1 --> Comp2\n@enduml", "required_keywords": ["component", "-->"], "notes": "Define components, interfaces. --> dependency, ..> usage."},
    "Deployment Diagram": {"template": "@startuml\nnode Server {\n artifact app.jar\n}\n@enduml", "required_keywords": ["node", "artifact"], "notes": "Nodes for hardware/software envs, artifacts for deployed items."},
    "Package Diagram": {"template": "@startuml\npackage \"My Package\" {\n class ClassA\n}\n@enduml", "required_keywords": ["package", "{"], "notes": "Group elements."},
    "Composite Structure Diagram": {"template": "@startuml\nclass Composite {\n +part1 : Part1\n}\nComposite *-- Part1\n@enduml", "required_keywords": ["class", "{", "}", "*--"], "notes": "Show internal structure, *-- composition."},
    "Timing Diagram": {"template": "@startuml\nrobust \"User\" as U\nconcise \"System\" as S\n@0\nU is Idle\nS is Ready\n@100\nU -> S : Request()\nS is Processing\n@300\nS --> U : Response()\nU is Active\nS is Ready\n@enduml", "required_keywords": ["@", "is"], "notes": "Show state changes over time."},
    "Interaction Overview Diagram": {"template": "@startuml\nstart\nif (condition?) then (yes)\n ref over Actor : Interaction1\nelse (no)\n :Action A;\nendif\nstop\n@enduml", "required_keywords": ["start", ":", "ref", "stop"], "notes": "Combine activity diagrams with interaction refs."},
    "Communication Diagram": {"template": "@startuml\nobject O1\nobject O2\nO1 -> O2 : message()\n@enduml", "required_keywords": ["object", "->", ":"], "notes": "Focus on object interactions."},
    "Profile Diagram": {"template": "@startuml\nprofile MyProfile {\n stereotype MyStereotype\n}\n@enduml", "required_keywords": ["profile", "stereotype"], "notes": "Define custom stereotypes and tagged values."},
    "Context Diagram (Level 0 DFD)": {"template": "@startuml\nrectangle System as S\nentity External as E\nE --> S : Data Input\nS --> E : Data Output\n@enduml", "required_keywords": ["rectangle", "entity", "-->", ":"], "notes": "System boundary, external entities, major data flows."},
    "Level 1 DFD": {"template": "@startuml\nentity E\nrectangle P1\nrectangle P2\ndatabase DS\nE --> P1 : Input\nP1 --> P2 : Data\nP1 --> DS : Store\nP2 --> E : Output\n@enduml", "required_keywords": ["rectangle", "entity", "database", "-->", ":"], "notes": "Major processes, data stores, flows between them."},
    "Level 2 DFD": {"template": "@startuml\nrectangle P1.1\nrectangle P1.2\ndatabase DS\nP1.1 --> P1.2 : Internal Data\nP1.2 --> DS : Store Detail\n@enduml", "required_keywords": ["rectangle", "-->", ":"], "notes": "Decomposition of Level 1 processes."},
    "Level 3 DFD": {"template": "@startuml\nrectangle P1.1.1\nrectangle P1.1.2\nP1.1.1 --> P1.1.2 : Sub-detail\n@enduml", "required_keywords": ["rectangle", "-->", ":"], "notes": "Further decomposition."},
    "General DFD": {"template": "@startuml\nentity E\nrectangle P\ndatabase DS\nE --> P : Input\nP --> DS : Store\nDS --> P : Retrieve\nP --> E : Output\n@enduml", "required_keywords": ["entity", "rectangle", "database", "-->", ":"], "notes": "Generic structure for DFDs."},
}
146
+
147
def validate_plantuml_code(diagram_type: str, code: str) -> bool:
    """Lenient sanity check of PlantUML source for a given diagram type.

    Returns False only for an unknown diagram type or empty code; missing
    @startuml/@enduml markers or required keywords merely log warnings and
    still return True (validation is advisory, not blocking).
    """
    try:
        rules = PLANTUML_SYNTAX_RULES[diagram_type]
    except KeyError:
        logger.warning(f"Unknown diagram type for validation: {diagram_type}")
        return False
    if not code:
        logger.warning(f"Empty code provided for {diagram_type}.")
        return False
    trimmed = code.strip()
    if not trimmed.startswith("@startuml"):
        logger.warning(f"PlantUML code for {diagram_type} does not start with @startuml.")
    if not trimmed.endswith("@enduml"):
        logger.warning(f"PlantUML code for {diagram_type} does not end with @enduml.")
    # Keyword check runs against the raw (unstripped) code, as before.
    missing_keywords = [kw for kw in rules.get("required_keywords", []) if kw not in code]
    if missing_keywords:
        logger.warning(f"PlantUML code for {diagram_type} missing required keywords: {missing_keywords}.")
    return True
167
+
168
+ # --- NEW: Initialization Function ---
169
def initialize_llm_clients(provider: str, model_name: str, llm_api_key: str, tavily_api_key: str) -> tuple[BaseLanguageModel | None, TavilyClient | None, str | None]:
    """
    Initializes LLM and Tavily clients based on user-provided configuration.

    Args:
        provider: Name of the LLM provider (e.g., "OpenAI", "Groq").
        model_name: Name of the specific model to use.
        llm_api_key: API key for the selected LLM provider.
        tavily_api_key: API key for Tavily search.

    Returns:
        A tuple (llm_instance, tavily_instance, error_message) where each
        client is None on failure and error_message describes the first
        failure (LLM errors take precedence over Tavily errors).
    """
    llm_instance: BaseLanguageModel | None = None
    tavily_instance: TavilyClient | None = None
    error_message: str | None = None
    provider_lower = provider.lower()

    # Supported providers -> zero-arg constructors; extend here for new vendors
    # (e.g. ChatGoogleGenerativeAI, ChatAnthropic).
    builders = {
        "openai": lambda: ChatOpenAI(model=model_name, temperature=0.5, api_key=llm_api_key),
        "groq": lambda: ChatGroq(model=model_name, temperature=0.5, api_key=llm_api_key),
    }

    # --- Initialize LLM ---
    try:
        logger.info(f"Attempting to initialize LLM: Provider='{provider}', Model='{model_name}'")
        if not llm_api_key:
            raise ValueError("LLM API Key is required.")
        builder = builders.get(provider_lower)
        if builder is None:
            raise ValueError(f"Unsupported LLM provider: {provider}")
        llm_instance = builder()
        # No verification call is made here; an invalid key/model surfaces on first use.
        logger.info(f"LLM {provider} - {model_name} initialized successfully.")
    except ValueError as ve:
        error_message = str(ve)
        logger.error(f"LLM Initialization Error: {error_message}")
        llm_instance = None
    except Exception as e:
        error_message = f"An unexpected error occurred during LLM initialization for {provider}: {e}"
        logger.error(error_message, exc_info=True)
        llm_instance = None

    # --- Initialize Tavily (optional; absence only disables web search) ---
    if tavily_api_key:
        try:
            logger.info("Attempting to initialize Tavily client...")
            tavily_instance = TavilyClient(api_key=tavily_api_key)
            logger.info("Tavily client initialized successfully.")
        except Exception as e:
            tavily_err = f"Failed to initialize Tavily client: {e}"
            logger.error(tavily_err, exc_info=True)
            if error_message is None:  # Prioritize LLM error message
                error_message = tavily_err
            tavily_instance = None
    else:
        logger.warning("Tavily API Key not provided. Web search will be disabled.")
        tavily_instance = None

    return llm_instance, tavily_instance, error_message
252
+
253
+ # --- Modified Retry Decorator ---
254
+ # Removed the initial GLOBAL_LLM check
255
def with_retry(func):
    """Decorator adding retry logic (3 attempts, exponential back-off 2-10s).

    Fix: in the original, the try/except lived *inside* the tenacity-retried
    function, so the "after retries" error was logged on every single attempt.
    Here the retried core is separated from the outer wrapper, so the error
    is logged exactly once, after all retries have been exhausted, matching
    the log message's claim. Interface and retry policy are unchanged.
    """
    @retry(
        stop=stop_after_attempt(3),
        wait=wait_exponential(multiplier=1, min=2, max=10),
        retry=retry_if_exception_type(Exception),
        before_sleep=lambda rs: logger.warning(
            f"Retrying {func.__name__} (attempt {rs.attempt_number}) after {rs.next_action.sleep:.2f}s delay..."
        )
    )
    def _attempt(*args, **kwargs):
        # Core call that tenacity retries.
        return func(*args, **kwargs)

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return _attempt(*args, **kwargs)
        except Exception as e:
            # All retries failed: log once, then propagate to the caller.
            logger.error(f"Error in {func.__name__} after retries: {e}", exc_info=True)
            raise
    return wrapper
275
+
276
+ # --- Workflow Functions ---
277
+ # --- MODIFIED TO USE state['llm_instance'] and state['tavily_instance'] ---
278
+
279
+ # --- User Input Cycle ---
280
@with_retry
def generate_questions(state: MainState) -> MainState:
    """Ask the LLM for up to 5 clarification questions and record them in state."""
    model = state.get('llm_instance')
    if not model:
        raise ConnectionError("LLM instance not found in state.")
    state.setdefault('messages', [])
    context = f"Project: {state['project']} ({state['category']}/{state['subcategory']}) in {state['coding_language']}."
    iteration = state.get("user_input_iteration", 0)
    if iteration == 0:
        # First round: no Q&A history to build on yet.
        prompt = f"You are a requirements analyst. Ask exactly 5 concise questions to clarify the initial needs for this project: {context}"
    else:
        qa_pairs = zip(state.get("user_input_questions",[]), state.get("user_input_answers",[]))
        qa_history = "\n".join([f"Q: {q}\nA: {a}" for q, a in qa_pairs])
        prompt = f"Based on the previous Q&A for the project ({context}), ask up to 5 more concise clarification questions...\nPrevious Q&A:\n{qa_history}"
    reply = model.invoke(prompt)  # Use LLM from state
    questions = [line.strip() for line in reply.content.strip().split("\n") if line.strip()]
    state["user_input_questions"] = state.get("user_input_questions", []) + questions
    state["messages"].append(AIMessage(content="\n".join(questions)))
    logger.info(f"Generated {len(questions)} questions for iteration {iteration}.")
    return state
299
+
300
@with_retry
def refine_prompt(state: MainState) -> MainState:
    """Condense the Q&A history into a refined prompt and persist both artifacts."""
    model = state.get('llm_instance')
    if not model:
        raise ConnectionError("LLM instance not found in state.")
    state.setdefault('messages', [])
    qa_history = "\n".join([f"Q: {q}\nA: {a}" for q, a in zip(state.get("user_input_questions",[]), state.get("user_input_answers",[]))])
    prompt = f"Based on the following Q&A history for project '{state['project']}', synthesize a concise 'Refined Prompt'...\nQ&A History:\n{qa_history}\n---\nOutput ONLY the refined prompt text."
    refined_prompt_text = model.invoke(prompt).content.strip()  # Use LLM from state
    state["refined_prompt"] = refined_prompt_text
    state["user_query_with_qa"] = qa_history
    state["messages"].append(AIMessage(content=f"Refined Prompt:\n{refined_prompt_text}"))
    logger.info("Refined project prompt based on Q&A.")
    # Best-effort persistence: failures are logged, never fatal to the workflow.
    try:
        intro_dir = os.path.join(os.path.abspath(state.get("project_folder", "default_project")), "1_intro")
        os.makedirs(intro_dir, exist_ok=True)
        with open(os.path.join(intro_dir, "user_query_with_qa.txt"), "w", encoding="utf-8") as f:
            f.write(qa_history)
        with open(os.path.join(intro_dir, "refined_prompt.md"), "w", encoding="utf-8") as f:
            f.write(refined_prompt_text)
        logger.info(f"Saved Q&A history and refined prompt to {intro_dir}")
    except Exception as e:
        logger.error(f"Failed to save intro files: {e}", exc_info=True)
    return state
327
+
328
+ # --- User Story Cycle ---
329
@with_retry
def generate_initial_user_stories(state: MainState) -> MainState:
    """Draft a first set of user stories from the refined prompt."""
    model = state.get('llm_instance')
    if not model:
        raise ConnectionError("LLM instance not found in state.")
    state.setdefault('messages', [])
    prompt = f"Generate a list of user stories for project '{state['project']}' using standard format 'As a..., I want..., so that...'. Base on:\nRefined Prompt:\n{state['refined_prompt']}"
    stories = model.invoke(prompt).content.strip()  # Use LLM from state
    state["user_story_current"] = stories
    state["messages"].append(AIMessage(content=f"Initial User Stories:\n{stories}"))
    logger.info("Generated Initial User Stories.")
    return state
342
+
343
@with_retry
def generate_user_story_feedback(state: MainState) -> MainState:
    """Have the LLM critique the current user stories from a QA perspective."""
    model = state.get('llm_instance')
    if not model:
        raise ConnectionError("LLM instance not found in state.")
    state.setdefault('messages', [])
    prompt = f"Act as QA. Review user stories for clarity, atomicity, testability, alignment...\nUser Stories:\n{state.get('user_story_current', 'N/A')}\n---\nRefined Prompt (Context):\n{state.get('refined_prompt', 'N/A')[:500]}..."
    critique = model.invoke(prompt).content.strip()  # Use LLM from state
    state["user_story_feedback"] = critique
    state["messages"].append(AIMessage(content=f"User Story Feedback:\n{critique}"))
    logger.info("Generated feedback on user stories.")
    return state
356
+
357
@with_retry
def refine_user_stories(state: MainState) -> MainState:
    """Rewrite the user stories, folding in AI and human feedback."""
    model = state.get('llm_instance')
    if not model:
        raise ConnectionError("LLM instance not found in state.")
    state.setdefault('messages', [])
    prompt = f"Refine user stories for '{state['project']}' based on feedback.\nCurrent Stories:\n{state.get('user_story_current', 'N/A')}\nAI FB:\n{state.get('user_story_feedback', 'N/A')}\nHuman FB:\n{state.get('user_story_human_feedback', 'N/A')}\n---\nOutput refined list."
    revised = model.invoke(prompt).content.strip()  # Use LLM from state
    state["user_story_current"] = revised
    state["messages"].append(AIMessage(content=f"Refined User Stories:\n{revised}"))
    logger.info("Refined User Stories based on feedback.")
    return state
370
+
371
+ # save_final_user_story remains unchanged (no LLM calls)
372
def save_final_user_story(state: MainState) -> MainState:
    """Promote the current user stories to final and write them to disk.

    On any I/O failure the path is recorded as None and the workflow continues.
    """
    final_text = state.get("user_story_current", "No user stories generated.")
    state["final_user_story"] = final_text
    filepath = None
    try:
        us_dir = os.path.join(os.path.abspath(state["project_folder"]), "2_user_story")
        os.makedirs(us_dir, exist_ok=True)
        filepath = os.path.join(us_dir, "final_user_story.md")
        with open(filepath, "w", encoding="utf-8") as f:
            f.write(final_text)
        logger.info(f"Saved final user story to: {filepath}")
    except Exception as e:
        logger.error(f"Failed to save final user story: {e}", exc_info=True)
        filepath = None  # Signal callers that nothing was written
    state["final_user_story_path"] = filepath
    return state
389
+
390
+ # --- Product Owner Review Cycle ---
391
@with_retry
def generate_initial_product_review(state: MainState) -> MainState:
    """Produce a first product-owner review of the prompt and user stories."""
    model = state.get('llm_instance')
    if not model:
        raise ConnectionError("LLM instance not found in state.")
    state.setdefault('messages', [])
    prompt = f"Act as Product Owner for '{state['project']}'. Review prompt and stories, assess alignment, completeness, concerns...\nPrompt:\n{state.get('refined_prompt', 'N/A')}\nStories:\n{state.get('final_user_story', 'N/A')}"
    review = model.invoke(prompt).content.strip()  # Use LLM from state
    state["product_review_current"] = review
    state["messages"].append(AIMessage(content=f"Initial Product Review:\n{review}"))
    logger.info("Generated initial product owner review.")
    return state
404
+
405
@with_retry
def generate_product_review_feedback(state: MainState) -> MainState:
    """Have the LLM critique the product-owner review itself."""
    model = state.get('llm_instance')
    if not model:
        raise ConnectionError("LLM instance not found in state.")
    state.setdefault('messages', [])
    prompt = f"Review the PO assessment for clarity, logic, priorities...\nPO Review:\n{state.get('product_review_current', 'N/A')}\nStories (Context):\n{state.get('final_user_story', 'N/A')[:1000]}..."
    critique = model.invoke(prompt).content.strip()  # Use LLM from state
    state["product_review_feedback"] = critique
    state["messages"].append(AIMessage(content=f"Product Review Feedback:\n{critique}"))
    logger.info("Generated feedback on product review.")
    return state
418
+
419
@with_retry
def refine_product_review(state: MainState) -> MainState:
    """Rewrite the product-owner review, folding in AI and human feedback."""
    model = state.get('llm_instance')
    if not model:
        raise ConnectionError("LLM instance not found in state.")
    state.setdefault('messages', [])
    prompt = f"Refine the PO review for '{state['project']}' based on feedback.\nCurrent:\n{state.get('product_review_current', 'N/A')}\nAI FB:\n{state.get('product_review_feedback', 'N/A')}\nHuman FB:\n{state.get('product_review_human_feedback', 'N/A')}\n---\nOutput refined review."
    revised = model.invoke(prompt).content.strip()  # Use LLM from state
    state["product_review_current"] = revised
    state["messages"].append(AIMessage(content=f"Refined Product Review:\n{revised}"))
    logger.info("Refined product owner review.")
    return state
432
+
433
+ # save_final_product_review remains unchanged
434
def save_final_product_review(state: MainState) -> MainState:
    """Promote the current product review to final and write it to disk.

    On any I/O failure the path is recorded as None and the workflow continues.
    """
    final_text = state.get("product_review_current", "No review generated.")
    state["final_product_review"] = final_text
    filepath = None
    try:
        pr_dir = os.path.join(os.path.abspath(state["project_folder"]), "3_product_review")
        os.makedirs(pr_dir, exist_ok=True)
        filepath = os.path.join(pr_dir, "final_product_review.md")
        with open(filepath, "w", encoding="utf-8") as f:
            f.write(final_text)
        logger.info(f"Saved final product review to: {filepath}")
    except Exception as e:
        logger.error(f"Failed to save final product review: {e}", exc_info=True)
        filepath = None  # Signal callers that nothing was written
    state["final_product_review_path"] = filepath
    return state
451
+
452
+ # --- Design Document Cycle ---
453
@with_retry
def generate_initial_design_doc(state: MainState) -> MainState:
    """Draft the initial high-level design document from the upstream artifacts."""
    model = state.get('llm_instance')
    if not model:
        raise ConnectionError("LLM instance not found in state.")
    state.setdefault('messages', [])
    prompt = f"Act as System Architect for '{state['project']}'. Create high-level design (Arch, Components, Data, API, Tech, Deploy) based on...\nPrompt:\n{state.get('refined_prompt', 'N/A')}\nStories:\n{state.get('final_user_story', 'N/A')}\nReview:\n{state.get('final_product_review', 'N/A')}"
    draft = model.invoke(prompt).content.strip()  # Use LLM from state
    state["design_doc_current"] = draft
    state["messages"].append(AIMessage(content=f"Initial Design Document:\n{draft}"))
    logger.info("Generated Initial Design Document")
    return state
466
+
467
@with_retry
def generate_design_doc_feedback(state: MainState) -> MainState:
    """Have the LLM critique the current design document."""
    model = state.get('llm_instance')
    if not model:
        raise ConnectionError("LLM instance not found in state.")
    state.setdefault('messages', [])
    prompt = f"Review Design Doc for completeness, clarity, consistency, feasibility...\nDoc:\n{state.get('design_doc_current', 'N/A')}\nStories (Context):\n{state.get('final_user_story', 'N/A')[:1000]}..."
    critique = model.invoke(prompt).content.strip()  # Use LLM from state
    state["design_doc_feedback"] = critique
    state["messages"].append(AIMessage(content=f"Design Document Feedback:\n{critique}"))
    logger.info("Generated Design Document Feedback")
    return state
480
+
481
@with_retry
def refine_design_doc(state: MainState) -> MainState:
    """Rewrite the design document using both AI and human feedback.

    Replaces ``state["design_doc_current"]`` with the refined version and
    logs it to the message history.

    Raises:
        ConnectionError: if no LLM instance is present in ``state``.
    """
    model = state.get('llm_instance')
    if not model:
        raise ConnectionError("LLM instance not found in state.")
    state.setdefault('messages', [])
    refine_prompt = (
        f"Refine Design Doc for '{state['project']}' based on feedback."
        f"\nCurrent:\n{state.get('design_doc_current', 'N/A')}"
        f"\nAI FB:\n{state.get('design_doc_feedback', 'N/A')}"
        f"\nHuman FB:\n{state.get('design_doc_human_feedback', 'N/A')}"
        "\n---\nOutput refined doc."
    )
    updated_doc = model.invoke(refine_prompt).content.strip()
    state["design_doc_current"] = updated_doc
    state["messages"].append(AIMessage(content=f"Refined Design Document:\n{updated_doc}"))
    logger.info("Refined Design Document")
    return state
494
+
495
+ # save_final_design_doc remains unchanged
496
def save_final_design_doc(state: MainState) -> MainState:
    """Persist the final design document under ``<project>/4_design_doc/``.

    On any I/O failure the error is logged, ``final_design_document_path``
    is set to None, and the workflow continues (best-effort save).
    """
    final_doc = state.get("design_doc_current", "No design generated.")
    state["final_design_document"] = final_doc
    out_path = None
    try:
        root = os.path.abspath(state["project_folder"])
        target_dir = os.path.join(root, "4_design_doc")
        os.makedirs(target_dir, exist_ok=True)
        out_path = os.path.join(target_dir, "final_design_document.md")
        with open(out_path, "w", encoding="utf-8") as fh:
            fh.write(final_doc)
        logger.info(f"Saved final design doc: {out_path}")
    except Exception as e:
        logger.error(f"Failed save design doc: {e}", exc_info=True)
        out_path = None
    state["final_design_document_path"] = out_path
    return state
510
+
511
+
512
+ # --- UML Diagram Cycle ---
513
@with_retry
def select_uml_diagrams(state: MainState) -> MainState:
    """Pick up to five relevant UML/DFD diagram types for the project.

    Uses structured output (``DiagramSelection``) so the LLM's choice can be
    parsed reliably; duplicates are dropped while preserving order.

    Raises:
        ConnectionError: if no LLM instance is present in ``state``.
    """
    model = state.get('llm_instance')
    if not model:
        raise ConnectionError("LLM instance not found in state.")
    state.setdefault('messages', [])
    available = ', '.join(PLANTUML_SYNTAX_RULES.keys())
    selection_prompt = (
        f"Select 5 most relevant UML/DFD types for '{state['project']}' from list [{available}] based on Design Doc:"
        f"\n{state.get('final_design_document', 'N/A')}"
        "\nJustify choices. Output ONLY JSON (DiagramSelection model)."
    )
    picker = model.with_structured_output(DiagramSelection)
    selection = picker.invoke(selection_prompt)
    # dict.fromkeys keeps first-seen order while removing duplicates.
    chosen = list(dict.fromkeys(selection.diagram_types))[:5]
    reasons = selection.justifications[:len(chosen)]
    state["uml_selected_diagrams"] = chosen
    bullet_lines = [f"- {dt} - {j}" for dt, j in zip(chosen, reasons)]
    state["messages"].append(AIMessage(content="Selected Diagrams:\n" + "\n".join(bullet_lines)))
    logger.info(f"Selected UML Diagrams: {', '.join(chosen)}")
    return state
530
+
531
@with_retry
def generate_initial_uml_codes(state: MainState) -> MainState:
    """Generate initial PlantUML code for each selected diagram type.

    For every type in ``state["uml_selected_diagrams"]`` the LLM is asked for
    PlantUML source; if generation or validation fails, the per-type syntax
    template (or a minimal default) is used instead, so the result list always
    has one entry per selected type. Results are stored in
    ``state["uml_current_codes"]``.

    Raises:
        ConnectionError: if no LLM instance is present in ``state``.
    """
    llm = state.get('llm_instance')
    if not llm: raise ConnectionError("LLM instance not found in state.")
    if 'messages' not in state: state['messages'] = []
    generated_codes = []
    selected_diagrams = state.get("uml_selected_diagrams", [])
    # Nothing selected: record an empty list and bail out early.
    if not selected_diagrams: logger.warning("No diagrams selected."); state["uml_current_codes"] = []; return state

    logger.info(f"Generating initial PlantUML code for: {', '.join(selected_diagrams)}")
    for diagram_type in selected_diagrams:
        syntax_info = PLANTUML_SYNTAX_RULES.get(diagram_type, {})
        default_code = "@startuml\n' Default template\n@enduml"
        # Fallback code if the LLM output is invalid or generation raises.
        code_to_use = syntax_info.get("template", default_code)
        prompt = f"Generate PlantUML code for a '{diagram_type}' for '{state['project']}'. Base on Design Doc:\n{state.get('final_design_document', 'N/A')[:2000]}...\nAdhere to syntax:\nTemplate:\n{syntax_info.get('template', 'N/A')}\nNotes: {syntax_info.get('notes', 'N/A')}\n---\nGenerate ONLY the PlantUML code block."
        try:
            structured_llm = llm.with_structured_output(PlantUMLCode)  # Use LLM from state
            response = structured_llm.invoke(prompt)
            generated_code = response.code.strip() if response and response.code else ""
            # Only accept LLM output that passes the project's validator.
            if validate_plantuml_code(diagram_type, generated_code): code_to_use = generated_code
            else: logger.warning(f"Generated code for {diagram_type} failed validation. Using template.")
        except Exception as e: logger.error(f"Failed to generate/validate PlantUML for {diagram_type}: {e}. Using template.", exc_info=True)
        generated_codes.append(PlantUMLCode(diagram_type=diagram_type, code=code_to_use))

    state["uml_current_codes"] = generated_codes
    summary = "\n".join([f"**{c.diagram_type}**:\n```plantuml\n{c.code}\n```" for c in generated_codes])
    state["messages"].append(AIMessage(content=f"Generated Initial UML Codes:\n{summary}"))
    logger.info(f"Generated initial code for {len(generated_codes)} UML diagrams.")
    return state
561
+
562
@with_retry
def generate_uml_feedback(state: MainState) -> MainState:
    """Collect AI review feedback for each diagram in ``uml_current_codes``.

    Feedback is gathered per diagram type into ``state["uml_feedback"]``
    (type -> feedback text). A failure for one diagram does not abort the
    others: the error text is stored as that diagram's feedback instead.

    Raises:
        ConnectionError: if no LLM instance is present in ``state``.
    """
    # Use primary LLM from state, fallback needed? Or rely on app config? Assuming primary.
    llm = state.get('llm_instance')
    if not llm: raise ConnectionError("LLM instance not found in state.")
    if 'messages' not in state: state['messages'] = []
    feedback_dict = {}
    current_codes = state.get('uml_current_codes', [])
    # No diagrams to review: record an empty mapping and return.
    if not current_codes: logger.warning("No UML codes for feedback."); state["uml_feedback"] = {}; return state

    logger.info(f"Generating feedback for {len(current_codes)} UML diagrams.")
    for plantuml_code in current_codes:
        diagram_type = plantuml_code.diagram_type; code_to_review = plantuml_code.code
        syntax_info = PLANTUML_SYNTAX_RULES.get(diagram_type, {})
        # Design document context is truncated to keep the prompt bounded.
        prompt = f"Review PlantUML code for '{diagram_type}' of '{state['project']}'. Check Syntax, Alignment with Design, Clarity.\nSyntax (Ref):\n{syntax_info.get('template', 'N/A')}\nNotes: {syntax_info.get('notes', 'N/A')}\nCode:\n```plantuml\n{code_to_review}\n```\nDesign (Context):\n{state.get('final_design_document', 'N/A')[:1000]}...\n---\nProvide feedback."
        try:
            # Maybe use OPENAI_LLM if available and different? For now, use primary.
            response = llm.invoke(prompt)  # Use LLM from state
            feedback_dict[diagram_type] = response.content.strip()
        except Exception as e: logger.error(f"Failed feedback for {diagram_type}: {e}"); feedback_dict[diagram_type] = f"Error: {e}"

    state["uml_feedback"] = feedback_dict
    summary = "\n\n".join([f"**Feedback for {dt}:**\n{fb}" for dt, fb in feedback_dict.items()])
    state["messages"].append(AIMessage(content=f"UML Feedback Provided:\n{summary}"))
    logger.info("Generated feedback for all current UML diagrams.")
    return state
589
+
590
@with_retry
def refine_uml_codes(state: MainState) -> MainState:
    """Refine each UML diagram using AI and human feedback.

    Per-diagram human feedback is looked up by diagram type, falling back to
    a shared ``'all'`` entry. If refinement fails or the refined code does not
    validate, the previous code object is kept (revert), so the list length is
    preserved. Results replace ``state["uml_current_codes"]``.

    Raises:
        ConnectionError: if no LLM instance is present in ``state``.
    """
    llm = state.get('llm_instance')
    if not llm: raise ConnectionError("LLM instance not found in state.")
    if 'messages' not in state: state['messages'] = []
    refined_codes_list = []
    current_codes = state.get('uml_current_codes', [])
    ai_feedback = state.get('uml_feedback', {})
    human_feedback = state.get('uml_human_feedback', {})
    if not current_codes: logger.warning("No UML codes to refine."); return state

    logger.info(f"Refining {len(current_codes)} UML diagrams.")
    for plantuml_code_obj in current_codes:
        diagram_type = plantuml_code_obj.diagram_type; current_code = plantuml_code_obj.code
        syntax_info = PLANTUML_SYNTAX_RULES.get(diagram_type, {})
        # Type-specific human feedback wins; otherwise use the 'all' entry.
        specific_human_feedback = human_feedback.get(diagram_type, human_feedback.get('all', 'N/A'))
        prompt = f"Refine PlantUML for '{diagram_type}' of '{state['project']}' based on feedback.\nSyntax (Ref):\n{syntax_info.get('template', 'N/A')}\nNotes: {syntax_info.get('notes', 'N/A')}\nCurrent:\n```plantuml\n{current_code}\n```\nAI FB:\n{ai_feedback.get(diagram_type, 'N/A')}\nHuman FB:\n{specific_human_feedback}\n---\nGenerate ONLY refined PlantUML block."
        try:
            structured_llm = llm.with_structured_output(PlantUMLCode)  # Use LLM from state
            response = structured_llm.invoke(prompt)
            refined_code = response.code.strip() if response and response.code else ""
            if validate_plantuml_code(diagram_type, refined_code):
                refined_codes_list.append(PlantUMLCode(diagram_type=diagram_type, code=refined_code))
            # Invalid or failed refinement: keep the previous version.
            else: logger.warning(f"Refined {diagram_type} invalid. Reverting."); refined_codes_list.append(plantuml_code_obj)
        except Exception as e: logger.error(f"Failed refine {diagram_type}: {e}. Reverting.", exc_info=True); refined_codes_list.append(plantuml_code_obj)

    state["uml_current_codes"] = refined_codes_list
    summary = "\n".join([f"**{c.diagram_type} (Refined):**\n```plantuml\n{c.code}\n```" for c in refined_codes_list])
    state["messages"].append(AIMessage(content=f"Refined UML Codes:\n{summary}"))
    logger.info(f"Refined {len(refined_codes_list)} UML diagrams.")
    return state
622
+
623
+ # save_final_uml_diagrams remains unchanged (no LLM calls)
624
def save_final_uml_diagrams(state: MainState) -> MainState:
    """Save final .puml files and best-effort render PNGs via a PlantUML server.

    Writes one ``.puml`` file per diagram into ``<project>/5_uml_diagrams/``
    and, when the PlantUML client is available, renders a matching ``.png``.
    PNG failures are logged and skipped; paths of successful PNGs end up in
    ``state["final_uml_png_paths"]``.
    """
    state["final_uml_codes"] = state.get("uml_current_codes", [])
    png_paths = [] # List to store paths of successfully generated PNGs
    uml_dir = None
    try:
        abs_project_folder = os.path.abspath(state["project_folder"])
        uml_dir = os.path.join(abs_project_folder, "5_uml_diagrams")
        os.makedirs(uml_dir, exist_ok=True)
        state["final_uml_diagram_folder"] = uml_dir # Store path to folder
        can_generate_png = False
        server = None
        try:
            # NOTE(review): constructing the client likely does NOT contact the
            # server, so "connection appears OK" only means the client object
            # was created — confirm against the plantuml library's behavior.
            server = PlantUML(url="http://www.plantuml.com/plantuml/png/")
            can_generate_png = True
            logger.info("PlantUML server connection appears OK.")
        except Exception as p_e:
            logger.warning(f"PlantUML server connection failed: {p_e}. PNG generation will be skipped. Check Java/PlantUML setup and network connectivity.", exc_info=True)
        if not state["final_uml_codes"]:
            logger.warning("No UML codes found to save."); state["final_uml_png_paths"] = []; return state
        logger.info(f"Saving {len(state['final_uml_codes'])} UML diagrams to {uml_dir}...")
        for i, pc in enumerate(state["final_uml_codes"], 1):
            # Sanitize the diagram type into a filesystem-safe lowercase name.
            safe_type_name = "".join(c if c.isalnum() or c in ['_','-'] else '_' for c in pc.diagram_type).lower()
            name = f"diagram_{i}_{safe_type_name}"
            puml_path = os.path.join(uml_dir, f"{name}.puml")
            png_path = os.path.join(uml_dir, f"{name}.png")
            try:
                with open(puml_path, "w", encoding="utf-8") as f: f.write(pc.code)
                logger.debug(f"Saved PUML file: {puml_path}")
            # If the source file can't be written, skip PNG rendering too.
            except Exception as file_e: logger.error(f"Error saving PUML file {puml_path}: {file_e}", exc_info=True); continue
            if can_generate_png and server:
                logger.debug(f"Attempting PNG generation for {name}...")
                try:
                    server.processes_file(filename=puml_path, outfile=png_path)
                    # Guard against "success" with an empty/missing output file.
                    if os.path.exists(png_path) and os.path.getsize(png_path) > 0:
                        logger.info(f"Successfully generated PNG: {png_path}"); png_paths.append(png_path)
                    else: logger.error(f"PlantUML processed '{name}' but output PNG is missing or empty: {png_path}")
                except FileNotFoundError as fnf_err: logger.error(f"PNG generation failed for {name}: Executable/Java not found? Error: {fnf_err}", exc_info=False)
                except Exception as png_e: logger.error(f"PNG generation failed for {name} ({pc.diagram_type}): {png_e}", exc_info=False)
            elif not can_generate_png: logger.debug(f"Skipping PNG generation for {name} due to server connection issue.")
        state["final_uml_png_paths"] = png_paths
        logger.info(f"Finished UML saving. Saved {len(state['final_uml_codes'])} PUML files. Generated {len(png_paths)} PNG files.")
    except Exception as e:
        logger.error(f"General error in save_final_uml_diagrams: {e}", exc_info=True)
        state["final_uml_diagram_folder"] = None; state["final_uml_png_paths"] = []
    return state
670
+
671
+
672
+ # --- Code Generation Cycle ---
673
@with_retry
def generate_initial_code(state: MainState) -> MainState:
    """Generate the first full codebase for the project via structured output.

    Stores the resulting ``GeneratedCode`` in ``state["code_current"]``.

    Raises:
        ConnectionError: if no LLM instance is present in ``state``.
        ValueError: if the LLM response has no usable file structure.
    """
    model = state.get('llm_instance')
    if not model:
        raise ConnectionError("LLM instance not found in state.")
    state.setdefault('messages', [])
    uml_types = ', '.join(c.diagram_type for c in state.get('final_uml_codes', []))
    gen_prompt = (
        f"Generate complete, runnable '{state['coding_language']}' project for '{state['project']}'. "
        f"Base on Design Doc, User Stories, and UML ({uml_types}). Include main scripts, modules, requirements, basic README, comments."
        f"\nDesign:\n{state.get('final_design_document', 'N/A')}"
        f"\nStories (Context):\n{state.get('final_user_story', 'N/A')[:1000]}..."
        "\n---\nOutput ONLY JSON (GeneratedCode model)."
    )
    result = model.with_structured_output(GeneratedCode).invoke(gen_prompt)
    if not result or not isinstance(result, GeneratedCode) or not result.files:
        logger.error("Initial code gen failed or invalid format.")
        raise ValueError("Did not produce expected file structure.")
    state["code_current"] = result
    key_files = ', '.join(f.filename for f in result.files[:3])
    summary = f"Generated {len(result.files)} files. Key: {key_files}...\nInstructions:\n{result.instructions[:200]}..."
    state["messages"].append(AIMessage(content=f"Initial Code Generation:\n{summary}"))
    logger.info(f"Generated initial code with {len(result.files)} files.")
    return state
690
+
691
@with_retry
def web_search_code(state: MainState) -> MainState:
    """Search the web (Tavily) for material related to the user's code issue.

    Results (or a "Skipped"/error marker) are stored in
    ``state["code_web_search_results"]`` and summarized into the message log.
    Search failures never abort the workflow.

    BUGFIX: ``state['messages']`` is now initialized BEFORE the missing-Tavily
    guard; previously that guard appended to ``state["messages"]`` before the
    key was ensured, raising KeyError on a fresh state.
    """
    # Ensure the message log exists before any early-return path appends to it.
    if 'messages' not in state:
        state['messages'] = []
    tavily = state.get('tavily_instance')  # Use Tavily client from state
    if not tavily:
        logger.warning("Tavily client not in state, skipping web search.")
        state["code_web_search_results"] = "Skipped (Tavily client not configured)"
        state["messages"].append(AIMessage(content="Web Search: Skipped"))
        return state
    human_input = state.get('code_human_input', '')
    if not human_input or not human_input.strip():
        # No concrete issue reported by the user — nothing to search for.
        logger.info("Skipping web search - no issue provided.")
        state["code_web_search_results"] = "Skipped (No specific issue)"
        state["messages"].append(AIMessage(content="Web Search: Skipped"))
        return state
    human_input_summary = human_input[:200]
    coding_language = state.get('coding_language', 'programming')
    project_context = state.get('project', 'project')[:50]
    search_query = f"{coding_language} issues related to '{human_input_summary}' in {project_context}"
    logger.info(f"Performing Tavily search: {search_query}")
    try:
        response = tavily.search(query=search_query, search_depth="basic", max_results=3)
        search_results = response.get("results", [])
        if search_results:
            results_text = "\n\n".join([f"**{r.get('title', 'N/A')}**\nURL: {r.get('url', 'N/A')}\nSnippet: {r.get('content', 'N/A')[:300]}..." for r in search_results])
            state["code_web_search_results"] = results_text
            logger.info(f"Tavily found {len(search_results)} results.")
        else:
            state["code_web_search_results"] = "No relevant results found."
            logger.info("Tavily found no results.")
    except Exception as e:
        logger.error(f"Tavily search failed: {e}", exc_info=True)
        state["code_web_search_results"] = f"Error during web search: {e}"
    # Truncate long result text for the chat summary.
    summary = state['code_web_search_results'][:500] + ('...' if len(state['code_web_search_results']) > 500 else '')
    state["messages"].append(AIMessage(content=f"Web Search Summary:\n{summary}"))
    logger.info("Completed Web Search.")
    return state
715
+
716
@with_retry
def generate_code_feedback(state: MainState) -> MainState:
    """Generate AI review feedback on the current generated code.

    Builds a truncated (max ~4000 chars) text view of the files in
    ``state["code_current"]`` plus the user's execution feedback and web search
    results, then stores the LLM's review in ``state["code_feedback"]``.

    Raises:
        ConnectionError: if no LLM instance is present in ``state``.
    """
    llm = state.get('llm_instance')
    if not llm: raise ConnectionError("LLM instance not found in state.")
    if 'messages' not in state: state['messages'] = []
    func_name = "generate_code_feedback"
    code_c = state.get("code_current"); instructions = ""
    # Build a bounded code-context string so the prompt stays within limits.
    code_str_parts = []; total_len = 0; max_code_len = 4000
    files_to_process = code_c.files if code_c and isinstance(code_c, GeneratedCode) else []
    if not files_to_process: logger.warning(f"No files in code_current for {func_name}"); code_content = "No code files provided."; instructions = "N/A"
    else:
        instructions = code_c.instructions
        for file in files_to_process:
            header = f"--- {file.filename} ---\n"; remaining_len = max_code_len - total_len - len(header)
            # Budget exhausted before this file even starts: mark truncation.
            if remaining_len <= 0: code_str_parts.append("\n*... (Code context truncated)*"); logger.debug(f"Code context truncated for {func_name}"); break
            snippet = file.content[:remaining_len]; is_truncated = len(file.content) > remaining_len
            code_str_parts.append(header + snippet + ('...' if is_truncated else '')); total_len += len(header) + len(snippet)
            if total_len >= max_code_len:
                # Avoid emitting the truncation marker twice in a row.
                if not code_str_parts[-1].endswith("truncated)*"): code_str_parts.append("\n*... (Code context truncated)*")
                logger.debug(f"Code context max length for {func_name}"); break
        code_content = "\n".join(code_str_parts)
    prompt = f"Act as reviewer for '{state['project']}' ({state['coding_language']}). Review code, instructions, user feedback, search results. Suggest improvements.\nCode:\n{code_content}\nInstr:\n{instructions}\nUser FB:\n{state.get('code_human_input', 'N/A')}\nSearch:\n{state.get('code_web_search_results', 'N/A')}\n---\nProvide feedback."
    response = llm.invoke(prompt)  # Use LLM from state
    feedback_text = response.content.strip()
    state["code_feedback"] = feedback_text
    state["messages"].append(AIMessage(content=f"AI Code Feedback:\n{feedback_text}"))
    logger.info("Generated AI feedback on the code.")
    return state
747
+
748
@with_retry
def refine_code(state: MainState) -> MainState:
    """Refine the current codebase using all collected feedback.

    Sends a truncated (~3000 char) view of the existing files, the user's
    execution feedback, web search results, AI review, and human comments to
    the LLM, then replaces ``state["code_current"]`` with the structured
    ``GeneratedCode`` result.

    Raises:
        ConnectionError: if no LLM instance is present in ``state``.
        ValueError: if the LLM response has no usable file structure.
    """
    llm = state.get('llm_instance')
    if not llm:
        raise ConnectionError("LLM instance not found in state.")
    if 'messages' not in state:
        state['messages'] = []
    func_name = "refine_code"
    code_c = state.get("code_current")
    code_str_parts = []
    total_len = 0
    max_code_len = 3000  # cap on code context forwarded to the LLM
    files_to_process = code_c.files if code_c and isinstance(code_c, GeneratedCode) else []
    if not files_to_process:
        logger.warning(f"No files in code_current for {func_name}")
        code_content = "No previous code."
        # BUGFIX: the previous fallback `state.get("code_current", default).instructions`
        # returned None (not the default) when the key existed holding None,
        # which then raised AttributeError on `.instructions`.
        instructions = code_c.instructions if isinstance(code_c, GeneratedCode) else ""
    else:
        instructions = code_c.instructions
        for file in files_to_process:
            header = f"--- {file.filename} ---\n"
            remaining_len = max_code_len - total_len - len(header)
            if remaining_len <= 0:
                # Budget exhausted before this file starts: mark truncation.
                code_str_parts.append("\n*... (Code context truncated)*")
                logger.debug(f"Code context truncated for {func_name}")
                break
            snippet = file.content[:remaining_len]
            is_truncated = len(file.content) > remaining_len
            code_str_parts.append(header + snippet + ('...' if is_truncated else ''))
            total_len += len(header) + len(snippet)
            if total_len >= max_code_len:
                # Avoid emitting the truncation marker twice in a row.
                if not code_str_parts[-1].endswith("truncated)*"):
                    code_str_parts.append("\n*... (Code context truncated)*")
                logger.debug(f"Code context max length for {func_name}")
                break
        code_content = "\n".join(code_str_parts)
    prompt = f"Act as senior {state['coding_language']} dev refining '{state['project']}'. Update code based on all feedback. Address bugs, improve style, update instructions if needed.\nCode:\n{code_content}\nInstr:\n{instructions}\nUser Exec FB:\n{state.get('code_human_input','N/A')}\nSearch:\n{state.get('code_web_search_results','N/A')}\nAI Review:\n{state.get('code_feedback','N/A')}\nHuman Comments:\n{state.get('code_human_feedback','N/A')}\n---\nOutput ONLY JSON (GeneratedCode model)."
    structured_llm = llm.with_structured_output(GeneratedCode)  # Use LLM from state
    response = structured_llm.invoke(prompt)
    if not response or not isinstance(response, GeneratedCode) or not response.files:
        logger.error("Code refinement failed or invalid format.")
        raise ValueError("Did not produce expected file structure.")
    state["code_current"] = response
    summary = f"Refined code - {len(response.files)} files. Instructions:\n{response.instructions[:200]}..."
    state["messages"].append(AIMessage(content=f"Refined Code:\n{summary}"))
    logger.info(f"Refined code, resulting in {len(response.files)} files.")
    return state
782
+
783
+ # --- Code Review & Security Cycle ---
784
@with_retry
def code_review(state: MainState) -> MainState:
    """Run an AI code review over ``state["final_code_files"]``.

    Builds a truncated (~4000 char) text view of the files, asks the LLM for
    review feedback, and stores it in ``state["code_review_current_feedback"]``.

    Raises:
        ConnectionError: if no LLM instance is present in ``state``.
    """
    llm = state.get('llm_instance')
    if not llm:
        raise ConnectionError("LLM instance not found in state.")
    if 'messages' not in state:
        state['messages'] = []
    func_name = "code_review"
    code_files_to_review = state.get("final_code_files", [])
    if not code_files_to_review:
        logger.warning(f"No files in final_code_files for {func_name}")
        state["code_review_current_feedback"] = "No code available."
        state["messages"].append(AIMessage(content="Code Review: No code."))
        return state
    code_str_parts = []
    total_len = 0
    max_code_len = 4000  # cap on code context forwarded to the LLM
    # BUGFIX: `state.get("code_current", default).instructions` returned None
    # (not the default) when the key existed holding None, raising
    # AttributeError; resolve the instructions defensively instead.
    code_c = state.get("code_current")
    instructions = code_c.instructions if isinstance(code_c, GeneratedCode) else ""
    for file in code_files_to_review:
        header = f"--- {file.filename} ---\n"
        remaining_len = max_code_len - total_len - len(header)
        if remaining_len <= 0:
            code_str_parts.append("\n*... (Code context truncated)*")
            logger.debug(f"Code context truncated for {func_name}")
            break
        snippet = file.content[:remaining_len]
        is_truncated = len(file.content) > remaining_len
        code_str_parts.append(header + snippet + ('...' if is_truncated else ''))
        total_len += len(header) + len(snippet)
        if total_len >= max_code_len:
            # Avoid emitting the truncation marker twice in a row.
            if not code_str_parts[-1].endswith("truncated)*"):
                code_str_parts.append("\n*... (Code context truncated)*")
            logger.debug(f"Code context max length for {func_name}")
            break
    code_content = "\n".join(code_str_parts)
    prompt = f"Perform detailed code review for '{state['project']}' ({state['coding_language']}). Focus on best practices, readability, logic, efficiency, robustness.\nCode:\n{code_content}\nInstr:\n{instructions}\n---\nProvide feedback."
    response = llm.invoke(prompt)  # Use LLM from state
    feedback = response.content.strip()
    state["code_review_current_feedback"] = feedback
    state["messages"].append(AIMessage(content=f"Code Review:\n{feedback}"))
    logger.info("Performed code review.")
    return state
814
+
815
@with_retry
def security_check(state: MainState) -> MainState:
    """Run an AI security analysis over ``state["final_code_files"]``.

    Builds a truncated (~4000 char) text view of the files, asks the LLM for
    vulnerability findings, and stores them in
    ``state["security_current_feedback"]``.

    Raises:
        ConnectionError: if no LLM instance is present in ``state``.
    """
    llm = state.get('llm_instance')
    if not llm:
        raise ConnectionError("LLM instance not found in state.")
    if 'messages' not in state:
        state['messages'] = []
    func_name = "security_check"
    code_files_to_check = state.get("final_code_files", [])
    if not code_files_to_check:
        logger.warning(f"No files in final_code_files for {func_name}")
        state["security_current_feedback"] = "No code available."
        state["messages"].append(AIMessage(content="Security Check: No code."))
        return state
    code_str_parts = []
    total_len = 0
    max_code_len = 4000  # cap on code context forwarded to the LLM
    # BUGFIX: `state.get("code_current", default).instructions` returned None
    # (not the default) when the key existed holding None, raising
    # AttributeError; resolve the instructions defensively instead.
    code_c = state.get("code_current")
    instructions = code_c.instructions if isinstance(code_c, GeneratedCode) else ""
    for file in code_files_to_check:
        header = f"--- {file.filename} ---\n"
        remaining_len = max_code_len - total_len - len(header)
        if remaining_len <= 0:
            code_str_parts.append("\n*... (Code context truncated)*")
            logger.debug(f"Code context truncated for {func_name}")
            break
        snippet = file.content[:remaining_len]
        is_truncated = len(file.content) > remaining_len
        code_str_parts.append(header + snippet + ('...' if is_truncated else ''))
        total_len += len(header) + len(snippet)
        if total_len >= max_code_len:
            # Avoid emitting the truncation marker twice in a row.
            if not code_str_parts[-1].endswith("truncated)*"):
                code_str_parts.append("\n*... (Code context truncated)*")
            logger.debug(f"Code context max length for {func_name}")
            break
    code_content = "\n".join(code_str_parts)
    prompt = f"Act as security expert. Analyze {state['coding_language']} code for '{state['project']}'. Check for injection, XSS, auth issues, data exposure, input validation, misconfigs, vulnerable deps.\nCode:\n{code_content}\nInstr:\n{instructions}\n---\nProvide findings, impact, remediation."
    response = llm.invoke(prompt)  # Use LLM from state
    feedback = response.content.strip()
    state["security_current_feedback"] = feedback
    state["messages"].append(AIMessage(content=f"Security Check:\n{feedback}"))
    logger.info("Performed security check.")
    return state
845
+
846
@with_retry
def refine_code_with_reviews(state: MainState) -> MainState:
    """Refine the code using code-review, security, and human feedback.

    Replaces both ``state["final_code_files"]`` and ``state["code_current"]``
    with the refined ``GeneratedCode`` result.

    Raises:
        ConnectionError: if no LLM instance is present in ``state``.
        ValueError: if no code is available or the LLM response has no usable
            file structure.
    """
    llm = state.get('llm_instance')
    if not llm:
        raise ConnectionError("LLM instance not found in state.")
    if 'messages' not in state:
        state['messages'] = []
    func_name = "refine_code_with_reviews"
    code_files_to_refine = state.get("final_code_files", [])
    if not code_files_to_refine:
        logger.error(f"No files in final_code_files for {func_name}")
        raise ValueError("No code available.")
    # BUGFIX: `state.get("code_current", default).instructions` returned None
    # (not the default) when the key existed holding None, raising
    # AttributeError; resolve the instructions defensively instead.
    code_c = state.get("code_current")
    instructions = code_c.instructions if isinstance(code_c, GeneratedCode) else ""
    # Build a bounded (~3000 char) code-context string. The previous
    # "no files" fallback branch here was unreachable (the guard above
    # already raised) and has been removed.
    code_str_parts = []
    total_len = 0
    max_code_len = 3000
    for file in code_files_to_refine:
        header = f"--- {file.filename} ---\n"
        remaining_len = max_code_len - total_len - len(header)
        if remaining_len <= 0:
            code_str_parts.append("\n*... (Code context truncated)*")
            logger.debug(f"Code context truncated for {func_name}")
            break
        snippet = file.content[:remaining_len]
        is_truncated = len(file.content) > remaining_len
        code_str_parts.append(header + snippet + ('...' if is_truncated else ''))
        total_len += len(header) + len(snippet)
        if total_len >= max_code_len:
            # Avoid emitting the truncation marker twice in a row.
            if not code_str_parts[-1].endswith("truncated)*"):
                code_str_parts.append("\n*... (Code context truncated)*")
            logger.debug(f"Code context max length for {func_name}")
            break
    code_content = "\n".join(code_str_parts)
    prompt = f"Refine {state['coding_language']} code for '{state['project']}'. Incorporate Code Review, Security Analysis, User Comments. Prioritize security/critical points. Update instructions if needed.\nCode:\n{code_content}\nInstr:\n{instructions}\nReview FB:\n{state.get('code_review_current_feedback', 'N/A')}\nSecurity FB:\n{state.get('security_current_feedback', 'N/A')}\nUser FB:\n{state.get('review_security_human_feedback', 'N/A')}\n---\nOutput ONLY JSON (GeneratedCode model)."
    structured_llm = llm.with_structured_output(GeneratedCode)  # Use LLM from state
    response = structured_llm.invoke(prompt)
    if not response or not isinstance(response, GeneratedCode) or not response.files:
        logger.error("Code refinement post-review failed/invalid.")
        raise ValueError("Did not produce expected file structure.")
    state["final_code_files"] = response.files
    state["code_current"] = response
    summary = f"Refined code ({len(response.files)} files) post-review."
    state["messages"].append(AIMessage(content=f"Code Refined Post-Review:\n{summary}"))
    logger.info(f"Refined code post-review, {len(response.files)} files.")
    return state
881
+
882
+ # save_review_security_outputs remains unchanged
883
def save_review_security_outputs(state: MainState) -> MainState:
    """Save review/security feedback and a snapshot of the reviewed code.

    Writes the final code review and security findings as markdown into
    ``<project>/6_review_security/`` and copies the current code files (plus
    their instructions) into a ``code_snapshot`` subfolder. All failures are
    logged; the workflow is never interrupted.
    """
    state["final_code_review"] = state.get("code_review_current_feedback", "N/A")
    state["final_security_issues"] = state.get("security_current_feedback", "N/A")
    rs_dir, code_snap_dir = None, None # Initialize paths
    try:
        abs_project_folder = os.path.abspath(state["project_folder"])
        rs_dir = os.path.join(abs_project_folder, "6_review_security")
        os.makedirs(rs_dir, exist_ok=True)
        code_snap_dir = os.path.join(rs_dir, "code_snapshot")
        os.makedirs(code_snap_dir, exist_ok=True)

        # Store paths in state
        state["final_review_security_folder"] = rs_dir
        state["review_code_snapshot_folder"] = code_snap_dir

        # Save feedback files
        review_path = os.path.join(rs_dir, "final_code_review.md")
        security_path = os.path.join(rs_dir, "final_security_issues.md")
        with open(review_path, "w", encoding="utf-8") as f: f.write(state["final_code_review"])
        with open(security_path, "w", encoding="utf-8") as f: f.write(state["final_security_issues"])
        logger.debug(f"Saved review feedback files to {rs_dir}")

        # Save the code snapshot (should be the version just refined)
        files_to_save = state.get("final_code_files", [])
        # NOTE(review): this default is only used when the key is absent; if
        # "code_current" exists holding None, `.instructions` would raise.
        instructions = state.get("code_current", GeneratedCode(files=[], instructions="")).instructions

        if files_to_save:
            logger.info(f"Saving {len(files_to_save)} code files to snapshot folder: {code_snap_dir}")
            for file in files_to_save:
                filename = file.filename; content = file.content
                # Normalize and confine each path to the snapshot directory.
                relative_path = filename.lstrip('/\\'); filepath = os.path.normpath(os.path.join(code_snap_dir, relative_path))
                # Reject any filename that escapes the snapshot folder.
                # NOTE(review): the log below prints the literal "(unknown)"
                # instead of the offending filename — likely meant {filename};
                # confirm against the original source.
                if not os.path.abspath(filepath).startswith(os.path.abspath(code_snap_dir)):
                    logger.warning(f"Attempted path traversal! Skipping file: (unknown) -> {filepath}"); continue
                try:
                    os.makedirs(os.path.dirname(filepath), exist_ok=True)
                    with open(filepath, "w", encoding="utf-8") as f: f.write(content)
                    logger.debug(f"Saved code file: {filepath}")
                except OSError as path_err: logger.error(f"Could not create directory or save file '{filepath}': {path_err}")
                except Exception as write_err: logger.error(f"Error writing file '{filepath}': {write_err}")
            try: # Save instructions
                instr_path = os.path.join(code_snap_dir, "instructions.md")
                with open(instr_path, "w", encoding="utf-8") as f: f.write(instructions)
                logger.debug(f"Saved instructions file: {instr_path}")
            except Exception as instr_err: logger.error(f"Error writing instructions file: {instr_err}")
            logger.info(f"Finished saving review/security outputs and code snapshot to {rs_dir}")
        else: logger.warning("No code files found in 'final_code_files' to save for review snapshot.")
    except Exception as e:
        logger.error(f"General error in save_review_security_outputs: {e}", exc_info=True)
        state["final_review_security_folder"] = None; state["review_code_snapshot_folder"] = None
    return state
934
+
935
# --- Test Case Generation Cycle ---
@with_retry
def generate_initial_test_cases(state: MainState) -> MainState:
    """Generates >=3 diverse initial test cases (happy path, edge, error).

    Builds a bounded code-context string from the final code files and asks the
    LLM (structured output, `TestCases` model) to derive test cases from the
    user stories, design document and code.

    Raises:
        ConnectionError: if no LLM instance is present in state.
        ValueError: if no code files exist, or the LLM returns no valid tests.
    """
    llm = state.get('llm_instance')
    if not llm:
        raise ConnectionError("LLM instance not found in state.")
    if 'messages' not in state:
        state['messages'] = []
    func_name = "generate_initial_test_cases"

    files_to_process = state.get("final_code_files", [])
    # BUGFIX: fail fast. The original built the whole prompt context first and
    # only raised afterwards, wasting work when no code exists.
    if not files_to_process:
        logger.warning(f"No files for {func_name}")
        raise ValueError("No code found for test case generation.")

    # Bounded code context (~2000 chars) so the prompt stays within limits.
    code_str_parts = []
    total_len = 0
    max_code_len = 2000
    for file in files_to_process:
        header = f"--- {file.filename} ---\n"
        remaining_len = max_code_len - total_len - len(header)
        if remaining_len <= 0:
            code_str_parts.append("\n*... (Code context truncated)*")
            break
        snippet = file.content[:remaining_len]
        is_truncated = len(file.content) > remaining_len
        code_str_parts.append(header + snippet + ('...' if is_truncated else ''))
        total_len += len(header) + len(snippet)
        if total_len >= max_code_len:
            if not code_str_parts[-1].endswith("truncated)*"):
                code_str_parts.append("\n*... (Code context truncated)*")
            break
    code_str = "\n".join(code_str_parts)

    prompt = f"Generate >=3 diverse test cases (happy, edge, error) for '{state['project']}' ({state['coding_language']}). Base on stories, design, code.\nStories:\n{state.get('final_user_story', 'N/A')[:1000]}...\nDesign:\n{state.get('final_design_document', 'N/A')[:1000]}...\nCode:\n{code_str}\n---\nOutput ONLY JSON (TestCases model)."
    structured_llm = llm.with_structured_output(TestCases)  # Use LLM from state
    response = structured_llm.invoke(prompt)
    if not response or not isinstance(response, TestCases) or not response.test_cases:
        logger.error("Test case gen failed/invalid.")
        raise ValueError("Did not produce valid test cases.")

    state["test_cases_current"] = response.test_cases
    summary = "\n".join([f"- {tc.description}" for tc in response.test_cases])
    state["messages"].append(AIMessage(content=f"Generated Initial Test Cases:\n{summary}"))
    logger.info(f"Generated {len(response.test_cases)} initial test cases.")
    return state
+
970
@with_retry
def generate_test_cases_feedback(state: MainState) -> MainState:
    """Asks the LLM to critique the current test cases and stores the feedback.

    Assesses coverage, clarity, effectiveness and realism; result lands in
    ``state["test_cases_feedback"]`` and the message log.
    """
    llm = state.get('llm_instance')
    if not llm:
        raise ConnectionError("LLM instance not found in state.")
    if 'messages' not in state:
        state['messages'] = []

    current_tests = state.get("test_cases_current", [])
    if not current_tests:
        logger.warning("No test cases for feedback.")
        state["test_cases_feedback"] = "No tests found."
        return state

    tests_str = "\n".join(
        f"- {tc.description}: Input={tc.input_data}, Expected={tc.expected_output}"
        for tc in current_tests
    )
    code_files = state.get("final_code_files", [])
    # Only the first file's head is needed as lightweight context.
    code_sample = code_files[0].content[:500] + '...' if code_files else "N/A"

    prompt = f"Review test cases for '{state['project']}'. Assess coverage, clarity, effectiveness, realism. Suggest improvements.\nTests:\n{tests_str}\nStories (Context):\n{state.get('final_user_story', 'N/A')[:1000]}...\nCode (Context):\n{code_sample}\n---\nProvide feedback."
    feedback = llm.invoke(prompt).content.strip()  # LLM comes from state

    state["test_cases_feedback"] = feedback
    state["messages"].append(AIMessage(content=f"Test Case Feedback:\n{feedback}"))
    logger.info("Generated feedback on test cases.")
    return state
+
988
@with_retry
def refine_test_cases_and_code(state: MainState) -> MainState:
    """Refines BOTH the test suite and the code so the refined code passes.

    Uses AI feedback plus human feedback/results to rewrite tests and files
    via structured output (local ``TestAndCode`` model).

    Raises:
        ConnectionError: if no LLM instance is present in state.
        ValueError: if inputs are missing or the LLM response is invalid.
    """
    llm = state.get('llm_instance')
    if not llm:
        raise ConnectionError("LLM instance not found in state.")
    if 'messages' not in state:
        state['messages'] = []
    func_name = "refine_test_cases_and_code"

    current_tests = state.get("test_cases_current", [])
    current_code_files = state.get("final_code_files", [])
    instructions = state.get("code_current", GeneratedCode(files=[], instructions="")).instructions
    if not current_tests or not current_code_files:
        logger.error(f"Missing tests or code for {func_name}")
        raise ValueError("Missing data.")

    tests_str = "\n".join(
        f"- {tc.description}: Input={tc.input_data}, Expected={tc.expected_output}"
        for tc in current_tests
    )

    # Bounded code context (~3000 chars) for the prompt.
    code_str_parts = []
    total_len = 0
    max_code_len = 3000
    for file in current_code_files:
        header = f"--- {file.filename} ---\n"
        remaining_len = max_code_len - total_len - len(header)
        if remaining_len <= 0:
            code_str_parts.append("\n*... (Code context truncated)*")
            logger.debug(f"Code context truncated for {func_name}")
            break
        snippet = file.content[:remaining_len]
        is_truncated = len(file.content) > remaining_len
        code_str_parts.append(header + snippet + ('...' if is_truncated else ''))
        total_len += len(header) + len(snippet)
        if total_len >= max_code_len:
            if not code_str_parts[-1].endswith("truncated)*"):
                code_str_parts.append("\n*... (Code context truncated)*")
            logger.debug(f"Code context max length for {func_name}")
            break
    code_str = "\n".join(code_str_parts)

    class TestAndCode(BaseModel):
        # Combined structured-output schema: refined tests plus refined files.
        test_cases: List[TestCase]
        files: List[CodeFile]

    prompt = f"Tests failed for '{state['project']}'. Refine BOTH tests AND code based on feedback. Goal: refined code passes refined tests.\nTests:\n{tests_str}\nCode:\n{code_str}\nInstr:\n{instructions}\nAI Test FB:\n{state.get('test_cases_feedback','N/A')}\nHuman FB/Results:\n{state.get('test_cases_human_feedback','N/A')}\n---\nOutput ONLY JSON (TestAndCode model)."
    response = llm.with_structured_output(TestAndCode).invoke(prompt)  # LLM from state
    if not response or not isinstance(response, TestAndCode) or not response.test_cases or not response.files:
        logger.error("Refinement of tests/code failed/invalid.")
        raise ValueError("Did not produce expected results.")

    state["test_cases_current"] = response.test_cases
    state["final_code_files"] = response.files
    # Keep the previous instructions alongside the refined files.
    state["code_current"] = GeneratedCode(files=response.files, instructions=instructions)
    summary = f"Refined {len(response.files)} code files & {len(response.test_cases)} tests."
    state["messages"].append(AIMessage(content=f"Refined Tests and Code:\n{summary}"))
    logger.info("Refined test cases and code.")
    return state
+
1028
def save_testing_outputs(state: MainState) -> MainState:
    """Saves the final test cases and the code snapshot that passed them.

    Writes ``7_testing/final_test_cases.md`` and copies the passing code files
    (plus ``instructions.md``) into ``7_testing/passed_code/``. Folder paths
    are recorded in state; on any top-level failure both keys are set to None.
    """
    state["final_test_code_files"] = state.get("final_code_files", [])
    final_tests = state.get("test_cases_current", [])
    test_dir, code_snap_dir = None, None
    try:
        abs_project_folder = os.path.abspath(state["project_folder"])
        test_dir = os.path.join(abs_project_folder, "7_testing")
        os.makedirs(test_dir, exist_ok=True)
        code_snap_dir = os.path.join(test_dir, "passed_code")
        os.makedirs(code_snap_dir, exist_ok=True)
        state["final_testing_folder"] = test_dir
        state["testing_passed_code_folder"] = code_snap_dir

        # Save test cases file
        tc_path = os.path.join(test_dir, "final_test_cases.md")
        tc_str = "\n\n".join(
            f"**{tc.description}**\nInput:`{tc.input_data}`\nExpected:`{tc.expected_output}`"
            for tc in final_tests
        )
        with open(tc_path, "w", encoding="utf-8") as f:
            f.write(f"# Final Test Cases ({len(final_tests)} Passed)\n\n{tc_str}")
        logger.debug(f"Saved test cases file: {tc_path}")

        # Save the code snapshot that passed
        passed_code_files = state.get("final_test_code_files", [])
        instructions = state.get("code_current", GeneratedCode(files=[], instructions="")).instructions
        if passed_code_files:
            logger.info(f"Saving {len(passed_code_files)} passed code files to snapshot: {code_snap_dir}")
            abs_snap = os.path.abspath(code_snap_dir)
            for file in passed_code_files:
                fn = file.filename
                content = file.content
                safe_fn = os.path.basename(fn)
                if not safe_fn or ('/' in fn and '..' in fn) or ('\\' in fn and '..' in fn):
                    logger.warning(f"Skip unsafe file: {fn}")
                    continue
                rel_path = fn.lstrip('/\\')
                filepath = os.path.normpath(os.path.join(code_snap_dir, rel_path))
                # BUGFIX: the original used abspath().startswith(abs_base), which
                # also accepts sibling directories sharing the prefix (e.g.
                # "passed_code_evil"); commonpath() is a strict containment test.
                if os.path.commonpath([abs_snap, os.path.abspath(filepath)]) != abs_snap:
                    logger.warning(f"Skip traversal: {fn}")
                    continue
                try:
                    os.makedirs(os.path.dirname(filepath), exist_ok=True)
                    with open(filepath, "w", encoding="utf-8") as f:
                        f.write(content)
                    logger.debug(f"Saved code file: {filepath}")
                except OSError as path_err:
                    logger.error(f"Path error saving '{filepath}': {path_err}")
                except Exception as write_err:
                    logger.error(f"Error writing '{filepath}': {write_err}")
            try:  # Save instructions
                instr_path = os.path.join(code_snap_dir, "instructions.md")
                with open(instr_path, "w", encoding="utf-8") as f:
                    f.write(instructions)
                logger.debug(f"Saved instructions: {instr_path}")
            except Exception as instr_err:
                logger.error(f"Error writing instructions: {instr_err}")
            logger.info(f"Finished saving testing outputs and passed code to {test_dir}")
        else:
            logger.warning("No passed code files found in state to save.")
    except Exception as e:
        logger.error(f"Failed save testing outputs: {e}", exc_info=True)
        state["final_testing_folder"] = None
        state["testing_passed_code_folder"] = None
    return state
+
1072
+
1073
# --- Quality Analysis Cycle ---
@with_retry
def generate_initial_quality_analysis(state: MainState) -> MainState:
    """Produces an overall QA report for the code that passed testing.

    Covers maintainability, performance, scalability, security, coverage,
    documentation, and a 1-10 confidence score. Stores the report in
    ``state["quality_current_analysis"]``.
    """
    llm = state.get('llm_instance')
    if not llm:
        raise ConnectionError("LLM instance not found in state.")
    if 'messages' not in state:
        state['messages'] = []
    func_name = "generate_initial_quality_analysis"

    code_files_passed = state.get("final_test_code_files", [])
    instructions = state.get("code_current", GeneratedCode(files=[], instructions="")).instructions
    if not code_files_passed:
        logger.warning(f"No tested code for {func_name}.")
        state["quality_current_analysis"] = "No passed code available."
        return state

    # Bounded code context (~4000 chars) for the prompt.
    code_str_parts = []
    total_len = 0
    max_code_len = 4000
    for file in code_files_passed:
        header = f"--- {file.filename} ---\n"
        remaining_len = max_code_len - total_len - len(header)
        if remaining_len <= 0:
            code_str_parts.append("\n*... (Code context truncated)*")
            logger.debug(f"Code context truncated for {func_name}")
            break
        snippet = file.content[:remaining_len]
        is_truncated = len(file.content) > remaining_len
        code_str_parts.append(header + snippet + ('...' if is_truncated else ''))
        total_len += len(header) + len(snippet)
        if total_len >= max_code_len:
            if not code_str_parts[-1].endswith("truncated)*"):
                code_str_parts.append("\n*... (Code context truncated)*")
            logger.debug(f"Code context max length for {func_name}")
            break
    code_str = "\n".join(code_str_parts)

    tests_str = "\n".join(f"- {tc.description}" for tc in state.get("test_cases_current", []))[:500] + "..."
    prompt = f"Generate QA report for '{state['project']}' ({state['coding_language']}). Code passed tests. Assess Maintainability, Perf, Scale, Security, Coverage, Docs, Confidence Score (1-10).\nCode:\n{code_str}\nTests:\n{tests_str}\nInstr:\n{instructions}\nReview Sum:\n{state.get('final_code_review','N/A')[:500]}...\nSecurity Sum:\n{state.get('final_security_issues','N/A')[:500]}...\n---"
    qa_report = llm.invoke(prompt).content.strip()  # LLM comes from state

    state["quality_current_analysis"] = qa_report
    state["messages"].append(AIMessage(content=f"Initial Quality Analysis Report:\n{qa_report}"))
    logger.info("Generated Initial Quality Analysis Report.")
    return state
+
1108
@with_retry
def generate_quality_feedback(state: MainState) -> MainState:
    """Asks the LLM to critique the current QA report.

    Feedback (fairness, comprehensiveness, logic, gaps) is stored in
    ``state["quality_feedback"]`` and appended to the message log.
    """
    llm = state.get('llm_instance')
    if not llm:
        raise ConnectionError("LLM instance not found in state.")
    if 'messages' not in state:
        state['messages'] = []

    current_qa_report = state.get('quality_current_analysis', 'N/A')
    if current_qa_report == 'N/A':
        logger.warning("No QA report for feedback.")
        state["quality_feedback"] = "No QA report."
        return state

    prompt = f"Review QA report for '{state['project']}'. Critique fairness, comprehensiveness, logic, missing aspects.\nReport:\n{current_qa_report}"
    feedback = llm.invoke(prompt).content.strip()  # LLM comes from state

    state["quality_feedback"] = feedback
    state["messages"].append(AIMessage(content=f"Feedback on QA Report:\n{feedback}"))
    logger.info("Generated feedback on the Quality Analysis report.")
    return state
+
1124
@with_retry
def refine_quality_and_code(state: MainState) -> MainState:
    """Refines the QA report and optionally applies minor code tweaks.

    BUGFIX: the original stored ``response.files`` unconditionally; an empty
    list from the LLM would silently clobber ``final_code_files`` and discard
    the tested code snapshot. We now fall back to the existing files.

    Raises:
        ConnectionError: if no LLM instance is present in state.
        ValueError: if the LLM response lacks a refined analysis.
    """
    llm = state.get('llm_instance')
    if not llm:
        raise ConnectionError("LLM instance not found in state.")
    if 'messages' not in state:
        state['messages'] = []
    func_name = "refine_quality_and_code"

    code_files_base = state.get("final_test_code_files", [])
    instructions = state.get("code_current", GeneratedCode(files=[], instructions="")).instructions

    # Bounded code context (~2000 chars) for the prompt.
    code_str_parts = []
    total_len = 0
    max_code_len = 2000
    if not code_files_base:
        logger.warning(f"No tested code for {func_name}")
        code_content = "N/A"
    else:
        for file in code_files_base:
            header = f"--- {file.filename} ---\n"
            remaining_len = max_code_len - total_len - len(header)
            if remaining_len <= 0:
                code_str_parts.append("\n*... (Code context truncated)*")
                logger.debug(f"Code context truncated for {func_name}")
                break
            snippet = file.content[:remaining_len]
            is_truncated = len(file.content) > remaining_len
            code_str_parts.append(header + snippet + ('...' if is_truncated else ''))
            total_len += len(header) + len(snippet)
            if total_len >= max_code_len:
                if not code_str_parts[-1].endswith("truncated)*"):
                    code_str_parts.append("\n*... (Code context truncated)*")
                logger.debug(f"Code context max length for {func_name}")
                break
        code_content = "\n".join(code_str_parts)

    class QualityAndCode(BaseModel):
        # Structured-output schema: refined report plus (possibly unchanged) files.
        analysis: str
        files: List[CodeFile]

    prompt = f"Refine QA report for '{state['project']}' based on feedback. Also apply *minor, non-functional* code improvements (docs, names) suggested by feedback to 'Passed Code' if simple, else return original files.\nQA Report:\n{state.get('quality_current_analysis','N/A')}\nPassed Code:\n{code_content}\nInstr:\n{instructions}\nAI FB:\n{state.get('quality_feedback','N/A')}\nHuman FB:\n{state.get('quality_human_feedback','N/A')}\n---\nOutput ONLY JSON (QualityAndCode model)."
    structured_llm = llm.with_structured_output(QualityAndCode)  # Use LLM from state
    response = structured_llm.invoke(prompt)
    if not response or not isinstance(response, QualityAndCode) or not response.analysis:
        logger.error("Refinement of QA report failed/invalid.")
        raise ValueError("Did not produce expected result.")

    state["quality_current_analysis"] = response.analysis
    # Fall back to the existing snapshot if the model returned no files.
    refined_files = response.files if response.files else code_files_base
    state["final_code_files"] = refined_files
    state["code_current"] = GeneratedCode(files=refined_files, instructions=instructions)
    state["messages"].append(AIMessage(content=f"Refined Quality Analysis Report:\n{state['quality_current_analysis']}"))
    logger.info("Refined Quality Analysis report.")
    return state
+
1162
def save_final_quality_analysis(state: MainState) -> MainState:
    """Saves the final QA report plus the associated final code snapshot.

    Writes ``8_quality_analysis/final_quality_analysis.md`` and the final code
    files (plus ``instructions.md``) under ``8_quality_analysis/final_code/``.
    On failure both path keys in state are set to None.
    """
    state["final_quality_analysis"] = state.get("quality_current_analysis", "N/A")
    qa_dir, code_snap_dir, qa_path = None, None, None
    try:
        abs_project_folder = os.path.abspath(state["project_folder"])
        qa_dir = os.path.join(abs_project_folder, "8_quality_analysis")
        os.makedirs(qa_dir, exist_ok=True)
        qa_path = os.path.join(qa_dir, "final_quality_analysis.md")
        with open(qa_path, "w", encoding="utf-8") as f:
            f.write(state["final_quality_analysis"])
        state["final_quality_analysis_path"] = qa_path
        logger.info(f"Saved final QA report: {qa_path}")

        code_snap_dir = os.path.join(qa_dir, "final_code")
        os.makedirs(code_snap_dir, exist_ok=True)
        state["final_code_folder"] = code_snap_dir
        files_to_save = state.get("final_code_files", [])
        instructions = state.get("code_current", GeneratedCode(files=[], instructions="")).instructions
        if files_to_save:
            logger.info(f"Saving final code snapshot ({len(files_to_save)} files) to {code_snap_dir}")
            abs_snap = os.path.abspath(code_snap_dir)
            for file in files_to_save:
                fn = file.filename
                content = file.content
                safe_fn = os.path.basename(fn)
                if not safe_fn or ('/' in fn and '..' in fn) or ('\\' in fn and '..' in fn):
                    logger.warning(f"Skip unsafe file: {fn}")
                    continue
                rel_path = fn.lstrip('/\\')
                filepath = os.path.normpath(os.path.join(code_snap_dir, rel_path))
                # BUGFIX: abspath().startswith(base) also matches sibling dirs
                # like "final_code_evil"; commonpath() is a strict containment test.
                if os.path.commonpath([abs_snap, os.path.abspath(filepath)]) != abs_snap:
                    logger.warning(f"Skip traversal: {fn}")
                    continue
                try:
                    os.makedirs(os.path.dirname(filepath), exist_ok=True)
                    with open(filepath, "w", encoding="utf-8") as f:
                        f.write(content)
                    logger.debug(f"Saved final code file: {filepath}")
                except OSError as path_err:
                    logger.error(f"Path error saving final code '{filepath}': {path_err}")
                except Exception as write_err:
                    logger.error(f"Error writing final code '{filepath}': {write_err}")
            try:  # Save instructions
                instr_path = os.path.join(code_snap_dir, "instructions.md")
                with open(instr_path, "w", encoding="utf-8") as f:
                    f.write(instructions)
                logger.debug(f"Saved final instructions: {instr_path}")
            except Exception as instr_err:
                logger.error(f"Error writing final instructions: {instr_err}")
        else:
            logger.warning("No final code files found to save with QA report.")
    except Exception as e:
        logger.error(f"Failed saving QA outputs: {e}", exc_info=True)
        state["final_quality_analysis_path"] = None
        state["final_code_folder"] = None
    return state
+
1200
# --- Deployment Cycle ---
@with_retry
def generate_initial_deployment(state: MainState, prefs: str) -> MainState:
    """Drafts a deployment plan from user preferences and key deployment files.

    Args:
        state: workflow state (must hold the LLM instance and final code files).
        prefs: free-text user deployment preferences.

    Raises:
        ConnectionError: if no LLM instance is present in state.
        ValueError: if no final code files exist.
    """
    llm = state.get('llm_instance')
    if not llm:
        raise ConnectionError("LLM instance not found in state.")
    if 'messages' not in state:
        state['messages'] = []
    func_name = "generate_initial_deployment"

    final_code = state.get("final_code_files", [])
    if not final_code:
        logger.error(f"No final code for {func_name}")
        raise ValueError("Final code missing.")
    instructions = state.get("code_current", GeneratedCode(files=[], instructions="")).instructions

    # Collect only deployment-relevant files (requirements, Dockerfile,
    # entrypoints, env/config) into a bounded (~2000 char) context.
    key_markers = ("requirements", "dockerfile", "main.", "app.", ".env", "config")
    code_str_parts = []
    total_len = 0
    max_code_len = 2000
    for file in final_code:
        lowered = file.filename.lower()
        if not any(marker in lowered for marker in key_markers):
            continue
        header = f"--- {file.filename} ---\n"
        remaining_len = max_code_len - total_len - len(header)
        if remaining_len <= 0:
            code_str_parts.append("\n*... (Key file context truncated)*")
            logger.debug(f"Key file context truncated for {func_name}")
            break
        snippet = file.content[:remaining_len]
        is_truncated = len(file.content) > remaining_len
        code_str_parts.append(header + snippet + ('...' if is_truncated else ''))
        total_len += len(header) + len(snippet)
        if total_len >= max_code_len:
            if not code_str_parts[-1].endswith("truncated)*"):
                code_str_parts.append("\n*... (Key file context truncated)*")
            logger.debug(f"Key file context max length for {func_name}")
            break
    code_context = "\n".join(code_str_parts) if code_str_parts else "No key deployment files found."

    prompt = f"Act as DevOps. Generate detailed deployment plan for '{state['project']}' ({state['coding_language']}). Base on user prefs, code structure (reqs, docker). Include commands, examples, verification steps.\nPrefs:\n{prefs}\nCode Context (Key Files):\n{code_context}\nInstr:\n{instructions}\n---"
    deployment_plan = llm.invoke(prompt).content.strip()  # LLM comes from state

    state["deployment_current_process"] = deployment_plan
    state["messages"].append(AIMessage(content=f"Initial Deployment Plan:\n{deployment_plan}"))
    logger.info("Generated initial deployment plan.")
    return state
+
1236
@with_retry
def generate_deployment_feedback(state: MainState) -> MainState:
    """Asks the LLM to critique the current deployment plan.

    Assesses clarity, correctness, completeness, security and alignment with
    best practices; result lands in ``state["deployment_feedback"]``.
    """
    llm = state.get('llm_instance')
    if not llm:
        raise ConnectionError("LLM instance not found in state.")
    if 'messages' not in state:
        state['messages'] = []

    current_plan = state.get('deployment_current_process', 'N/A')
    if current_plan == 'N/A':
        logger.warning("No deploy plan to review.")
        state["deployment_feedback"] = "No plan."
        return state

    prompt = f"Review Deployment Plan for '{state['project']}'. Assess clarity, correctness, completeness, security, alignment with practices.\nPlan:\n{current_plan}\n---\nSuggest improvements."
    feedback = llm.invoke(prompt).content.strip()  # LLM comes from state

    state["deployment_feedback"] = feedback
    state["messages"].append(AIMessage(content=f"Deployment Plan Feedback:\n{feedback}"))
    logger.info("Generated feedback on deployment plan.")
    return state
+
1252
@with_retry
def refine_deployment(state: MainState) -> MainState:
    """Reworks the deployment plan using AI and human feedback.

    Rebuilds the same bounded key-file context used when drafting the plan,
    then asks the LLM for an updated plan.
    """
    llm = state.get('llm_instance')
    if not llm:
        raise ConnectionError("LLM instance not found in state.")
    if 'messages' not in state:
        state['messages'] = []
    func_name = "refine_deployment"

    current_plan = state.get('deployment_current_process', 'N/A')
    ai_feedback = state.get('deployment_feedback', 'N/A')
    human_feedback = state.get('deployment_human_feedback', 'N/A')
    final_code = state.get("final_code_files", [])
    instructions = state.get("code_current", GeneratedCode(files=[], instructions="")).instructions

    # Deployment-relevant files only (requirements, Dockerfile, entrypoints,
    # env/config), capped at ~2000 chars of context.
    key_markers = ("requirements", "dockerfile", "main.", "app.", ".env", "config")
    code_str_parts = []
    total_len = 0
    max_code_len = 2000
    if not final_code:
        logger.warning(f"No files for {func_name}")
        code_context = "No code files."
    else:
        for file in final_code:
            lowered = file.filename.lower()
            if not any(marker in lowered for marker in key_markers):
                continue
            header = f"--- {file.filename} ---\n"
            remaining_len = max_code_len - total_len - len(header)
            if remaining_len <= 0:
                code_str_parts.append("\n*... (Key file context truncated)*")
                logger.debug(f"Key file context truncated for {func_name}")
                break
            snippet = file.content[:remaining_len]
            is_truncated = len(file.content) > remaining_len
            code_str_parts.append(header + snippet + ('...' if is_truncated else ''))
            total_len += len(header) + len(snippet)
            if total_len >= max_code_len:
                if not code_str_parts[-1].endswith("truncated)*"):
                    code_str_parts.append("\n*... (Key file context truncated)*")
                logger.debug(f"Key file context max length for {func_name}")
                break
        code_context = "\n".join(code_str_parts) if code_str_parts else "No key files."

    prompt = f"Refine deployment plan for '{state['project']}'. Update based on feedback.\nCurrent Plan:\n{current_plan}\nCode Context:\n{code_context}\nInstr:\n{instructions}\nAI FB:\n{ai_feedback}\nHuman FB:\n{human_feedback}\n---\nGenerate updated plan."
    refined_plan = llm.invoke(prompt).content.strip()  # LLM comes from state

    state["deployment_current_process"] = refined_plan
    state["messages"].append(AIMessage(content=f"Refined Deployment Plan:\n{refined_plan}"))
    logger.info("Refined deployment plan.")
    return state
+
1286
def save_final_deployment_plan(state: MainState) -> MainState:
    """Persists the final deployment plan to ``9_deployment/final_deployment_plan.md``.

    On failure ``state["final_deployment_path"]`` is set to None.
    """
    state["final_deployment_process"] = state.get("deployment_current_process", "No deployment plan generated.")
    filepath = None
    try:
        abs_project_folder = os.path.abspath(state["project_folder"])
        deploy_dir = os.path.join(abs_project_folder, "9_deployment")
        os.makedirs(deploy_dir, exist_ok=True)
        filepath = os.path.join(deploy_dir, "final_deployment_plan.md")
        with open(filepath, "w", encoding="utf-8") as f:
            f.write(state["final_deployment_process"])
        logger.info(f"Saved final deployment plan: {filepath}")
    except Exception as e:
        logger.error(f"Failed save deployment plan: {e}", exc_info=True)
        filepath = None
    state["final_deployment_path"] = filepath
    return state

# --- END OF newSDLC.py ---
+
1301
+ # --- END OF newSDLC.py ---
SDLC_Workflow_Graph_Diagram.png ADDED

Git LFS Details

  • SHA256: ae65b7914b1958e509d4645fe41b39bb2022ab3366f6cb9d28d051a111a3faf3
  • Pointer size: 131 Bytes
  • Size of remote file: 269 kB
app.py ADDED
@@ -0,0 +1,653 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # app.py
2
+ """
3
+ Streamlit frontend application for orchestrating an AI-driven SDLC workflow.
4
+
5
+ This application manages the user interface, state transitions, and calls
6
+ backend logic functions defined in SDLC.py to generate project artifacts.
7
+ """
8
+
9
+ import streamlit as st
10
+ import os
11
+ import shutil
12
+ import logging
13
+ from datetime import datetime
14
+ import time
15
+ import zipfile # Standard library zipfile
16
+
17
+ # --- Import core logic from SDLC.py ---
18
+ try:
19
+ import SDLC
20
+ from SDLC import (
21
+ # State and Models
22
+ MainState, GeneratedCode, PlantUMLCode, TestCase, CodeFile, TestCases,
23
+ # NEW: Initialization function
24
+ initialize_llm_clients,
25
+ # Workflow Functions
26
+ generate_questions, refine_prompt,
27
+ generate_initial_user_stories, generate_user_story_feedback, refine_user_stories, save_final_user_story,
28
+ generate_initial_product_review, generate_product_review_feedback, refine_product_review, save_final_product_review,
29
+ generate_initial_design_doc, generate_design_doc_feedback, refine_design_doc, save_final_design_doc,
30
+ select_uml_diagrams, generate_initial_uml_codes, generate_uml_feedback, refine_uml_codes, save_final_uml_diagrams,
31
+ generate_initial_code, web_search_code, generate_code_feedback, refine_code,
32
+ code_review, security_check, refine_code_with_reviews, save_review_security_outputs,
33
+ generate_initial_test_cases, generate_test_cases_feedback, refine_test_cases_and_code, save_testing_outputs,
34
+ generate_initial_quality_analysis, generate_quality_feedback, refine_quality_and_code, save_final_quality_analysis,
35
+ generate_initial_deployment, generate_deployment_feedback, refine_deployment, save_final_deployment_plan,
36
+ # Message Types
37
+ HumanMessage, AIMessage
38
+ )
39
+ logging.info("Successfully imported components from SDLC.py.")
40
+ except ImportError as e:
41
+ st.error(f"Import Error: {e}. Critical file 'SDLC.py' not found or contains errors.")
42
+ logging.critical(f"Failed to import SDLC.py: {e}", exc_info=True)
43
+ st.stop()
44
+ except Exception as e:
45
+ st.error(f"An unexpected error occurred during import from SDLC: {e}")
46
+ logging.critical(f"Unexpected error during import from SDLC: {e}", exc_info=True)
47
+ st.stop()
48
+
49
+ # --- Application Setup ---
50
+ st.set_page_config(layout="wide", page_title="AI SDLC Workflow")
51
+ logger = logging.getLogger(__name__)
52
+ if not logger.handlers:
53
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
54
+ logger.info("Streamlit app logger configured.")
55
+
56
# --- Constants for Configuration ---
# Provider name -> list of model identifiers offered in the UI.
AVAILABLE_MODELS = {
    "OpenAI": [
        "gpt-4o",
        "gpt-4-turbo",
        "gpt-3.5-turbo",
    ],
    "Groq": [
        "llama3-8b-8192",
        "llama3-70b-8192",
        "mixtral-8x7b-32768",
        "gemma-7b-it",
    ],
    # Add more providers and models here if supported in initialize_llm_clients
}
# Provider dropdown options, in dict insertion order.
LLM_PROVIDERS = list(AVAILABLE_MODELS)
# --- Define Cycle Order and Stage-to-Cycle Mapping ---
# Major workflow cycles in execution order (drives the sidebar progress indicator).
CYCLE_ORDER = [
    "Requirements", "User Story", "Product Review", "Design", "UML",
    "Code Generation", "Review & Security", "Testing", "Quality Analysis", "Deployment",
]

# Grouped cycle -> stage-name table; flattened below into STAGE_TO_CYCLE.
_CYCLE_STAGES = {
    "Requirements": [
        "initial_setup", "run_generate_questions", "collect_answers", "run_refine_prompt",
    ],
    "User Story": [
        "run_generate_initial_user_stories", "run_generate_user_story_feedback",
        "collect_user_story_human_feedback", "run_refine_user_stories", "collect_user_story_decision",
    ],
    "Product Review": [
        "run_generate_initial_product_review", "run_generate_product_review_feedback",
        "collect_product_review_human_feedback", "run_refine_product_review", "collect_product_review_decision",
    ],
    "Design": [
        "run_generate_initial_design_doc", "run_generate_design_doc_feedback",
        "collect_design_doc_human_feedback", "run_refine_design_doc", "collect_design_doc_decision",
    ],
    "UML": [
        "run_select_uml_diagrams", "run_generate_initial_uml_codes", "run_generate_uml_feedback",
        "collect_uml_human_feedback", "run_refine_uml_codes", "collect_uml_decision",
    ],
    "Code Generation": [
        "run_generate_initial_code", "collect_code_human_input", "run_web_search_code",
        "run_generate_code_feedback", "collect_code_human_feedback", "run_refine_code",
        "collect_code_decision",
    ],
    "Review & Security": [
        "run_code_review", "run_security_check", "merge_review_security_feedback",
        "run_refine_code_with_reviews", "collect_review_security_decision",
    ],
    "Testing": [
        "run_generate_initial_test_cases", "run_generate_test_cases_feedback",
        "collect_test_cases_human_feedback", "run_refine_test_cases_and_code", "run_save_testing_outputs",
    ],
    "Quality Analysis": [
        "run_generate_initial_quality_analysis", "run_generate_quality_feedback",
        "collect_quality_human_feedback", "run_refine_quality_and_code", "collect_quality_decision",
    ],
    "Deployment": [
        "generate_initial_deployment", "run_generate_initial_deployment",
        "run_generate_deployment_feedback", "collect_deployment_human_feedback",
        "run_refine_deployment", "collect_deployment_decision",
    ],
    "END": ["END"],
}

# Flat stage-name -> cycle-name lookup used by the progress indicator.
STAGE_TO_CYCLE = {stage: cycle for cycle, stages in _CYCLE_STAGES.items() for stage in stages}
# --- Helper Functions ---
def initialize_state():
    """Reset the Streamlit session state to a fresh, pre-workflow baseline.

    Clears everything, then seeds the UI stage, workflow dict, display text,
    a timestamped default project folder name, ZIP path slots, and the
    LLM/Tavily configuration fields.
    """
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    st.session_state.clear()
    defaults = {
        "stage": "initial_setup",
        "workflow_state": {},
        "user_input": "",
        "display_content": "Welcome! Please configure API keys and project details to start.",
        "project_folder_base": f"ai_sdlc_project_{stamp}",
        "current_prefs": "",
        "zip_path": None,
        "review_code_zip_path": None,
        "testing_code_zip_path": None,
        "final_code_zip_path": None,
        # Configuration state
        "config_applied": False,
        "selected_provider": LLM_PROVIDERS[0],
        "selected_model": AVAILABLE_MODELS[LLM_PROVIDERS[0]][0],
        "llm_api_key": "",
        "tavily_api_key": "",
        "llm_instance": None,
        "tavily_instance": None,
    }
    for key, value in defaults.items():
        st.session_state[key] = value
    logger.info("Streamlit session state initialized.")
def update_display(new_content: str) -> None:
    """Replace the markdown shown in the main output panel."""
    st.session_state["display_content"] = new_content
    logger.debug("Main display updated.")
def create_download_button(file_path: str, label: str, mime: str, key_suffix: str, help_text: str = ""):
    """Render a download button for *file_path* if it points at an existing file.

    Silently does nothing when the path is missing/invalid; warnings/errors are
    logged (and surfaced via st.warning) if the file vanishes or cannot be read.
    The widget key is derived from *key_suffix* plus an alphanumeric label slug.
    """
    if not file_path or not isinstance(file_path, str):
        return
    target = os.path.abspath(file_path)
    if not (os.path.exists(target) and os.path.isfile(target)):
        return
    try:
        with open(target, "rb") as handle:
            slug = "".join(ch for ch in label if ch.isalnum())[:10]
            st.download_button(
                label=f"Download {label}",
                data=handle,
                file_name=os.path.basename(target),
                mime=mime,
                key=f"dl_{key_suffix}_{slug}",
                help=help_text or f"Download {label}",
            )
    except FileNotFoundError:
        # File disappeared between the existence check and open().
        logger.warning(f"FileNotFound after check: {target}")
    except Exception as e:
        logger.error(f"Error prepping download btn for {target}: {e}", exc_info=True)
        st.warning(f"DL Button error for {label}: {e}")
def create_zip_and_download_button(folder_path_key: str, zip_path_key: str, zip_basename: str, button_label_prefix: str, sidebar_context):
    """Offer a two-step "zip this folder, then download" control.

    Looks up the folder under *folder_path_key* in the workflow state; if it
    exists, shows a generate button that archives it (storing the resulting
    ZIP path in session state under *zip_path_key*) and, once a ZIP exists,
    a download button for it. *sidebar_context* is the Streamlit container
    the buttons are rendered into.
    """
    folder_path = st.session_state.workflow_state.get(folder_path_key)
    if not folder_path or not isinstance(folder_path, str):
        return
    folder_abs = os.path.abspath(folder_path)
    if not (os.path.exists(folder_abs) and os.path.isdir(folder_abs)):
        return

    existing_zip = st.session_state.get(zip_path_key)
    if existing_zip and os.path.exists(existing_zip):
        zip_label = f"Download {button_label_prefix} ZIP"
    else:
        zip_label = f"Generate & Download {button_label_prefix} ZIP"

    if sidebar_context.button(zip_label, key=f"zip_gen_{zip_path_key}"):
        with st.spinner(f"Creating {button_label_prefix} archive..."):
            try:
                parent_dir = os.path.dirname(folder_abs)
                archive_base = os.path.join(parent_dir, zip_basename)
                root_dir = parent_dir
                base_dir = os.path.basename(folder_abs)
                logger.info(f"Zipping: base='{archive_base}', root='{root_dir}', dir='{base_dir}'")
                stale_zip = archive_base + ".zip"
                if os.path.exists(stale_zip):
                    # Best-effort removal of a previous archive; make_archive overwrites anyway.
                    try:
                        os.remove(stale_zip)
                        logger.info(f"Removed old ZIP: {stale_zip}")
                    except Exception as del_e:
                        logger.warning(f"Could not remove old ZIP {stale_zip}: {del_e}")
                archive_path = shutil.make_archive(base_name=archive_base, format='zip', root_dir=root_dir, base_dir=base_dir)
                if not os.path.exists(archive_path):
                    raise OSError(f"ZIP not found after make_archive: {archive_path}")
                st.session_state[zip_path_key] = archive_path
                st.success(f"{button_label_prefix} ZIP created!")
                st.rerun()
            except Exception as e:
                sidebar_context.error(f"ZIP Error: {e}")
                logger.error(f"ZIP failed for '{folder_abs}': {e}", exc_info=True)

    generated_zip = st.session_state.get(zip_path_key)
    if generated_zip and os.path.exists(generated_zip):
        try:
            with open(generated_zip, "rb") as fp:
                safe_prefix = "".join(ch for ch in button_label_prefix if ch.isalnum())[:10]
                sidebar_context.download_button(
                    label=f"Download {button_label_prefix} ZIP",
                    data=fp,
                    file_name=os.path.basename(generated_zip),
                    mime="application/zip",
                    key=f"dl_zip_{zip_path_key}_{safe_prefix}",
                )
        except Exception as e:
            sidebar_context.warning(f"Error reading ZIP: {e}")
            logger.error(f"Error reading ZIP {generated_zip}: {e}", exc_info=True)
# --- Initialization ---
# First run of the script (or after a hard refresh): seed the session state.
if "stage" not in st.session_state:
    initialize_state()
# --- Sidebar UI ---
with st.sidebar:
    st.header("AI SDLC Orchestrator")
    st.divider()

    # --- Configuration Section ---
    # Stays expanded until a configuration has been applied successfully.
    with st.expander("Configuration", expanded=not st.session_state.get('config_applied', False)):
        st.subheader("LLM & API Keys")
        selected_provider = st.selectbox("Select LLM Provider", options=LLM_PROVIDERS, key="selected_provider", help="Choose primary LLM provider.")
        available_models = AVAILABLE_MODELS.get(selected_provider, ["N/A"])
        selected_model = st.selectbox(f"Select Model ({selected_provider})", options=available_models, key="selected_model", help=f"Choose model from {selected_provider}.")
        llm_api_key_input = st.text_input(f"{selected_provider} API Key", type="password", key="llm_api_key_input", help=f"Enter API key for {selected_provider}.", value=st.session_state.get("llm_api_key", ""))
        tavily_api_key_input = st.text_input("Tavily API Key (Optional)", type="password", key="tavily_api_key_input", help="Enter Tavily key for web search.", value=st.session_state.get("tavily_api_key", ""))

        if st.button("Apply Configuration", key="apply_config"):
            with st.spinner("Initializing..."):
                # Persist the keys actually entered, then attempt client construction.
                st.session_state.llm_api_key = llm_api_key_input
                st.session_state.tavily_api_key = tavily_api_key_input
                llm_inst, tav_inst, error_msg = SDLC.initialize_llm_clients(
                    provider=st.session_state.selected_provider,
                    model_name=st.session_state.selected_model,
                    llm_api_key=st.session_state.llm_api_key,
                    tavily_api_key=st.session_state.tavily_api_key,
                )
                if llm_inst:
                    st.session_state.llm_instance = llm_inst
                    st.session_state.tavily_instance = tav_inst
                    st.session_state.config_applied = True
                    st.success("Configuration Applied!")
                    logger.info("LLM/Tavily configured via UI.")
                    time.sleep(1)  # let the success message be seen before rerun
                    st.rerun()
                else:
                    st.session_state.config_applied = False
                    st.session_state.llm_instance = None
                    st.session_state.tavily_instance = None
                    error_display = f"Config Failed: {error_msg or 'Unknown error.'}"
                    st.error(error_display)
                    logger.error(error_display)
    # --- END Configuration Section ---

    st.divider()
    st.header("Downloads")
    st.caption("Generated artifacts and code snapshots.")

    # Documents
    st.markdown("---")
    st.subheader("Documents")
    create_download_button(st.session_state.workflow_state.get("final_user_story_path"), "User Story", "text/markdown", "us")
    create_download_button(st.session_state.workflow_state.get("final_product_review_path"), "Product Review", "text/markdown", "pr")
    create_download_button(st.session_state.workflow_state.get("final_design_document_path"), "Design Document", "text/markdown", "dd")
    create_download_button(st.session_state.workflow_state.get("final_quality_analysis_path"), "QA Report", "text/markdown", "qa")
    create_download_button(st.session_state.workflow_state.get("final_deployment_path"), "Deployment Plan", "text/markdown", "deploy")

    # UML
    st.markdown("---")
    st.subheader("UML Diagrams")
    uml_png_paths = st.session_state.workflow_state.get("final_uml_png_paths", [])
    uml_folder = st.session_state.workflow_state.get("final_uml_diagram_folder")
    if uml_png_paths:
        st.caption("Download PNG images:")
        for i, png_path in enumerate(uml_png_paths):
            # Derive a human-readable title from the file name; fall back to a numbered label.
            pretty = "_".join(os.path.basename(png_path).split("_")[2:]).replace(".png", "").replace("_", " ").title()
            create_download_button(png_path, f"UML: {pretty or f'Diagram {i+1}'}", "image/png", f"uml_{i}")
    elif uml_folder and os.path.exists(uml_folder):
        st.caption("*No PNGs generated/found.*")
    else:
        st.caption("*UML diagrams not generated.*")

    # Code Snapshots
    st.markdown("---")
    st.subheader("Code Snapshots (ZIP)")
    st.caption("Code versions from key stages.")
    create_zip_and_download_button("review_code_snapshot_folder", "review_code_zip_path", "code_snapshot_review", "Review Stage Code", st.sidebar)
    create_zip_and_download_button("testing_passed_code_folder", "testing_code_zip_path", "code_snapshot_testing", "Testing Stage Code", st.sidebar)
    create_zip_and_download_button("final_code_folder", "final_code_zip_path", "code_snapshot_final", "Final Code", st.sidebar)
    st.divider()

    # Final Project ZIP — only available once the workflow has reached END.
    if st.session_state.stage == "END":
        st.markdown("**Full Project Archive**")
        proj_folder = st.session_state.workflow_state.get("project_folder")
        abs_proj = os.path.abspath(proj_folder) if proj_folder and isinstance(proj_folder, str) else None
        if abs_proj and os.path.isdir(abs_proj):
            zip_label = "Generate & Download Full Project ZIP"
            if st.session_state.get("zip_path") and os.path.exists(st.session_state.zip_path):
                zip_label = "Download Full Project ZIP"
            if st.sidebar.button(zip_label, key="zip_gen_final"):
                with st.spinner("Creating full project archive..."):
                    try:
                        zip_base = os.path.abspath(st.session_state.project_folder_base)
                        out_dir = os.path.dirname(zip_base)
                        os.makedirs(out_dir, exist_ok=True)
                        root_dir = os.path.dirname(abs_proj)
                        base_dir = os.path.basename(abs_proj)
                        logger.info(f"Zipping full project: base='{zip_base}', root='{root_dir}', dir='{base_dir}'")
                        zip_file = zip_base + ".zip"
                        if os.path.exists(zip_file):
                            try:
                                os.remove(zip_file)
                                logger.info(f"Removed old final ZIP: {zip_file}")
                            except Exception as del_e:
                                logger.warning(f"Could not remove old final ZIP {zip_file}: {del_e}")
                        archive_path = shutil.make_archive(base_name=zip_base, format='zip', root_dir=root_dir, base_dir=base_dir)
                        if not os.path.exists(archive_path):
                            raise OSError(f"Final ZIP failed: {archive_path} not found.")
                        st.session_state.zip_path = archive_path
                        st.success(f"Full project ZIP created: {os.path.basename(archive_path)}")
                        st.rerun()
                    except Exception as e:
                        st.sidebar.error(f"Final ZIP Error: {e}")
                        logger.error(f"Final ZIP creation failed: {e}", exc_info=True)
            if st.session_state.get("zip_path") and os.path.exists(st.session_state.zip_path):
                try:
                    with open(st.session_state.zip_path, "rb") as fp:
                        st.sidebar.download_button(label="Download Full Project ZIP", data=fp, file_name=os.path.basename(st.session_state.zip_path), mime="application/zip", key="dl_zip_final")
                except Exception as read_e:
                    st.sidebar.warning(f"Error reading final ZIP: {read_e}")
                    logger.error(f"Error reading final ZIP {st.session_state.zip_path}: {read_e}", exc_info=True)
        elif proj_folder:
            st.sidebar.warning(f"Project folder '{proj_folder}' not found.")
        else:
            st.sidebar.caption("*Project folder undefined.*")
    st.divider()
    if st.sidebar.button("Restart Workflow", key="restart_sb", help="Clear progress and start over."):
        logger.info("Workflow restart requested.")
        initialize_state()
        st.rerun()
# --- Main Layout & Controls ---
main_col, indicator_col = st.columns([4, 1])

# Stages at which the UI pauses for a free-text input box.
input_needed = {
    "collect_answers", "collect_user_story_human_feedback", "collect_product_review_human_feedback",
    "collect_design_doc_human_feedback", "collect_uml_human_feedback", "collect_code_human_input",
    "collect_code_human_feedback", "merge_review_security_feedback", "collect_quality_human_feedback",
    "collect_deployment_human_feedback",
}
# Stages at which the UI pauses for a Refine/Proceed decision.
decision_needed = {
    "collect_user_story_decision", "collect_product_review_decision", "collect_design_doc_decision",
    "collect_uml_decision", "collect_code_decision", "collect_review_security_decision",
    "collect_quality_decision", "collect_deployment_decision",
}

current_stage = st.session_state.stage
show_input_box = current_stage in input_needed
show_decision_btns = current_stage in decision_needed
show_test_fb = current_stage == "collect_test_cases_human_feedback"
show_setup_form = current_stage == "initial_setup"
show_deploy_prefs = current_stage == "generate_initial_deployment"
with main_col:
    st.header(f"Stage: {current_stage.replace('_', ' ').title()}")
    st.markdown("### AI Output / Current Task:")
    display_area = st.container(height=400, border=False)
    with display_area:
        st.markdown(str(st.session_state.get("display_content", "Initializing...")), unsafe_allow_html=False)
    st.divider()

    # --- GATING ---
    # Nothing below is usable until the LLM configuration has been applied.
    if not st.session_state.get('config_applied', False):
        st.warning("👈 Please configure LLM Provider & API Keys in the sidebar first.")
    else:
        # --- Workflow UI ---
        if show_setup_form:
            with st.form("setup_form"):
                st.markdown("### Project Configuration")
                proj_folder = st.text_input("Project Folder Name", value=st.session_state.project_folder_base, help="Directory name. No spaces/special chars.")
                proj_name = st.text_input("Project Description", value="Web Task Manager Example")
                proj_cat = st.text_input("Category", value="Web Development")
                proj_subcat = st.text_input("Subcategory", value="Productivity Tool")
                proj_lang = st.text_input("Coding Language", value="Python")
                min_iter = st.number_input("Min Q&A Rounds", 1, 5, 2)
                submitted = st.form_submit_button("Start Workflow")
                if submitted:
                    if not all([proj_folder, proj_name, proj_cat, proj_subcat, proj_lang]):
                        st.error("Fill all fields.")
                    elif any(c in proj_folder for c in r'/\:*?"<>| '):
                        st.error("Invalid chars in folder name.")
                    else:
                        try:
                            abs_proj = os.path.abspath(proj_folder)
                            if os.path.exists(abs_proj) and not os.path.isdir(abs_proj):
                                st.error(f"File exists: '{proj_folder}'.")
                            else:
                                if os.path.exists(abs_proj):
                                    st.warning(f"Folder exists: '{abs_proj}'.")
                                else:
                                    os.makedirs(abs_proj, exist_ok=True)
                                    st.success(f"Folder ready: '{abs_proj}'")
                                # Seed the workflow state: clients + project metadata explicitly,
                                # None for every other MainState field, then collection/flag defaults.
                                explicit_keys = [
                                    "llm_instance", "tavily_instance", "messages", "project_folder",
                                    "project", "category", "subcategory", "coding_language",
                                    "user_input_iteration", "user_input_min_iterations",
                                ]
                                initial_workflow_state = {
                                    "llm_instance": st.session_state.llm_instance,
                                    "tavily_instance": st.session_state.tavily_instance,
                                    "messages": [SDLC.HumanMessage(content=f"Setup:\nProject:{proj_name}\nCat:{proj_cat}\nSub:{proj_subcat}\nLang:{proj_lang}")],
                                    "project_folder": proj_folder,
                                    "project": proj_name,
                                    "category": proj_cat,
                                    "subcategory": proj_subcat,
                                    "coding_language": proj_lang,
                                    "user_input_iteration": 0,
                                    "user_input_min_iterations": min_iter,
                                    **{k: None for k in SDLC.MainState.__annotations__ if k not in explicit_keys},
                                    "user_input_questions": [],
                                    "user_input_answers": [],
                                    "user_input_done": False,
                                    "final_uml_codes": [],
                                    "final_code_files": [],
                                    "final_test_code_files": [],
                                    "test_cases_current": [],
                                    "uml_selected_diagrams": [],
                                    "uml_current_codes": [],
                                    "uml_feedback": {},
                                    "uml_human_feedback": {},
                                    "final_uml_png_paths": [],
                                    "code_current": SDLC.GeneratedCode(files=[], instructions=""),
                                    "user_story_done": False,
                                    "product_review_done": False,
                                    "design_doc_done": False,
                                    "uml_done": False,
                                    "code_done": False,
                                    "review_security_done": False,
                                    "test_cases_passed": False,
                                    "quality_done": False,
                                    "deployment_done": False,
                                }
                                st.session_state.workflow_state = initial_workflow_state
                                st.session_state.project_folder_base = proj_folder
                                st.session_state.stage = "run_generate_questions"
                                logger.info(f"Setup complete. Starting workflow for '{proj_name}'.")
                                st.rerun()
                        except OSError as oe:
                            st.error(f"Folder error '{proj_folder}': {oe}.")
                            logger.error(f"OSError creating folder: {oe}", exc_info=True)
                        except Exception as e:
                            st.error(f"Setup error: {e}")
                            logger.error(f"Setup error: {e}", exc_info=True)
+ elif show_deploy_prefs:
270
+ with st.form("deploy_prefs_form"):
271
+ st.markdown("### Deployment Preferences"); st.info("Specify target environment.")
272
+ deploy_target = st.selectbox("Target", ["Localhost", "Docker", "AWS EC2", "AWS Lambda", "GCP Run", "Azure App Service", "Other"], key="deploy_target")
273
+ deploy_details = st.text_area("Details:", height=100, key="deploy_details", placeholder="e.g., AWS region, Nginx, DB connection")
274
+ submitted = st.form_submit_button("Generate Plan")
275
+ if submitted: prefs = f"Target: {deploy_target}\nDetails: {deploy_details}"; st.session_state.current_prefs = prefs; st.session_state.stage = "run_generate_initial_deployment"; logger.info(f"Deploy prefs: {deploy_target}"); st.rerun()
276
+
277
+ elif show_input_box:
278
+ input_key = f"input_{current_stage}"; user_val = st.text_area("Input / Feedback:", height=150, key=input_key, value=st.session_state.get('user_input', ''), help="Provide feedback/answers. For Q&A, use #DONE when finished.")
279
+ submit_key = f"submit_{current_stage}"
280
+ if st.button("Submit", key=submit_key):
281
+ user_text = user_val.strip(); state = st.session_state.workflow_state
282
+ if not isinstance(state, dict): st.error("State invalid."); logger.critical("workflow_state invalid."); initialize_state(); st.rerun()
283
+ try:
284
+ next_stage = None; state['messages'] = state.get('messages', [])
285
+ map = { "collect_answers": ("user_input_answers", "run_generate_questions", True), "collect_user_story_human_feedback": ("user_story_human_feedback", "run_refine_user_stories", False), "collect_product_review_human_feedback": ("product_review_human_feedback", "run_refine_product_review", False), "collect_design_doc_human_feedback": ("design_doc_human_feedback", "run_refine_design_doc", False), "collect_uml_human_feedback": ("uml_human_feedback", "run_refine_uml_codes", False), "collect_code_human_input": ("code_human_input", "run_web_search_code", False), "collect_code_human_feedback": ("code_human_feedback", "run_refine_code", False), "merge_review_security_feedback": ("review_security_human_feedback", "run_refine_code_with_reviews", False), "collect_quality_human_feedback": ("quality_human_feedback", "run_refine_quality_and_code", False), "collect_deployment_human_feedback": ("deployment_human_feedback", "run_refine_deployment", False) }
286
+ if current_stage in map:
287
+ key, next_run, is_list = map[current_stage]
288
+ if is_list: state[key] = state.get(key, []) + [user_text]
289
+ elif key == "uml_human_feedback": state[key] = {"all": user_text}
290
+ else: state[key] = user_text
291
+ state["messages"].append(SDLC.HumanMessage(content=user_text)); next_stage = next_run
292
+ if current_stage == "collect_answers":
293
+ state["user_input_iteration"] = state.get("user_input_iteration", 0) + 1; min_i = state.get("user_input_min_iterations", 1)
294
+ lines = [l for l in user_text.splitlines() if l.strip()]; last = lines[-1].strip().upper() if lines else ""; done = "#DONE" in last
295
+ logger.debug(f"Q&A Iter:{state['user_input_iteration']}/{min_i}. Done:{done}")
296
+ if state["user_input_iteration"] >= min_i and done: state["user_input_done"] = True; next_stage = "run_refine_prompt"; logger.info("Q&A done.")
297
+ else: state["user_input_done"] = False; logger.info("Continuing Q&A.")
298
+ if current_stage == "collect_code_human_input" and not state.get('tavily_instance'): state["code_web_search_results"] = "Skipped (Tavily N/A)"; next_stage = "run_generate_code_feedback"; logger.info("Skipping web search.")
299
+ else: st.error(f"Input logic undefined: {current_stage}"); logger.error(f"Input logic missing: {current_stage}")
300
+ if next_stage: st.session_state.workflow_state = state; st.session_state.user_input = ""; st.session_state.stage = next_stage; logger.info(f"Input '{current_stage}'. -> '{next_stage}'."); st.rerun()
301
+ except Exception as e: st.error(f"Input error: {e}"); logger.error(f"Input error {current_stage}: {e}", exc_info=True)
302
+
303
+ elif show_test_fb:
304
+ st.markdown("### Test Execution & Feedback"); st.info("Execute tests, provide feedback & outcome.")
305
+ ai_fb = st.session_state.workflow_state.get("test_cases_feedback", "*N/A*")
306
+ with st.expander("AI Feedback on Tests"): st.markdown(ai_fb)
307
+ human_fb = st.text_area("Feedback & Results:", height=150, key="tc_fb")
308
+ pf_status = st.radio("Core Tests Passed?", ("PASS", "FAIL"), index=1, key="tc_pf", horizontal=True)
309
+ c1, c2 = st.columns(2)
310
+ with c1: # Submit Results
311
+ if st.button("Submit Results", key="submit_test"):
312
+ state = st.session_state.workflow_state; state['messages'] = state.get('messages', [])
313
+ fb = f"Res: {pf_status}\nFB:{human_fb}"; state["test_cases_human_feedback"] = fb; state["test_cases_passed"] = (pf_status == "PASS")
314
+ state["messages"].append(SDLC.HumanMessage(content=fb)); logger.info(f"Test res: {pf_status}.")
315
+ next_s = "run_save_testing_outputs" if state["test_cases_passed"] else "run_refine_test_cases_and_code"
316
+ st.session_state.stage = next_s; st.session_state.workflow_state = state; st.rerun()
317
+ with c2: # Regen Code
318
+ if st.button("Submit & Regenerate Code", key="regen_test"):
319
+ state = st.session_state.workflow_state; state['messages'] = state.get('messages', [])
320
+ fb = f"Res: {pf_status}\nFB:{human_fb}\nDecision: Regen Code."; state["test_cases_human_feedback"] = fb; state["test_cases_passed"] = False
321
+ state["messages"].append(SDLC.HumanMessage(content=fb)); logger.info(f"Test FB ({pf_status}), regen code.")
322
+ ctx = f"From Testing:\nRes:{pf_status}\nFB:{human_fb}\nAI Test FB:{ai_fb}\nRegen code.";
323
+ state["code_human_input"] = ctx; state["messages"].append(SDLC.HumanMessage(content=f"Regen Context: {ctx[:200]}..."))
324
+ st.session_state.stage = "collect_code_human_input"; st.session_state.workflow_state = state; st.rerun()
325
+
326
+ elif show_decision_btns:
327
+ st.markdown("### Decision Point"); st.info("Review output. Refine or proceed.")
328
+ refine_map = { "collect_user_story_decision": "run_generate_user_story_feedback", "collect_product_review_decision": "run_generate_product_review_feedback", "collect_design_doc_decision": "run_generate_design_doc_feedback", "collect_uml_decision": "run_generate_uml_feedback", "collect_code_decision": "collect_code_human_input", "collect_review_security_decision": "run_code_review", "collect_quality_decision": "run_generate_quality_feedback", "collect_deployment_decision": "run_generate_deployment_feedback", }
329
+ proceed_map = { "collect_user_story_decision": ("user_story_done", SDLC.save_final_user_story, "run_generate_initial_product_review"), "collect_product_review_decision": ("product_review_done", SDLC.save_final_product_review, "run_generate_initial_design_doc"), "collect_design_doc_decision": ("design_doc_done", SDLC.save_final_design_doc, "run_select_uml_diagrams"), "collect_uml_decision": ("uml_done", SDLC.save_final_uml_diagrams, "run_generate_initial_code"), "collect_code_decision": ("code_done", None, "run_code_review"), "collect_review_security_decision": ("review_security_done", SDLC.save_review_security_outputs, "run_generate_initial_test_cases"), "collect_quality_decision": ("quality_done", SDLC.save_final_quality_analysis, "generate_initial_deployment"), "collect_deployment_decision": ("deployment_done", SDLC.save_final_deployment_plan, "END"), }
330
+ cols = st.columns(3 if current_stage == "collect_quality_decision" else 2)
331
+ with cols[0]: # Refine
332
+ if st.button("Refine", key=f"refine_{current_stage}"):
333
+ if current_stage in refine_map: state = st.session_state.workflow_state; done_key = current_stage.replace("collect_", "").replace("_decision", "_done"); state[done_key]=False; next_refine = refine_map[current_stage]; st.session_state.stage = next_refine; st.session_state.workflow_state = state; logger.info(f"Decision: Refine '{current_stage}'. -> '{next_refine}'."); st.rerun()
334
+ else: st.warning("Refine undefined."); logger.warning(f"Refine undefined for {current_stage}")
335
+ with cols[1]: # Proceed
336
+ if st.button("Proceed", key=f"proceed_{current_stage}"):
337
+ if current_stage in proceed_map:
338
+ state = st.session_state.workflow_state; done_key, save_func, next_stage = proceed_map[current_stage]; err = False
339
+ try:
340
+ state[done_key] = True; logger.info(f"Decision: Proceed from '{current_stage}'. Marked '{done_key}'=True.")
341
+ if current_stage == "collect_code_decision": # Promote code
342
+ code_obj = state.get("code_current");
343
+ if code_obj and isinstance(code_obj, SDLC.GeneratedCode) and code_obj.files: state["final_code_files"] = code_obj.files; logger.info(f"Promoted {len(code_obj.files)} files.")
344
+ else: st.warning("Proceed code gen, but 'code_current' invalid."); logger.warning("Proceed code gen, invalid."); state["final_code_files"] = []
345
+ if save_func: # Save artifact
346
+ fn = getattr(save_func, '__name__', 'save_func'); logger.info(f"Saving: {fn}")
347
+ with st.spinner(f"Saving..."): state = save_func(state); st.session_state.workflow_state = state
348
+ # Post-save check (basic)
349
+ map_paths = { SDLC.save_final_user_story: "final_user_story_path", SDLC.save_final_product_review: "final_product_review_path", SDLC.save_final_design_doc: "final_design_document_path", SDLC.save_final_uml_diagrams: "final_uml_diagram_folder", SDLC.save_review_security_outputs: "final_review_security_folder", SDLC.save_testing_outputs: "final_testing_folder", SDLC.save_final_quality_analysis: "final_quality_analysis_path", SDLC.save_final_deployment_plan: "final_deployment_path", }; path_key = map_paths.get(save_func); path_val = state.get(path_key) if path_key else True; qa_ok = True if save_func != SDLC.save_final_quality_analysis else bool(state.get("final_code_folder"))
350
+ if (path_key and not path_val) or not qa_ok: st.warning(f"Saving for '{current_stage}' may have failed."); logger.warning(f"Save check failed for {fn}.")
351
+ else: logger.info(f"Save {fn} ok.")
352
+ except Exception as e: st.error(f"Finalize error '{current_stage}': {e}"); logger.error(f"Proceed error {current_stage}: {e}", exc_info=True); err = True
353
+ if not err: st.session_state.stage = next_stage; logger.info(f"-> {next_stage}"); st.rerun()
354
+ else: st.warning("Proceed undefined."); logger.warning(f"Proceed undefined for {current_stage}")
355
+ if current_stage == "collect_quality_decision": # QA Regen
356
+ with cols[2]:
357
+ if st.button("Regen Code", key="regen_qa"):
358
+ state = st.session_state.workflow_state; state['messages'] = state.get('messages', []); logger.info("Decision: Regen Code from QA.")
359
+ qa_sum = state.get('quality_current_analysis', 'N/A')[:1000]
360
+ ctx = f"From QA:\nFindings:\n{qa_sum}...\nRegen code."; state["code_human_input"] = ctx; state["messages"].append(SDLC.HumanMessage(content=f"Regen Context: {ctx[:200]}..."))
361
+ st.session_state.stage = "collect_code_human_input"; st.session_state.workflow_state = state; st.rerun()
362
+
363
+ elif current_stage == "END":
364
+ st.balloons(); final_msg = "## Workflow Completed!\n\nUse sidebar downloads or restart."; update_display(final_msg); st.markdown(final_msg); logger.info("Workflow END.")
365
+ elif not current_stage.startswith("run_"): st.error(f"Unknown UI stage: '{current_stage}'. Restart?"); logger.error(f"Unknown UI stage: {current_stage}")
366
+
367
# --- Cycle Indicator ---
with indicator_col:
    st.subheader("Workflow Cycles")
    # Map the fine-grained stage to its major cycle and find its position.
    current_major = STAGE_TO_CYCLE.get(current_stage, "Unknown")
    current_idx = -1
    if current_major in CYCLE_ORDER:
        current_idx = CYCLE_ORDER.index(current_major)
    elif current_major == "END":
        current_idx = len(CYCLE_ORDER)
    st.markdown("""<style>.cycle-item { margin-bottom: 0.75em; transition: all 0.3s ease-in-out; padding: 4px 0; } .cycle-past { opacity: 0.4; font-size: 0.9em; padding-left: 15px; border-left: 4px solid #ccc; } .cycle-current { font-weight: bold; font-size: 1.1em; color: #008080; border-left: 4px solid #008080; padding-left: 11px;} .cycle-future { opacity: 0.7; font-size: 0.95em; padding-left: 15px; border-left: 4px solid #eee; } .cycle-end { font-weight: bold; font-size: 1.0em; color: #4CAF50; border-left: 4px solid #4CAF50; padding-left: 11px; }</style>""", unsafe_allow_html=True)
    # Show a sliding window of cycles around the current one.
    win_before, win_after = 2, 4
    start = max(0, current_idx - win_before)
    end = min(len(CYCLE_ORDER), start + win_before + win_after)
    start = max(0, end - (win_before + win_after))
    for i, cycle_name in enumerate(CYCLE_ORDER):
        if not (start <= i < end):
            continue
        css = "cycle-item"
        display = cycle_name
        if i < current_idx:
            css += " cycle-past"
        elif i == current_idx and current_major != "END":
            css += " cycle-current"
            display = f"➡️ {cycle_name}"
        else:
            css += " cycle-future"
        st.markdown(f'<div class="{css}">{display}</div>', unsafe_allow_html=True)
    if current_major == "END":
        st.markdown('<div class="cycle-item cycle-end">✅ Workflow End</div>', unsafe_allow_html=True)
# --- Invisible Stages Logic ---

def run_workflow_step(func, next_display_stage, *args):
    """
    Execute one backend workflow function, update state/display, and
    transition to the next appropriate UI stage.

    Args:
        func: Backend SDLC function. Receives the current workflow state
            dict (plus ``*args``) and must return the updated state dict.
        next_display_stage: UI stage to switch to after the step succeeds.
            Several branches below override it (e.g. refine steps jump
            straight to their corresponding decision stage).
        *args: Extra positional arguments forwarded to ``func``.

    Side effects:
        Mutates ``st.session_state.workflow_state`` and
        ``st.session_state.stage``, refreshes the panel via
        ``update_display`` and calls ``st.rerun()``. On failure it renders
        an error plus a retry button instead of advancing.
    """
    state = st.session_state.workflow_state
    # Guard against a corrupted session: the workflow state must be a dict.
    if not isinstance(state, dict):
        st.error("Critical Error: workflow_state is invalid. Restarting.")
        logger.critical("run_workflow_step called with invalid state type. Forcing restart.")
        initialize_state()
        st.rerun()
        return

    func_name = getattr(func, '__name__', repr(func))
    # The deployment step is wrapped in a lambda (to inject user prefs);
    # recover a readable name for logging and display.
    if func_name == '<lambda>' and next_display_stage == "run_generate_deployment_feedback":
        func_name = "generate_initial_deployment"

    logger.info(f"Attempting to run workflow function: {func_name}")

    try:
        spinner_message = f"Running: {func_name.replace('_',' ').title()}..."
        with st.spinner(spinner_message):
            # Steps that do NOT require an LLM instance (pure save/bookkeeping
            # steps; web_search_code checks for its Tavily client internally).
            non_llm_funcs = {
                SDLC.save_final_user_story, SDLC.save_final_product_review,
                SDLC.save_final_design_doc, SDLC.save_final_uml_diagrams,
                SDLC.save_review_security_outputs, SDLC.save_testing_outputs,
                SDLC.save_final_quality_analysis, SDLC.save_final_deployment_plan,
                SDLC.web_search_code,
            }
            if func not in non_llm_funcs and not state.get('llm_instance'):
                raise ConnectionError("LLM is not configured or initialized in the current state.")

            # Web search is optional: when no Tavily client is configured,
            # record the skip and jump straight to code feedback generation
            # (the stage web search would otherwise lead to).
            if func == SDLC.web_search_code and not state.get('tavily_instance'):
                logger.warning("Web search called but Tavily instance not found in state. Skipping.")
                state["code_web_search_results"] = "Skipped (Tavily client not configured/initialized in state)"
                if 'messages' not in state:
                    state['messages'] = []
                # Fix: qualify AIMessage via the SDLC module for consistency
                # with SDLC.HumanMessage used elsewhere in this file — a bare
                # AIMessage is not guaranteed to be imported here (TODO:
                # confirm against app.py's import block).
                state["messages"].append(SDLC.AIMessage(content="Web Search: Skipped (Tavily not available in state)"))
                st.session_state.workflow_state = state
                st.session_state.stage = "run_generate_code_feedback"
                logger.info("Skipping web search, directly transitioning to 'run_generate_code_feedback'")
                st.rerun()
                return

            # Code review immediately chains into the security check stage.
            if func == SDLC.code_review:
                logger.info("Executing code review step...")
                state = SDLC.code_review(state)
                st.session_state.workflow_state = state
                st.session_state.stage = "run_security_check"
                logger.info("Code review complete, triggering security check immediately.")
                st.rerun()
                return

            # Normal execution path.
            updated_state = func(state, *args)

            # The backend contract is "dict in, dict out"; anything else means
            # the step failed internally.
            if not isinstance(updated_state, dict):
                logger.error(f"Function {func_name} did not return a dictionary state. Returned type: {type(updated_state)}")
                st.error(f"Workflow Error: Step '{func_name}' failed internally (invalid return type).")
                return

            st.session_state.workflow_state = updated_state
            logger.debug(f"State updated after {func_name}.")

            # --- Determine display content based on the completed step ---
            # Fallback message when no branch below matches.
            display_text = f"Completed: {func_name}. Preparing next step..."

            if func == SDLC.generate_questions:
                questions = updated_state.get("user_input_questions", [])
                num_q = len(questions)
                start_index = max(0, num_q - 5)
                latest_questions = questions[start_index:]
                if latest_questions:
                    min_iter = updated_state.get('user_input_min_iterations', 1)
                    # Iteration count is updated *after* answer submission.
                    current_iter = updated_state.get("user_input_iteration", 0)
                    min_iter_msg = f"(Minimum {min_iter} rounds required)" if current_iter < min_iter else ""
                    display_text = f"Please answer the following questions {min_iter_msg}:\n\n" + "\n".join(f"- {q}" for q in latest_questions)
                    # Only offer the exit keyword once the *next* round meets
                    # the configured minimum.
                    if current_iter + 1 >= min_iter:
                        display_text += "\n\n*Type '#DONE' on the last line when finished.*"
                else:
                    # LLM produced no questions (requirements deemed clear):
                    # force the transition to prompt refinement.
                    display_text = "AI indicates requirements may be clear. Proceeding to refine prompt..."
                    next_display_stage = "run_refine_prompt"

            elif func == SDLC.refine_prompt:
                display_text = "**Refined Project Prompt:**\n\n```\n{}\n```\n\n*Proceeding to generate User Stories...*".format(updated_state.get('refined_prompt', 'N/A'))

            elif func in [SDLC.generate_initial_user_stories, SDLC.refine_user_stories]:
                us_current = updated_state.get('user_story_current', 'N/A')
                display_text = f"**Current User Stories:**\n\n{us_current}"
                if func == SDLC.refine_user_stories:
                    display_text += "\n\n*Please review the refined stories and decide whether to refine further or proceed.*"
                    next_display_stage = "collect_user_story_decision"
                else:
                    display_text += "\n\n*Generating AI feedback on these stories...*"

            elif func == SDLC.generate_user_story_feedback:
                feedback = updated_state.get('user_story_feedback', 'N/A')
                display_text = f"**AI Feedback (User Stories):**\n\n{feedback}\n\n*Please provide your feedback on the stories and the AI's assessment.*"

            elif func in [SDLC.generate_initial_product_review, SDLC.refine_product_review]:
                review_current = updated_state.get('product_review_current', 'N/A')
                display_text = f"**Current Product Review:**\n\n{review_current}"
                if func == SDLC.refine_product_review:
                    display_text += "\n\n*Please review the refined PO review and decide whether to refine further or proceed.*"
                    next_display_stage = "collect_product_review_decision"
                else:
                    display_text += "\n\n*Generating AI feedback on this review...*"

            elif func == SDLC.generate_product_review_feedback:
                feedback = updated_state.get('product_review_feedback', 'N/A')
                display_text = f"**AI Feedback (Product Review):**\n\n{feedback}\n\n*Please provide your feedback on the review and the AI's assessment.*"

            elif func in [SDLC.generate_initial_design_doc, SDLC.refine_design_doc]:
                doc_current = updated_state.get('design_doc_current', 'N/A')
                display_text = f"**Current Design Document:**\n\n{doc_current}"
                if func == SDLC.refine_design_doc:
                    display_text += "\n\n*Please review the refined design document and decide whether to refine further or proceed.*"
                    next_display_stage = "collect_design_doc_decision"
                else:
                    display_text += "\n\n*Generating AI feedback on this design...*"

            elif func == SDLC.generate_design_doc_feedback:
                feedback = updated_state.get('design_doc_feedback', 'N/A')
                display_text = f"**AI Feedback (Design Doc):**\n\n{feedback}\n\n*Please provide your feedback on the design and the AI's assessment.*"

            elif func == SDLC.select_uml_diagrams:
                messages = updated_state.get('messages', [])
                # The selection justification is expected in the last message.
                justification_msg = messages[-1].content if messages else "Selection complete."
                display_text = f"**Selected UML Diagram Types:**\n\n{justification_msg}\n\n*Generating initial diagrams...*"

            elif func in [SDLC.generate_initial_uml_codes, SDLC.refine_uml_codes]:
                codes = updated_state.get('uml_current_codes', [])
                codes_display = "\n\n".join([f"**{c.diagram_type}**:\n```plantuml\n{c.code}\n```" for c in codes])
                status = "Refined" if func == SDLC.refine_uml_codes else "Generated"
                display_text = f"**{status} UML Codes:**\n\n{codes_display}"
                if func == SDLC.refine_uml_codes:
                    display_text += "\n\n*Please review the refined diagrams and decide whether to refine further or proceed.*"
                    next_display_stage = "collect_uml_decision"
                else:
                    display_text += "\n\n*Generating AI feedback on these diagrams...*"

            elif func == SDLC.generate_uml_feedback:
                feedback_dict = updated_state.get('uml_feedback', {})
                feedback_display = "\n\n".join([f"**Feedback for {dt}:**\n{fb}" for dt, fb in feedback_dict.items()])
                display_text = f"**AI Feedback on UML Diagrams:**\n\n{feedback_display}\n\n*Please provide your overall feedback on the diagrams and the AI assessment.*"

            elif func in [SDLC.generate_initial_code, SDLC.refine_code, SDLC.refine_code_with_reviews]:
                code_data = updated_state.get("code_current")
                stage_desc = "Initial" if func == SDLC.generate_initial_code else "Refined"
                if code_data and isinstance(code_data, SDLC.GeneratedCode):
                    # Render files, truncating the combined preview at ~3000 chars.
                    files_display = []
                    total_len, max_len = 0, 3000
                    for f in code_data.files:
                        s = f.content[:max_len - total_len]
                        file_disp = f"**{f.filename}**:\n```\n{s}{'...' if len(f.content) > len(s) else ''}\n```"
                        files_display.append(file_disp)
                        total_len += len(s) + len(f.filename)
                        if total_len >= max_len:
                            files_display.append("\n*... (Code truncated)*")
                            break
                    num_files = len(code_data.files)
                    instr = code_data.instructions
                    display_text = f"**{stage_desc} Code ({num_files} files):**\n{''.join(files_display)}\n\n**Setup/Run:**\n```\n{instr}\n```"
                    if func == SDLC.generate_initial_code:
                        display_text += "\n\n*Attempt run & provide feedback.*"
                        next_display_stage = "collect_code_human_input"
                    elif func == SDLC.refine_code:
                        display_text += "\n\n*Review refined code.*"
                        next_display_stage = "collect_code_decision"
                    elif func == SDLC.refine_code_with_reviews:
                        display_text += "\n\n*Review code refined post-review.*"
                        next_display_stage = "collect_review_security_decision"
                else:
                    display_text = f"{stage_desc} code step done, but no valid code data."
                    logger.error(f"{func_name} invalid code data.")

            elif func == SDLC.web_search_code:
                results = updated_state.get('code_web_search_results', 'N/A')
                display_text = f"**Web Search Results:**\n\n{results}\n\n*Generating AI feedback...*"

            elif func == SDLC.generate_code_feedback:
                feedback = updated_state.get('code_feedback', 'N/A')
                display_text = f"**AI Code Feedback:**\n\n{feedback}\n\n*Please provide your comments.*"

            elif func == SDLC.security_check:
                # Shown after the review -> security chain completes.
                review_fb = updated_state.get('code_review_current_feedback', 'N/A')
                security_fb = updated_state.get('security_current_feedback', 'N/A')
                display_text = f"**Code Review:**\n```\n{review_fb}\n```\n\n**Security Check:**\n```\n{security_fb}\n```\n\n*Provide overall feedback.*"

            elif func == SDLC.generate_initial_test_cases:
                tests = updated_state.get('test_cases_current', [])
                tests_d = "\n\n".join([f"**{tc.description}**:\n - In:`{tc.input_data}`\n - Exp:`{tc.expected_output}`" for tc in tests])
                display_text = f"**Generated Tests ({len(tests)}):**\n\n{tests_d}\n\n*Generating AI feedback...*"

            elif func == SDLC.generate_test_cases_feedback:
                feedback = updated_state.get('test_cases_feedback', 'N/A')
                display_text = f"**AI Test Case Feedback:**\n\n{feedback}\n\n*Execute tests & provide results.*"

            elif func == SDLC.refine_test_cases_and_code:
                tests = updated_state.get('test_cases_current', [])
                files_count = len(updated_state.get('final_code_files', []))
                tests_d = "\n\n".join([f"**{tc.description}**:\n - In:`{tc.input_data}`\n - Exp:`{tc.expected_output}`" for tc in tests])
                display_text = f"**Refined Tests & Code ({files_count} files):**\n\n*Code/tests updated.*\n\n**Refined Tests ({len(tests)}):**\n{tests_d}\n\n*Execute tests again.*"
                # Always collect feedback again after a refine pass.
                next_display_stage = "collect_test_cases_human_feedback"

            elif func == SDLC.save_testing_outputs:
                display_text = "Test results saved (PASS). Generating QA report..."

            elif func in [SDLC.generate_initial_quality_analysis, SDLC.refine_quality_and_code]:
                report = updated_state.get('quality_current_analysis', 'N/A')
                display_text = f"**Quality Analysis Report:**\n\n{report}"
                if func == SDLC.refine_quality_and_code:
                    display_text += "\n\n*Review refined QA report.*"
                    next_display_stage = "collect_quality_decision"
                else:
                    display_text += "\n\n*Generating AI feedback...*"

            elif func == SDLC.generate_quality_feedback:
                feedback = updated_state.get('quality_feedback', 'N/A')
                display_text = f"**AI Feedback on QA Report:**\n\n{feedback}\n\n*Provide your feedback.*"

            elif func_name == "generate_initial_deployment":  # lambda-wrapped step
                plan = updated_state.get('deployment_current_process', 'N/A')
                display_text = f"**Initial Deployment Plan:**\n```\n{plan}\n```\n\n*Generating AI feedback...*"

            elif func == SDLC.refine_deployment:
                plan = updated_state.get('deployment_current_process', 'N/A')
                display_text = f"**Refined Deployment Plan:**\n```\n{plan}\n```\n\n*Review refined plan.*"
                next_display_stage = "collect_deployment_decision"

            elif func == SDLC.generate_deployment_feedback:
                feedback = updated_state.get('deployment_feedback', 'N/A')
                display_text = f"**AI Feedback on Deployment Plan:**\n\n{feedback}\n\n*Provide your feedback.*"

            # Save functions share a generic "saved, moving on" message.
            elif func in [SDLC.save_final_user_story, SDLC.save_final_product_review, SDLC.save_final_design_doc, SDLC.save_final_uml_diagrams, SDLC.save_review_security_outputs, SDLC.save_testing_outputs, SDLC.save_final_quality_analysis, SDLC.save_final_deployment_plan]:
                artifact_name = func.__name__.replace('save_final_', '').replace('_', ' ')
                next_action_stage_name = next_display_stage
                next_action_desc = STAGE_TO_CYCLE.get(next_action_stage_name, next_action_stage_name).replace('_', ' ').title()
                if next_action_stage_name == "generate_initial_deployment":
                    next_action_desc = "Deployment Preferences"
                elif next_action_stage_name == "END":
                    next_action_desc = "Workflow Completion"
                display_text = f"Saved {artifact_name}. Starting next: {next_action_desc}..."
                logger.info(f"Artifact saved: {artifact_name}. Next: {next_action_desc}")

            # --- Update display content and transition ---
            update_display(display_text)
            st.session_state.stage = next_display_stage
            logger.info(f"Workflow function '{func_name}' completed. Transitioning UI to stage: '{next_display_stage}'")
            st.rerun()

    except ConnectionError as ce:
        error_msg = f"Connection Error during '{func_name}': {ce}. Check API keys/network. Workflow stopped."
        st.error(error_msg)
        logger.critical(error_msg, exc_info=True)
        st.stop()
    except Exception as e:
        error_msg = f"Error during step '{func_name}': {e}"
        st.error(error_msg)
        logger.error(f"Error executing {func_name}: {e}", exc_info=True)
        # Timestamped key keeps the retry button unique across reruns.
        retry_key = f"retry_{func_name}_{int(time.time())}"
        if st.button("Retry Last Step", key=retry_key):
            logger.info(f"User retry: {func_name}")
            st.rerun()
644
# --- Workflow Map: automatic "run_*" stage -> (backend function, next UI stage) ---
workflow_map = {
    "run_generate_questions": (SDLC.generate_questions, "collect_answers"),
    "run_refine_prompt": (SDLC.refine_prompt, "run_generate_initial_user_stories"),
    "run_generate_initial_user_stories": (SDLC.generate_initial_user_stories, "run_generate_user_story_feedback"),
    "run_generate_user_story_feedback": (SDLC.generate_user_story_feedback, "collect_user_story_human_feedback"),
    "run_refine_user_stories": (SDLC.refine_user_stories, "collect_user_story_decision"),
    "run_generate_initial_product_review": (SDLC.generate_initial_product_review, "run_generate_product_review_feedback"),
    "run_generate_product_review_feedback": (SDLC.generate_product_review_feedback, "collect_product_review_human_feedback"),
    "run_refine_product_review": (SDLC.refine_product_review, "collect_product_review_decision"),
    "run_generate_initial_design_doc": (SDLC.generate_initial_design_doc, "run_generate_design_doc_feedback"),
    "run_generate_design_doc_feedback": (SDLC.generate_design_doc_feedback, "collect_design_doc_human_feedback"),
    "run_refine_design_doc": (SDLC.refine_design_doc, "collect_design_doc_decision"),
    "run_select_uml_diagrams": (SDLC.select_uml_diagrams, "run_generate_initial_uml_codes"),
    "run_generate_initial_uml_codes": (SDLC.generate_initial_uml_codes, "run_generate_uml_feedback"),
    "run_generate_uml_feedback": (SDLC.generate_uml_feedback, "collect_uml_human_feedback"),
    "run_refine_uml_codes": (SDLC.refine_uml_codes, "collect_uml_decision"),
    "run_generate_initial_code": (SDLC.generate_initial_code, "collect_code_human_input"),
    "run_web_search_code": (SDLC.web_search_code, "run_generate_code_feedback"),
    "run_generate_code_feedback": (SDLC.generate_code_feedback, "collect_code_human_feedback"),
    "run_refine_code": (SDLC.refine_code, "collect_code_decision"),
    "run_code_review": (SDLC.code_review, "run_security_check"),
    "run_security_check": (SDLC.security_check, "merge_review_security_feedback"),
    "run_refine_code_with_reviews": (SDLC.refine_code_with_reviews, "collect_review_security_decision"),
    "run_generate_initial_test_cases": (SDLC.generate_initial_test_cases, "run_generate_test_cases_feedback"),
    "run_generate_test_cases_feedback": (SDLC.generate_test_cases_feedback, "collect_test_cases_human_feedback"),
    "run_refine_test_cases_and_code": (SDLC.refine_test_cases_and_code, "collect_test_cases_human_feedback"),
    "run_save_testing_outputs": (SDLC.save_testing_outputs, "run_generate_initial_quality_analysis"),
    "run_generate_initial_quality_analysis": (SDLC.generate_initial_quality_analysis, "run_generate_quality_feedback"),
    "run_generate_quality_feedback": (SDLC.generate_quality_feedback, "collect_quality_human_feedback"),
    "run_refine_quality_and_code": (SDLC.refine_quality_and_code, "collect_quality_decision"),
    # Deployment generation needs the user's preferences, so it is wrapped
    # in a lambda that injects them from session state.
    "run_generate_initial_deployment": (
        lambda state: SDLC.generate_initial_deployment(state, st.session_state.current_prefs),
        "run_generate_deployment_feedback",
    ),
    "run_generate_deployment_feedback": (SDLC.generate_deployment_feedback, "collect_deployment_human_feedback"),
    "run_refine_deployment": (SDLC.refine_deployment, "collect_deployment_decision"),
}
646
+
647
# --- Main Execution Logic ---
# Once the sidebar configuration has been applied, dispatch any automatic
# "run_*" stage to its backend handler via the workflow map.
if st.session_state.get('config_applied', False):
    current_stage = st.session_state.stage
    if current_stage.startswith("run_"):
        if current_stage in workflow_map:
            step_func, following_stage = workflow_map[current_stage]
            run_workflow_step(step_func, following_stage)
        else:
            st.error(f"Unknown processing stage '{current_stage}'. Resetting.")
            logger.critical(f"Halted at unknown stage: {current_stage}.")
            initialize_state()
            st.rerun()
# --- END OF app.py ---
requirements.txt ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ langgraph
2
+ langchain
3
+ langchain-openai
4
+ langchain-groq
5
+ langgraph-supervisor
6
+ langmem
7
+ pytest
8
+ bandit
9
+ typing_extensions
10
+ streamlit
11
+ open-deep-research
12
+ tavily-python
13
+ plantuml