- analyzer.py +1 -1
- app.py +8 -1
analyzer.py CHANGED
@@ -115,7 +115,7 @@ def aggregate_chunk_analyses(chunk_jsons: list) -> str:
     client = OpenAI(api_key=os.getenv("modal_api"))
     client.base_url = os.getenv("base_url")
     aggregation_prompt = (
-        "You are a highly precise and strict JSON generator. You are given a list of JSON analyses of code chunks. "
+        "You are a highly precise and strict code analyzer and JSON generator. You are given a list of JSON analyses of code chunks. "
         "Aggregate these into a SINGLE overall JSON summary with the same keys: 'strength', 'weaknesses', 'speciality', 'relevance rating'. "
         "Summarize and combine the information from all chunks. Do NOT include any explanation, markdown, or text outside the JSON. "
         "If a key is missing in all chunks, use an empty string. "
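
For context on how the reworded prompt is likely consumed, a minimal sketch of the rest of aggregate_chunk_analyses is shown below; the model name, message layout, and the way chunk_jsons is serialized are assumptions, since the diff only shows the prompt text and client setup.

# Sketch only: everything after aggregation_prompt is assumed, not taken from the diff.
import json
import os

from openai import OpenAI

def aggregate_chunk_analyses(chunk_jsons: list) -> str:
    client = OpenAI(api_key=os.getenv("modal_api"))
    client.base_url = os.getenv("base_url")
    aggregation_prompt = (
        "You are a highly precise and strict code analyzer and JSON generator. "
        "You are given a list of JSON analyses of code chunks. "
        # ... remaining instructions as in the diff above ...
    )
    # Assumed call shape: the system message carries the instructions,
    # the user message carries the serialized per-chunk analyses.
    response = client.chat.completions.create(
        model="gpt-4o-mini",  # placeholder model name, not from the diff
        messages=[
            {"role": "system", "content": aggregation_prompt},
            {"role": "user", "content": json.dumps(chunk_jsons)},
        ],
    )
    return response.choices[0].message.content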
app.py CHANGED
@@ -105,7 +105,14 @@ def show_combined_repo_and_llm():
     except Exception as e:
         return f"Error reading {txt_path}: {e}", "", read_csv_as_text("repo_ids.csv")
     llm_output = analyze_combined_file(txt_path)
-
+    # Extract only the last JSON object (final summary) for CSV writing
+    last_start = llm_output.rfind('{')
+    last_end = llm_output.rfind('}')
+    if last_start != -1 and last_end != -1 and last_end > last_start:
+        final_json_str = llm_output[last_start:last_end+1]
+    else:
+        final_json_str = llm_output
+    llm_json = parse_llm_json_response(final_json_str)
     # Update CSV for the current repo id
     csv_filename = "repo_ids.csv"
     extraction_status = ""
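
The added lines slice llm_output between the last '{' and the last '}', so that when the model emits several JSON objects (per-chunk analyses followed by the final summary), only the trailing object reaches parse_llm_json_response. A standalone illustration of that slicing; the sample output string below is invented:

# Invented sample: a per-chunk object followed by the final summary object.
llm_output = (
    '{"strength": "clear structure", "weaknesses": "", "speciality": "parsing", "relevance rating": "medium"}\n'
    '{"strength": "clear structure", "weaknesses": "sparse tests", "speciality": "parsing", "relevance rating": "high"}'
)

last_start = llm_output.rfind('{')
last_end = llm_output.rfind('}')
if last_start != -1 and last_end != -1 and last_end > last_start:
    final_json_str = llm_output[last_start:last_end + 1]
else:
    final_json_str = llm_output

print(final_json_str)
# -> {"strength": "clear structure", "weaknesses": "sparse tests", "speciality": "parsing", "relevance rating": "high"}

Note that rfind('{') points at the last opening brace, so the slice captures the whole summary only when that summary contains no nested objects; with nested braces it would start at the innermost one.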