hi
Files changed:
- analyzer.py  +13 -1
- app.py  +17 -1
analyzer.py
CHANGED
@@ -1,5 +1,6 @@
 import openai
 import os
+import json
 
 def analyze_code(code: str) -> str:
     """
@@ -8,7 +9,12 @@ def analyze_code(code: str) -> str:
     """
     from openai import OpenAI
     client = OpenAI()
-    system_prompt = 
+    system_prompt = (
+        "You are a helpful assistant. Analyze the code given to you. "
+        "Return your response strictly in JSON format with the following keys: "
+        "'strength', 'weaknesses', 'speciality', 'relevance rating'. "
+        "Do not include any other text outside the JSON."
+    )
     response = client.chat.completions.create(
         model="gpt-4-1106-preview",  # GPT-4.1 mini
         messages=[
@@ -20,6 +26,12 @@ def analyze_code(code: str) -> str:
     )
     return response.choices[0].message.content
 
+def parse_llm_json_response(response: str):
+    try:
+        return json.loads(response)
+    except Exception as e:
+        return {"error": f"Failed to parse JSON: {e}", "raw": response}
+
 def combine_repo_files_for_llm(repo_dir="repo_files", output_file="combined_repo.txt"):
     """
     Combines all .py and .md files in the given directory (recursively) into a single text file.
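How the two analyzer.py pieces fit together: the new system prompt asks the model for strict JSON with the keys 'strength', 'weaknesses', 'speciality' and 'relevance rating', and parse_llm_json_response turns that text back into a dict, returning an error dict instead of raising when the text is not valid JSON. A minimal sketch, assuming analyzer.py is importable; the sample strings are illustrative, not real model output:

from analyzer import parse_llm_json_response

# A well-formed response in the shape the system prompt requests (made-up values).
good = (
    '{"strength": "clear module layout", "weaknesses": "no tests", '
    '"speciality": "Gradio repo analysis", "relevance rating": "high"}'
)
print(parse_llm_json_response(good)["strength"])   # clear module layout

# Anything that is not valid JSON falls back to the error dict.
bad = "Sure! Here is my analysis: ..."
print(parse_llm_json_response(bad)["error"])       # Failed to parse JSON: ...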
app.py
CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
 import regex as re
 import csv
 import pandas as pd
-from analyzer import combine_repo_files_for_llm, analyze_combined_file
+from analyzer import combine_repo_files_for_llm, analyze_combined_file, parse_llm_json_response
 from hf_utils import download_space_repo
 
 # from hf_utils import download_space_repo
@@ -57,6 +57,22 @@ def show_combined_repo_and_llm():
     except Exception as e:
         return f"Error reading {txt_path}: {e}", ""
     llm_output = analyze_combined_file(txt_path)
+    llm_json = parse_llm_json_response(llm_output)
+    # Update CSV for the first repo id
+    csv_filename = "repo_ids.csv"
+    try:
+        df = pd.read_csv(csv_filename)
+        for idx, row in df.iterrows():
+            if row["repo id"] == first_repo_id:
+                if isinstance(llm_json, dict):
+                    df.at[idx, "strength"] = llm_json.get("strength", "")
+                    df.at[idx, "weaknesses"] = llm_json.get("weaknesses", "")
+                    df.at[idx, "speciality"] = llm_json.get("speciality", "")
+                    df.at[idx, "relevance rating"] = llm_json.get("relevance rating", "")
+                break
+        df.to_csv(csv_filename, index=False)
+    except Exception as e:
+        pass  # Optionally log error
     return combined_content, llm_output
 
 repo_id_input = gr.Textbox(label="Enter repo IDs (comma or newline separated)", lines=5, placeholder="repo1, repo2\nrepo3")
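show_combined_repo_and_llm now writes the parsed fields back into repo_ids.csv, matching rows on the 'repo id' column for the first repo entered and filling the 'strength', 'weaknesses', 'speciality' and 'relevance rating' columns. A rough sketch of the CSV shape this update loop assumes; the column names come from the diff, while the rows and repo ids are hypothetical:

import pandas as pd

# Hypothetical repo_ids.csv before analysis: one row per repo, analysis columns empty.
pd.DataFrame(
    {
        "repo id": ["user/space-a", "user/space-b"],
        "strength": ["", ""],
        "weaknesses": ["", ""],
        "speciality": ["", ""],
        "relevance rating": ["", ""],
    }
).to_csv("repo_ids.csv", index=False)

# After show_combined_repo_and_llm() runs with "user/space-a" as the first repo id,
# that row's four analysis columns hold the values from the parsed LLM JSON and the
# other rows are written back unchanged; parse or I/O failures leave the file as-is,
# since the except branch only passes.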