- analyzer.py +20 -0
- app.py +53 -7
- hf_utils.py +1 -1
- requirements.txt +2 -1
analyzer.py
CHANGED
@@ -17,3 +17,23 @@ def analyze_code(code: str) -> str:
         temperature=0.7
     )
     return response.choices[0].message["content"]
+
+def combine_repo_files_for_llm(repo_dir="repo_files", output_file="combined_repo.txt"):
+    """
+    Combines all .py and .md files in the given directory (recursively) into a single text file.
+    Returns the path to the combined file.
+    """
+    combined_content = []
+    for root, _, files in os.walk(repo_dir):
+        for file in files:
+            if file.endswith(".py") or file.endswith(".md"):
+                file_path = os.path.join(root, file)
+                try:
+                    with open(file_path, "r", encoding="utf-8") as f:
+                        combined_content.append(f"\n# File: {file_path}\n")
+                        combined_content.append(f.read())
+                except Exception as e:
+                    combined_content.append(f"\n# Could not read {file_path}: {e}\n")
+    with open(output_file, "w", encoding="utf-8") as out_f:
+        out_f.write("\n".join(combined_content))
+    return output_file
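For orientation: the new helper walks repo_dir, concatenates every .py and .md file (each prefixed with a "# File:" marker), and writes the result to output_file. It relies on os.walk and os.path.join, so analyzer.py needs an import os above the hunk shown here. A minimal usage sketch, assuming a repo_files directory has already been populated (for example by download_space_repo); the call sequence is illustrative, not part of the commit:

# Illustrative usage of the new helper (not part of the diff).
from analyzer import combine_repo_files_for_llm

combined_path = combine_repo_files_for_llm(repo_dir="repo_files", output_file="combined_repo.txt")
with open(combined_path, "r", encoding="utf-8") as f:
    print(f.read()[:500])  # preview the combined .py/.md contents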
app.py
CHANGED
@@ -2,6 +2,9 @@ import gradio as gr
 import regex as re
 import csv
 import pandas as pd
+from analyzer import combine_repo_files_for_llm
+from hf_utils import download_space_repo
+
 # from hf_utils import download_space_repo
 
 def process_repo_input(text):
@@ -20,11 +23,54 @@ def process_repo_input(text):
     df = pd.read_csv(csv_filename)
     return df
 
-
-
-
-
-
-
-
+# Store the last entered repo ids in a global variable for button access
+last_repo_ids = []
+
+def process_repo_input_and_store(text):
+    global last_repo_ids
+    if not text:
+        last_repo_ids = []
+        return pd.DataFrame(columns=["repo id", "strength", "weaknesses", "speciality", "relevance rating"])
+    repo_ids = [repo.strip() for repo in re.split(r'[\n,]+', text) if repo.strip()]
+    last_repo_ids = repo_ids
+    csv_filename = "repo_ids.csv"
+    with open(csv_filename, mode="w", newline='', encoding="utf-8") as csvfile:
+        writer = csv.writer(csvfile)
+        writer.writerow(["repo id", "strength", "weaknesses", "speciality", "relevance rating"])
+        for repo_id in repo_ids:
+            writer.writerow([repo_id, "", "", "", ""])
+    df = pd.read_csv(csv_filename)
+    return df
+
+def show_combined_repo():
+    if not last_repo_ids:
+        return "No repo ID available. Please submit repo IDs first."
+    first_repo_id = last_repo_ids[0]
+    try:
+        download_space_repo(first_repo_id, local_dir="repo_files")
+    except Exception as e:
+        return f"Error downloading repo: {e}"
+    txt_path = combine_repo_files_for_llm()
+    try:
+        with open(txt_path, "r", encoding="utf-8") as f:
+            return f.read()
+    except Exception as e:
+        return f"Error reading {txt_path}: {e}"
+
+repo_id_input = gr.Textbox(label="Enter repo IDs (comma or newline separated)", lines=5, placeholder="repo1, repo2\nrepo3")
+df_output = gr.Dataframe(headers=["repo id", "strength", "weaknesses", "speciality", "relevance rating", "Usecase"])
+
+with gr.Blocks() as demo:
+    gr.Markdown("## Repo ID Input")
+    repo_id_box = repo_id_input.render()
+    df_box = df_output.render()
+    submit_btn = gr.Button("Submit Repo IDs")
+    submit_btn.click(process_repo_input_and_store, inputs=repo_id_box, outputs=df_box)
+
+    gr.Markdown("---")
+    gr.Markdown("## Combine and Display Repo Files")
+    combine_btn = gr.Button("Download, Combine & Show .py/.md Files from First Repo")
+    combined_txt = gr.Textbox(label="Combined Repo Files", lines=20)
+    combine_btn.click(show_combined_repo, inputs=None, outputs=combined_txt)
+
 demo.launch()
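In the updated app, the first button writes the entered ids to repo_ids.csv and remembers them in the module-level last_repo_ids, and the second button downloads the first remembered repo and displays its combined .py/.md text. A rough, UI-free sketch of that flow (illustrative only: the Space ids are invented, and the calls assume the two callbacks are already in scope rather than imported from app.py, since importing app.py would also execute demo.launch()):

# Hypothetical walk-through of the new callbacks without the Gradio UI.
df = process_repo_input_and_store("someuser/some-space\nsomeuser/another-space")
print(df)                        # one empty analysis row per repo id, read back from repo_ids.csv

combined = show_combined_repo()  # downloads the first repo, then combines its .py/.md files
print(combined[:500])            # combined text, or an error message if download/read failed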
hf_utils.py
CHANGED
@@ -2,7 +2,7 @@ from huggingface_hub import snapshot_download
 import os
 import shutil
 
-def download_space_repo(space_id: str, local_dir: str = "
+def download_space_repo(space_id: str, local_dir: str = "repo_files"):
     """
     Downloads all files from a Hugging Face Space repository.
 
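The body of download_space_repo sits outside this one-line hunk, so only the changed default for local_dir is visible. Judging from the snapshot_download import and the docstring, it presumably wraps huggingface_hub.snapshot_download with repo_type="space"; the sketch below is an assumption about that unshown body, not the committed code:

from huggingface_hub import snapshot_download

def download_space_repo(space_id: str, local_dir: str = "repo_files") -> str:
    # Assumed implementation: snapshot the whole Space into local_dir.
    # repo_id, repo_type, and local_dir are real snapshot_download parameters;
    # the actual committed body may differ (it also imports os and shutil).
    return snapshot_download(repo_id=space_id, repo_type="space", local_dir=local_dir)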
requirements.txt
CHANGED
@@ -1,4 +1,5 @@
 gradio
 pandas
 openai
-regex
+regex
+huggingface_hub