Update app.py
Browse files
app.py
CHANGED
def use_keywords_to_search_and_update_csv(keywords, limit_per_keyword=3):
    """Search spaces for each comma-separated keyword and persist the results to CSV.

    Splits *keywords* on commas, queries ``search_top_spaces`` for each
    keyword (up to ``limit_per_keyword`` hits each), de-duplicates the
    combined repo ids while preserving first-seen order, writes them to
    ``repo_ids.csv`` with blank assessment columns, and returns that CSV
    re-read as a DataFrame via ``read_csv_as_text``.

    Parameters
    ----------
    keywords : str
        Comma-separated search keywords.  A falsy value short-circuits
        to an empty DataFrame.
    limit_per_keyword : int, optional
        Maximum number of results fetched per keyword (default 3; was a
        hard-coded constant, now parameterized — default preserves the
        original behavior).

    Returns
    -------
    pandas.DataFrame
        Columns ``["repo id", "strength", "weaknesses", "speciality",
        "relevance rating"]``; empty (header-only) when *keywords* is falsy.

    Side effects
    ------------
    Overwrites ``repo_ids.csv`` and resets the module globals
    ``last_repo_ids`` and ``current_repo_idx``.
    """
    global last_repo_ids, current_repo_idx
    if not keywords:
        return pd.DataFrame(columns=["repo id", "strength", "weaknesses", "speciality", "relevance rating"])

    # Split the comma-separated keywords, dropping empty/whitespace fragments.
    keyword_list = [k.strip() for k in keywords.split(",") if k.strip()]
    repo_ids = []
    for kw in keyword_list:
        repo_ids.extend(search_top_spaces(kw, limit=limit_per_keyword))

    # De-duplicate while preserving first-seen order (dicts preserve
    # insertion order in Python 3.7+), replacing the manual seen-set loop.
    unique_repo_ids = list(dict.fromkeys(repo_ids))

    # Track pagination state for subsequent batch processing elsewhere
    # in the app (these globals are read by other handlers).
    last_repo_ids = unique_repo_ids
    current_repo_idx = 0

    # Persist the ids with empty assessment columns for later editing.
    csv_filename = "repo_ids.csv"
    with open(csv_filename, mode="w", newline='', encoding="utf-8") as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(["repo id", "strength", "weaknesses", "speciality", "relevance rating"])
        for repo_id in unique_repo_ids:
            writer.writerow([repo_id, "", "", "", ""])
    df = read_csv_as_text(csv_filename)
    return df