Update app.py

app.py CHANGED
@@ -17,11 +17,10 @@ import base64
 def debug_log(message: str):
     print(f"[DEBUG] {message}")
 
-#
+# --- Naver blog scraping ---
 def scrape_naver_blog(url: str) -> str:
     debug_log("scrape_naver_blog started")
     debug_log(f"Requested URL: {url}")
-
     headers = {
         "User-Agent": (
             "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
@@ -29,20 +28,14 @@ def scrape_naver_blog(url: str) -> str:
             "Chrome/96.0.4664.110 Safari/537.36"
         )
     }
-
     try:
-        # 1) request the Naver blog 'main' page
         response = requests.get(url, headers=headers)
         debug_log("HTTP GET request (main page) complete")
         if response.status_code != 200:
             debug_log(f"Request failed, status code: {response.status_code}")
             return f"An error occurred. Status code: {response.status_code}"
-
-        # 2) parse the main page
         soup = BeautifulSoup(response.text, "html.parser")
         debug_log("HTML parsing (main page) complete")
-
-        # 3) find the iframe tag
         iframe = soup.select_one("iframe#mainFrame")
         if not iframe:
             debug_log("iframe#mainFrame tag not found.")
@@ -51,12 +44,8 @@
         if not iframe_src:
             debug_log("iframe src does not exist.")
             return "Could not find the src of the content iframe."
-
-        # 4) normalize the iframe src (resolve to an absolute URL)
         parsed_iframe_url = urllib.parse.urljoin(url, iframe_src)
         debug_log(f"iframe page request URL: {parsed_iframe_url}")
-
-        # 5) request and parse the iframe page
         iframe_response = requests.get(parsed_iframe_url, headers=headers)
         debug_log("HTTP GET request (iframe page) complete")
         if iframe_response.status_code != 200:
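The iframe handling above is the crux of scraping Naver blogs: blog.naver.com serves only a shell page, and the post itself loads inside iframe#mainFrame, so the scraper fetches the shell, reads the iframe's src, resolves it against the original URL, and requests that second page. A minimal standalone sketch of the same resolution step (the helper name and timeout are illustrative, not part of the app):

    import urllib.parse
    import requests
    from bs4 import BeautifulSoup

    def resolve_iframe_url(url: str) -> str:
        # Fetch the shell page and return the absolute URL of the real post.
        html = requests.get(url, timeout=10).text
        iframe = BeautifulSoup(html, "html.parser").select_one("iframe#mainFrame")
        if iframe is None or not iframe.get("src"):
            return url
        return urllib.parse.urljoin(url, iframe["src"])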
@@ -64,8 +53,6 @@
             return f"An error occurred in the iframe request. Status code: {iframe_response.status_code}"
         iframe_soup = BeautifulSoup(iframe_response.text, "html.parser")
         debug_log("HTML parsing (iframe page) complete")
-
-        # 6) extract the title and content
         title_div = iframe_soup.select_one('.se-module.se-module-text.se-title-text')
         title = title_div.get_text(strip=True) if title_div else "Title not found."
         debug_log(f"Extracted title: {title}")
@@ -75,58 +62,42 @@
         else:
             content = "Content not found."
         debug_log("Content extraction complete")
-
         result = f"[Title]\n{title}\n\n[Content]\n{content}"
-        debug_log("Title and
+        debug_log("Title and content combined")
         return result
-
     except Exception as e:
         debug_log(f"Error occurred: {str(e)}")
         return f"An error occurred while scraping: {str(e)}"
 
-#
+# --- Morphological analysis (reference code 1) ---
 def analyze_text(text: str):
     logging.basicConfig(level=logging.DEBUG)
     logger = logging.getLogger(__name__)
     logger.debug("Original text: %s", text)
-
-    # 1. keep Korean only (remove spaces, English, symbols, etc.)
     filtered_text = re.sub(r'[^가-힣]', '', text)
-    logger.debug("Filtered
+    logger.debug("Filtered text: %s", filtered_text)
-
     if not filtered_text:
         logger.debug("No valid Korean text.")
         return pd.DataFrame(columns=["Word", "Frequency"]), ""
-
-    # 2. morphological analysis with Mecab (extract nouns and compound nouns only)
     mecab_instance = mecab.MeCab()
     tokens = mecab_instance.pos(filtered_text)
     logger.debug("Morphological analysis result: %s", tokens)
-
     freq = {}
     for word, pos in tokens:
-        if word and word.strip():
-
-
-            logger.debug("Word: %s, POS: %s, current count: %d", word, pos, freq[word])
+        if word and word.strip() and pos.startswith("NN"):
+            freq[word] = freq.get(word, 0) + 1
+            logger.debug("Word: %s, POS: %s, count: %d", word, pos, freq[word])
-
-    # 3. sort frequencies in descending order
     sorted_freq = sorted(freq.items(), key=lambda x: x[1], reverse=True)
-    logger.debug("
+    logger.debug("Sorted word frequencies: %s", sorted_freq)
-
-    # 4. build the result DataFrame
     df = pd.DataFrame(sorted_freq, columns=["Word", "Frequency"])
-    logger.debug("
+    logger.debug("Morphological analysis DataFrame created, shape: %s", df.shape)
-
-    # 5. write an Excel file (temporary file)
     temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".xlsx")
     df.to_excel(temp_file.name, index=False, engine='openpyxl')
     temp_file.close()
     logger.debug("Excel file created: %s", temp_file.name)
-
     return df, temp_file.name
 
-#
+# --- Naver Search / SearchAd API (reference code 2) ---
 def generate_signature(timestamp, method, uri, secret_key):
     message = f"{timestamp}.{method}.{uri}"
     digest = hmac.new(secret_key.encode("utf-8"), message.encode("utf-8"), hashlib.sha256).digest()
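The substantive change in analyze_text is the token loop: the old code counted every surviving token, while the new pos.startswith("NN") guard keeps only Mecab noun tags (NNG common nouns, NNP proper nouns, NNB bound nouns, and so on), so particles and verb endings no longer inflate the frequency table. A minimal sketch of the same counting pattern, assuming the python-mecab-ko package that the mecab.MeCab() call suggests:

    import mecab  # python-mecab-ko

    tagger = mecab.MeCab()
    freq: dict[str, int] = {}
    for word, pos in tagger.pos("형태소분석기는명사만센다"):
        if word and word.strip() and pos.startswith("NN"):  # nouns only
            freq[word] = freq.get(word, 0) + 1
    print(freq)  # noun counts only; exact tokens depend on the dictionary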
@@ -148,7 +119,6 @@ def fetch_related_keywords(keyword):
     API_KEY = os.environ["NAVER_API_KEY"]
     SECRET_KEY = os.environ["NAVER_SECRET_KEY"]
     CUSTOMER_ID = os.environ["NAVER_CUSTOMER_ID"]
-
     BASE_URL = "https://api.naver.com"
     uri = "/keywordstool"
     method = "GET"
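For context, generate_signature implements the Naver SearchAd API signing scheme: an HMAC-SHA256 over "{timestamp}.{method}.{uri}" with the secret key, base64-encoded, sent together with the API key and customer id as request headers. A hedged sketch of how such a signature is typically attached (header names follow the public SearchAd API documentation; verify against the API version in use):

    import base64, hashlib, hmac, time

    def signed_headers(method: str, uri: str, api_key: str,
                       secret_key: str, customer_id: str) -> dict:
        # Millisecond timestamp, signed together with method and URI.
        ts = str(round(time.time() * 1000))
        digest = hmac.new(secret_key.encode("utf-8"),
                          f"{ts}.{method}.{uri}".encode("utf-8"),
                          hashlib.sha256).digest()
        return {
            "X-Timestamp": ts,
            "X-API-KEY": api_key,
            "X-Customer": customer_id,
            "X-Signature": base64.b64encode(digest).decode("utf-8"),
        }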
@@ -164,13 +134,11 @@
     df = pd.DataFrame(data["keywordList"])
     if len(df) > 100:
         df = df.head(100)
-
     def parse_count(x):
         try:
             return int(str(x).replace(",", ""))
         except:
             return 0
-
     df["PC Monthly Searches"] = df["monthlyPcQcCnt"].apply(parse_count)
     df["Mobile Monthly Searches"] = df["monthlyMobileQcCnt"].apply(parse_count)
     df["Total Monthly Searches"] = df["PC Monthly Searches"] + df["Mobile Monthly Searches"]
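parse_count exists because the keyword tool does not always return integers: monthly volumes come back either as comma-formatted numbers or, for low-volume keywords, as a string such as "< 10" (per the API's public behavior; worth verifying), and the bare except maps anything non-numeric to 0. For example:

    parse_count("1,200")  # -> 1200
    parse_count("< 10")   # -> 0 (unparseable, treated as zero)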
@@ -209,7 +177,6 @@ def process_keyword(keywords: str, include_related: bool):
     debug_log(f"process_keyword called, keywords: {keywords}, include related: {include_related}")
     input_keywords = [k.strip() for k in keywords.splitlines() if k.strip()]
     result_dfs = []
-
     for idx, kw in enumerate(input_keywords):
         df_kw = fetch_related_keywords(kw)
         if df_kw.empty:
@@ -223,81 +190,86 @@
         df_related = df_kw[df_kw["Keyword"] != kw]
         if not df_related.empty:
             result_dfs.append(df_related)
-
     if result_dfs:
         result_df = pd.concat(result_dfs, ignore_index=True)
         result_df.drop_duplicates(subset=["Keyword"], inplace=True)
     else:
         result_df = pd.DataFrame(columns=["Keyword", "PC Monthly Searches", "Mobile Monthly Searches", "Total Monthly Searches"])
-
     result_df["Blog Post Count"] = result_df["Keyword"].apply(fetch_blog_count)
     result_df.sort_values(by="Total Monthly Searches", ascending=False, inplace=True)
     debug_log("process_keyword complete")
     return result_df, create_excel_file(result_df)
 
-#
+# --- Merge morphological analysis with search volume / blog post count ---
 def morphological_analysis_and_enrich(text: str, remove_freq1: bool):
     debug_log("morphological_analysis_and_enrich started")
     df_freq, _ = analyze_text(text)
     if df_freq.empty:
         debug_log("Morphological analysis returned an empty DataFrame.")
         return df_freq, ""
-
     if remove_freq1:
         before_shape = df_freq.shape
         df_freq = df_freq[df_freq["Frequency"] != 1]
         debug_log(f"Frequency-1 removal applied. {before_shape} -> {df_freq.shape}")
-
-    # extract keywords from the analysis result (one word per line)
     keywords = "\n".join(df_freq["Word"].tolist())
     debug_log(f"Analyzed keywords: {keywords}")
-
-    # use [reference code 2] to look up each keyword's search volume and blog post count (related keywords excluded)
     df_keyword_info, _ = process_keyword(keywords, include_related=False)
     debug_log("Search volume and blog post count lookup complete")
-
-    # merge the analysis result with the search volume info (keyed on the word)
     merged_df = pd.merge(df_freq, df_keyword_info, left_on="Word", right_on="Keyword", how="left")
     merged_df.drop(columns=["Keyword"], inplace=True)
-
-    # write the merged result to an Excel file
     merged_excel_path = create_excel_file(merged_df)
     debug_log("morphological_analysis_and_enrich complete")
     return merged_df, merged_excel_path
 
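Note the how="left" merge above: every analyzed word is kept even when the keyword tool returned no data for it; unmatched rows simply get NaN in the search-volume and blog-count columns rather than being dropped. A toy illustration of that join behavior:

    import pandas as pd

    left = pd.DataFrame({"Word": ["a", "b"]})
    right = pd.DataFrame({"Keyword": ["a"], "Total Monthly Searches": [100]})
    merged = pd.merge(left, right, left_on="Word", right_on="Keyword", how="left")
    # "b" survives, with NaN in "Keyword" and "Total Monthly Searches"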
-#
-def 
-    debug_log("
-    #
+# --- Combined analysis (morphological analysis + direct keyword input) ---
+def combined_analysis(blog_text: str, remove_freq1: bool, direct_keyword_input: str):
+    debug_log("combined_analysis started")
+    # merged result of morphological analysis and search volume / blog post count
+    merged_df, _ = morphological_analysis_and_enrich(blog_text, remove_freq1)
+    # add a "Direct Input" column to the result (initial value: empty string)
+    if "Direct Input" not in merged_df.columns:
+        merged_df["Direct Input"] = ""
+    # list of directly entered keywords (separated by newlines or ',')
+    direct_keywords = re.split(r'[\n,]+', direct_keyword_input)
+    direct_keywords = [kw.strip() for kw in direct_keywords if kw.strip()]
+    debug_log(f"Direct keywords entered: {direct_keywords}")
+    for dk in direct_keywords:
+        if dk in merged_df["Word"].values:
+            merged_df.loc[merged_df["Word"] == dk, "Direct Input"] = "Direct Input"
+        else:
+            freq = blog_text.count(dk)
+            df_direct, _ = process_keyword(dk, include_related=False)
+            if (not df_direct.empty) and (dk in df_direct["Keyword"].values):
+                row = df_direct[df_direct["Keyword"] == dk].iloc[0]
+                pc = row.get("PC Monthly Searches", None)
+                mobile = row.get("Mobile Monthly Searches", None)
+                total = row.get("Total Monthly Searches", None)
+                blog_count = row.get("Blog Post Count", None)
+            else:
+                pc = mobile = total = blog_count = None
+            new_row = {
+                "Word": dk,
+                "Frequency": freq,
+                "PC Monthly Searches": pc,
+                "Mobile Monthly Searches": mobile,
+                "Total Monthly Searches": total,
+                "Blog Post Count": blog_count,
+                "Direct Input": "Direct Input"
+            }
+            merged_df = pd.concat([merged_df, pd.DataFrame([new_row])], ignore_index=True)
+    merged_df = merged_df.sort_values(by="Frequency", ascending=False).reset_index(drop=True)
+    combined_excel = create_excel_file(merged_df)
+    debug_log("combined_analysis complete")
+    return merged_df, combined_excel
 
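combined_analysis is the new single entry point behind the analyze button: it takes the enriched morphological table and folds in the directly entered keywords, marking overlaps in the "Direct Input" column and appending missing ones with a raw substring count (blog_text.count(dk)) as their frequency. Keywords the API knows nothing about get None rather than 0 in the volume columns, which keeps "no data" distinguishable from "zero searches". Expected call shape, matching the Gradio wiring below:

    df, xlsx_path = combined_analysis(blog_text, remove_freq1=True,
                                      direct_keyword_input="keyword1, keyword2\nkeyword3")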
-# Scraping
+# --- Scraping entry point ---
 def fetch_blog_content(url: str):
     debug_log("fetch_blog_content started")
     content = scrape_naver_blog(url)
     debug_log("fetch_blog_content complete")
     return content
 
-# Gradio interface setup
+# --- Gradio interface setup ---
 with gr.Blocks(title="Naver Blog Morphological Analysis Space", css=".gradio-container { max-width: 960px; margin: auto; }") as demo:
     gr.Markdown("# Naver Blog Morphological Analysis Space")
     with gr.Row():
@@ -307,26 +279,18 @@ with gr.Blocks(title="Naver Blog Morphological Analysis Space", css=".
         blog_content_box = gr.Textbox(label="Blog content (editable)", lines=10, placeholder="Scraped blog content will appear here.")
     with gr.Row():
         remove_freq_checkbox = gr.Checkbox(label="Remove frequency-1 words", value=False)
-
-        keyword_input_box = gr.Textbox(label="Direct keyword input (separate with newlines or ',')", lines=2, placeholder="e.g. keyword1, keyword2\nkeyword3")
+        direct_keyword_box = gr.Textbox(label="Direct keyword input (separate with newlines or ',')", lines=2, placeholder="e.g. keyword1, keyword2\nkeyword3")
     with gr.Row():
         analyze_button = gr.Button("Run analysis")
-
-    gr.Markdown("### Morphological Analysis Results")
     with gr.Row():
-
-        morph_excel_file = gr.File(label="Morphological analysis Excel download")
-
-    gr.Markdown("### Direct Keyword Analysis Results")
+        result_df = gr.Dataframe(label="Combined analysis results (Word, Frequency, search volume, Blog Post Count, Direct Input)", interactive=True)
     with gr.Row():
-
-        direct_excel_file = gr.File(label="Direct keyword analysis Excel download")
+        excel_file = gr.File(label="Excel download")
 
-    #
+    # Event wiring
     scrape_button.click(fn=fetch_blog_content, inputs=blog_url_input, outputs=blog_content_box)
-
-
-        outputs=[morph_result_df, morph_excel_file, direct_result_df, direct_excel_file])
+    analyze_button.click(fn=combined_analysis, inputs=[blog_content_box, remove_freq_checkbox, direct_keyword_box],
+                         outputs=[result_df, excel_file])
 
 if __name__ == "__main__":
     debug_log("Gradio app launch started")
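The UI change mirrors the code change: the two result panels (morphological vs. direct keyword) and their two Excel downloads collapse into one gr.Dataframe plus one gr.File. One contract worth noting in the new wiring: Gradio matches a handler's return values to its outputs list positionally, so combined_analysis returning (merged_df, combined_excel) lines up with outputs=[result_df, excel_file], whereas the old handler had to return four values for its four components. A pandas DataFrame renders into gr.Dataframe directly, and the Excel file path string is enough for gr.File.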