Kims12 committed on
Commit
b43fad9
·
verified ·
1 Parent(s): 71ba060

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +136 -246
app.py CHANGED
@@ -1,265 +1,155 @@
 
1
  import time
2
- import hashlib
3
  import hmac
 
4
  import base64
5
  import requests
6
- import gradio as gr
7
- import urllib.request
8
- import urllib.parse
9
- import json
10
  import pandas as pd
11
- from concurrent.futures import ThreadPoolExecutor
12
- import os
13
  import tempfile
14
- from datetime import datetime
15
- from dotenv import load_dotenv # dotenv μΆ”κ°€
16
-
17
- # .env 파일의 ν™˜κ²½ λ³€μˆ˜λ₯Ό λ‘œλ“œν•©λ‹ˆλ‹€.
18
- load_dotenv()
19
-
20
- # ν™˜κ²½ λ³€μˆ˜μ—μ„œ API 킀와 μ‹œν¬λ¦Ώ ν‚€λ₯Ό λΆˆλŸ¬μ˜΅λ‹ˆλ‹€.
21
- BASE_URL = "https://api.searchad.naver.com"
22
- API_KEY = os.environ.get("NAVER_API_KEY")
23
- SECRET_KEY = os.environ.get("NAVER_SECRET_KEY")
24
- CUSTOMER_ID = 2666992
25
-
26
- # ν™˜κ²½ λ³€μˆ˜μ—μ„œ ν΄λΌμ΄μ–ΈνŠΈ ID와 μ‹œν¬λ¦Ώμ„ λΆˆλŸ¬μ˜΅λ‹ˆλ‹€.
27
- CLIENT_ID = os.environ.get("NAVER_CLIENT_ID")
28
- CLIENT_SECRET = os.environ.get("NAVER_CLIENT_SECRET")
29
-
30
- # ν™˜κ²½ λ³€μˆ˜ λ‘œλ“œ 확인
31
- if not API_KEY or not SECRET_KEY or not CLIENT_ID or not CLIENT_SECRET:
32
- raise ValueError("ν•„μˆ˜ ν™˜κ²½ λ³€μˆ˜κ°€ μ„€μ •λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€. .env νŒŒμΌμ„ ν™•μΈν•˜μ„Έμš”.")
33
- else:
34
- print("ν™˜κ²½ λ³€μˆ˜κ°€ μ •μƒμ μœΌλ‘œ λ‘œλ“œλ˜μ—ˆμŠ΅λ‹ˆλ‹€.")
35
-
36
- class NaverAPI:
37
- def __init__(self, base_url, api_key, secret_key, customer_id):
38
- self.base_url = base_url
39
- self.api_key = api_key
40
- self.secret_key = secret_key
41
- self.customer_id = customer_id
42
-
43
- def generate_signature(self, timestamp, method, path):
44
- sign = f"{timestamp}.{method}.{path}"
45
- signature = hmac.new(self.secret_key.encode('utf-8'), sign.encode('utf-8'), hashlib.sha256).digest()
46
- return base64.b64encode(signature).decode('utf-8')
47
-
48
- def get_timestamp(self):
49
- return str(int(time.time() * 1000))
50
-
51
- def get_headers(self, method, uri):
52
- timestamp = self.get_timestamp()
53
- headers = {
54
- 'Content-Type': 'application/json; charset=UTF-8',
55
- 'X-Timestamp': timestamp,
56
- 'X-API-KEY': self.api_key,
57
- 'X-Customer': str(self.customer_id),
58
- 'X-Signature': self.generate_signature(timestamp, method, uri),
59
- }
60
- return headers
61
-
62
- def get_keywords_data(self, keywords):
63
- uri = "/keywordstool"
64
- method = "GET"
65
- query = {
66
- 'hintKeywords': ','.join(keywords),
67
- 'showDetail': 1
68
- }
69
- headers = self.get_headers(method, uri)
70
- response = requests.get(self.base_url + uri, headers=headers, params=query)
71
- response.raise_for_status() # HTTP 였λ₯˜ λ°œμƒ μ‹œ μ˜ˆμ™Έ λ°œμƒ
72
- return response.json()
73
-
74
- def get_blog_count(keyword):
75
- # ν΄λΌμ΄μ–ΈνŠΈ ID와 μ‹œν¬λ¦Ώμ„ ν™˜κ²½ λ³€μˆ˜μ—μ„œ λΆˆλŸ¬μ˜΅λ‹ˆλ‹€.
76
- client_id = CLIENT_ID
77
- client_secret = CLIENT_SECRET
78
-
79
- # keywordκ°€ λ°”μ΄νŠΈ νƒ€μž…μΌ 경우 λ””μ½”λ”©
80
- if isinstance(keyword, bytes):
81
- keyword = keyword.decode('utf-8')
82
- elif not isinstance(keyword, str):
83
- keyword = str(keyword)
84
 
85
- encText = urllib.parse.quote(keyword)
86
- url = "https://openapi.naver.com/v1/search/blog?query=" + encText
87
- request = urllib.request.Request(url)
88
- request.add_header("X-Naver-Client-Id", client_id)
89
- request.add_header("X-Naver-Client-Secret", client_secret)
90
- try:
91
- response = urllib.request.urlopen(request)
92
- rescode = response.getcode()
93
- if rescode == 200:
94
- response_body = response.read()
95
- data = json.loads(response_body.decode('utf-8'))
96
- return data.get('total', 0)
97
- else:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
98
  return 0
99
- except Exception as e:
100
- print(f"Error fetching blog count for keyword '{keyword}': {e}")
101
- return 0
102
 
103
- def get_keywords_data_chunk(chunk):
104
- api = NaverAPI(BASE_URL, API_KEY, SECRET_KEY, CUSTOMER_ID)
105
- return api.get_keywords_data(chunk)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
106
 
107
- def get_blog_count_parallel(keyword):
108
- return (keyword, get_blog_count(keyword))
 
 
 
 
109
 
110
- def get_search_volumes(keyword):
 
111
  """
112
- 단일 ν‚€μ›Œλ“œμ˜ μ›” κ²€μƒ‰λŸ‰μ„ κ°€μ Έμ˜€λŠ” ν•¨μˆ˜.
 
 
 
113
  """
114
- api = NaverAPI(BASE_URL, API_KEY, SECRET_KEY, CUSTOMER_ID)
115
- try:
116
- data = api.get_keywords_data([keyword])
117
- if 'keywordList' in data and len(data['keywordList']) > 0:
118
- # keywordListμ—μ„œ μž…λ ₯ν•œ ν‚€μ›Œλ“œμ™€ μΌμΉ˜ν•˜λŠ” ν•­λͺ©μ„ μ°ΎμŠ΅λ‹ˆλ‹€.
119
- for item in data['keywordList']:
120
- if item['relKeyword'].strip().lower() == keyword.strip().lower():
121
- monthly_pc = item.get('monthlyPcQcCnt', 0)
122
- monthly_mobile = item.get('monthlyMobileQcCnt', 0)
123
-
124
- if isinstance(monthly_pc, str):
125
- monthly_pc = monthly_pc.replace(',', '').replace('< 10', '0')
126
- try:
127
- monthly_pc = int(monthly_pc)
128
- except ValueError:
129
- monthly_pc = 0
130
- if isinstance(monthly_mobile, str):
131
- monthly_mobile = monthly_mobile.replace(',', '').replace('< 10', '0')
132
- try:
133
- monthly_mobile = int(monthly_mobile)
134
- except ValueError:
135
- monthly_mobile = 0
136
-
137
- total_searches = monthly_pc + monthly_mobile
138
- return (keyword, monthly_pc, monthly_mobile, total_searches)
139
- # μž…λ ₯ν•œ ν‚€μ›Œλ“œμ™€ μΌμΉ˜ν•˜λŠ” ν•­λͺ©μ΄ 없을 경우
140
- return (keyword, 0, 0, 0)
141
  else:
142
- return (keyword, 0, 0, 0)
143
- except Exception as e:
144
- print(f"Error fetching search volumes for keyword '{keyword}': {e}")
145
- return (keyword, 0, 0, 0)
146
-
147
- def get_monthly_search_volumes(keywords, include_related_keywords=True):
148
- all_data = []
149
- results = []
150
-
151
- if include_related_keywords:
152
- chunk_size = 10 # ν‚€μ›Œλ“œλ₯Ό 10κ°œμ”© λ‚˜λˆ„μ–΄ μš”μ²­
153
- # API 병렬 μš”μ²­
154
- with ThreadPoolExecutor(max_workers=5) as executor:
155
- futures = [executor.submit(get_keywords_data_chunk, keywords[i:i+chunk_size]) for i in range(0, len(keywords), chunk_size)]
156
- for future in futures:
157
- try:
158
- data = future.result()
159
- if 'keywordList' in data:
160
- all_data.extend(data['keywordList'])
161
- except Exception as e:
162
- print(f"Error fetching keywords data chunk: {e}")
163
-
164
- if not all_data:
165
- return [("Error", "데이터가 λ°˜ν™˜λ˜μ§€ μ•Šμ•˜κ±°λ‚˜ API 응닡이 μœ νš¨ν•˜μ§€ μ•ŠμŠ΅λ‹ˆλ‹€.", "", "", "")]
166
 
167
- unique_keywords = set()
168
- for item in all_data:
169
- keyword = item['relKeyword']
170
- if keyword not in unique_keywords:
171
- unique_keywords.add(keyword)
172
- monthly_pc = item.get('monthlyPcQcCnt', 0)
173
- monthly_mobile = item.get('monthlyMobileQcCnt', 0)
174
-
175
- if isinstance(monthly_pc, str):
176
- monthly_pc = monthly_pc.replace(',', '').replace('< 10', '0')
177
- try:
178
- monthly_pc = int(monthly_pc)
179
- except ValueError:
180
- monthly_pc = 0
181
- if isinstance(monthly_mobile, str):
182
- monthly_mobile = monthly_mobile.replace(',', '').replace('< 10', '0')
183
- try:
184
- monthly_mobile = int(monthly_mobile)
185
- except ValueError:
186
- monthly_mobile = 0
187
-
188
- total_searches = monthly_pc + monthly_mobile
189
- results.append((keyword, monthly_pc, monthly_mobile, total_searches))
190
-
191
- if len(results) >= 100:
192
- break
193
-
194
  else:
195
- # 연관검색어λ₯Ό ν¬ν•¨ν•˜μ§€ μ•ŠμœΌλ―€λ‘œ μž…λ ₯ ν‚€μ›Œλ“œλ§Œ 처리
196
- with ThreadPoolExecutor(max_workers=5) as executor:
197
- futures = [executor.submit(get_search_volumes, keyword) for keyword in keywords]
198
- for future in futures:
199
- try:
200
- result = future.result()
201
- results.append(result)
202
- except Exception as e:
203
- print(f"Error fetching search volumes for keyword '{keyword}': {e}")
204
- results.append((keyword, 0, 0, 0))
205
 
206
- if not results:
207
- return [("Error", "데이터가 λ°˜ν™˜λ˜μ§€ μ•Šμ•˜κ±°λ‚˜ API 응닡이 μœ νš¨ν•˜μ§€ μ•ŠμŠ΅λ‹ˆλ‹€.", "", "", "")]
208
 
209
- # λΈ”λ‘œκ·Έ λ¬Έμ„œ 수 병렬 μš”μ²­
210
- with ThreadPoolExecutor(max_workers=5) as executor:
211
- if include_related_keywords:
212
- blog_futures = [executor.submit(get_blog_count_parallel, result[0]) for result in results]
213
- for i, future in enumerate(blog_futures):
214
- try:
215
- keyword, blog_count = future.result()
216
- results[i] = (results[i][0], results[i][1], results[i][2], results[i][3], blog_count)
217
- except Exception as e:
218
- print(f"Error fetching blog count for keyword '{results[i][0]}': {e}")
219
- results[i] = (results[i][0], results[i][1], results[i][2], results[i][3], "Error")
220
- else:
221
- blog_futures = [executor.submit(get_blog_count_parallel, result[0]) for result in results]
222
- temp_results = []
223
- for future in blog_futures:
224
- try:
225
- keyword, blog_count = future.result()
226
- temp_results.append((keyword, results[0][1], results[0][2], results[0][3], blog_count))
227
- except Exception as e:
228
- print(f"Error fetching blog count for keyword '{keyword}': {e}")
229
- temp_results.append((keyword, results[0][1], results[0][2], results[0][3], "Error"))
230
- results = temp_results
231
-
232
- return results
233
-
234
- def save_to_excel(results, keyword):
235
- df = pd.DataFrame(results, columns=["ν‚€μ›Œλ“œ", "PCμ›”κ²€μƒ‰λŸ‰", "λͺ¨λ°”μΌμ›”κ²€μƒ‰λŸ‰", "ν† νƒˆμ›”κ²€μƒ‰λŸ‰", "λΈ”λ‘œκ·Έλ¬Έμ„œμˆ˜"])
236
- now = datetime.now().strftime('%Y-%m-%d')
237
- sanitized_keyword = keyword.replace(' ', '_')
238
- filename = f"{now}_{sanitized_keyword}_연관검색어.xlsx"
239
- file_path = os.path.join(tempfile.gettempdir(), filename)
240
- df.to_excel(file_path, index=False)
241
- return file_path
242
-
243
- def display_search_volumes(keywords, include_related):
244
- keyword_list = [keyword.strip() for keyword in keywords.split(',') if keyword.strip()]
245
- if not keyword_list:
246
- return [("Error", "μž…λ ₯된 ν‚€μ›Œλ“œκ°€ μ—†μŠ΅λ‹ˆλ‹€.", "", "", "")], None
247
- results = get_monthly_search_volumes(keyword_list, include_related_keywords=include_related)
248
- file_path = save_to_excel(results, keywords)
249
- return results, file_path
250
-
251
- iface = gr.Interface(
252
- fn=display_search_volumes,
253
- inputs=[
254
- gr.Textbox(placeholder="ν‚€μ›Œλ“œλ₯Ό μž…λ ₯ν•˜μ„Έμš” (μ‰Όν‘œλ‘œ ꡬ뢄)", lines=2),
255
- gr.Checkbox(label="연관검색어 포함", value=True) # 연관검색어 ν† κΈ€ μΆ”κ°€
256
- ],
257
- outputs=[
258
- gr.Dataframe(headers=["ν‚€μ›Œλ“œ", "PCμ›”κ²€μƒ‰λŸ‰", "λͺ¨λ°”μΌμ›”κ²€μƒ‰λŸ‰", "ν† νƒˆμ›”κ²€μƒ‰λŸ‰", "λΈ”λ‘œκ·Έλ¬Έμ„œμˆ˜"]),
259
- gr.File(label="λ‹€μš΄λ‘œλ“œ μ—‘μ…€ 파일")
260
- ],
261
- title="넀이버 μ›”κ²€μƒ‰λŸ‰ 검색기",
262
- description="ν‚€μ›Œλ“œμ˜ μ›” κ²€μƒ‰λŸ‰κ³Ό λΈ”λ‘œκ·Έ λ¬Έμ„œ 수λ₯Ό 확인할 수 μžˆμŠ΅λ‹ˆλ‹€. 연관검색어λ₯Ό 포함할지 μ„ νƒν•˜μ„Έμš”.",
263
- )
264
 
265
- iface.launch(share=True) # share=Trueλ₯Ό μΆ”κ°€ν•˜μ—¬ 곡개 링크 생성
 
 
1
+ import os
2
  import time
 
3
  import hmac
4
+ import hashlib
5
  import base64
6
  import requests
 
 
 
 
7
  import pandas as pd
 
 
8
  import tempfile
9
+ import gradio as gr
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
# Build the request signature required by the Naver SearchAd API.
def generate_signature(timestamp, method, uri, secret_key):
    """Return the base64-encoded HMAC-SHA256 signature for an API call.

    The signed message is "<timestamp>.<method>.<uri>", keyed with the
    account's UTF-8-encoded secret key.
    """
    payload = ".".join((timestamp, method, uri)).encode("utf-8")
    mac = hmac.new(secret_key.encode("utf-8"), payload, hashlib.sha256)
    return base64.b64encode(mac.digest()).decode()
16
+
17
# Assemble the authentication headers for a Naver SearchAd API call.
def get_header(method, uri, api_key, secret_key, customer_id):
    """Return the HTTP headers (timestamp, API key, customer id, and
    HMAC signature) that authenticate one SearchAd request."""
    # Millisecond timestamp; the same value must be used in the signature.
    ts = str(round(time.time() * 1000))
    headers = {
        "Content-Type": "application/json; charset=UTF-8",
        "X-Timestamp": ts,
        "X-API-KEY": api_key,
        "X-Customer": str(customer_id),
        "X-Signature": generate_signature(ts, method, uri, secret_key),
    }
    return headers
28
+
29
# Fetch related keywords and monthly search-volume stats for one keyword
# from the Naver SearchAd keyword tool.
def fetch_related_keywords(keyword):
    """Query the Naver SearchAd ``/keywordstool`` endpoint for *keyword*.

    Returns a DataFrame with columns ["μ •λ³΄ν‚€μ›Œλ“œ", "PCμ›”κ²€μƒ‰λŸ‰",
    "λͺ¨λ°”μΌμ›”κ²€μƒ‰λŸ‰", "ν† νƒˆμ›”κ²€μƒ‰λŸ‰"], capped at 100 rows, or an empty
    DataFrame when the API response has no "keywordList".

    Raises KeyError when the required environment variables are missing.
    """
    # Credentials are read per call from the environment.
    API_KEY = os.environ["NAVER_API_KEY"]
    SECRET_KEY = os.environ["NAVER_SECRET_KEY"]
    CUSTOMER_ID = os.environ["NAVER_CUSTOMER_ID"]

    # FIX: the documented SearchAd host (also used by the previous revision
    # of this file) is api.searchad.naver.com; "https://api.naver.com" is
    # not the endpoint this signature scheme belongs to.
    BASE_URL = "https://api.searchad.naver.com"
    uri = "/keywordstool"
    method = "GET"
    headers = get_header(method, uri, API_KEY, SECRET_KEY, CUSTOMER_ID)
    params = {
        "hintKeywords": [keyword],
        "showDetail": "1"
    }
    response = requests.get(BASE_URL + uri, params=params, headers=headers)
    data = response.json()
    if "keywordList" not in data:
        return pd.DataFrame()
    df = pd.DataFrame(data["keywordList"])
    # Keep at most 100 related keywords.
    if len(df) > 100:
        df = df.head(100)

    def parse_count(x):
        # Counts may arrive as strings like "1,234" or "< 10"; anything
        # unparseable (including "< 10") is treated as 0.
        # FIX: narrowed the bare except so unrelated errors are not hidden.
        try:
            return int(str(x).replace(",", ""))
        except (ValueError, TypeError):
            return 0

    df["PCμ›”κ²€μƒ‰λŸ‰"] = df["monthlyPcQcCnt"].apply(parse_count)
    df["λͺ¨λ°”μΌμ›”κ²€μƒ‰λŸ‰"] = df["monthlyMobileQcCnt"].apply(parse_count)
    df["ν† νƒˆμ›”κ²€μƒ‰λŸ‰"] = df["PCμ›”κ²€μƒ‰λŸ‰"] + df["λͺ¨λ°”μΌμ›”κ²€μƒ‰λŸ‰"]
    df.rename(columns={"relKeyword": "μ •λ³΄ν‚€μ›Œλ“œ"}, inplace=True)
    result_df = df[["μ •λ³΄ν‚€μ›Œλ“œ", "PCμ›”κ²€μƒ‰λŸ‰", "λͺ¨λ°”μΌμ›”κ²€μƒ‰λŸ‰", "ν† νƒˆμ›”κ²€μƒ‰λŸ‰"]]
    return result_df
65
+
66
# Look up how many Naver blog posts match a keyword, via the Naver
# Search (developer) API.
def fetch_blog_count(keyword):
    """Return the total blog-document count for *keyword*; 0 on any
    non-200 HTTP response.

    Raises KeyError when the search-API credentials are not set in the
    environment.
    """
    auth_headers = {
        "X-Naver-Client-Id": os.environ["NAVER_SEARCH_CLIENT_ID"],
        "X-Naver-Client-Secret": os.environ["NAVER_SEARCH_CLIENT_SECRET"]
    }
    resp = requests.get(
        "https://openapi.naver.com/v1/search/blog.json",
        headers=auth_headers,
        params={"query": keyword, "display": 1},
    )
    if resp.status_code != 200:
        return 0
    return resp.json().get("total", 0)
83
 
84
# Write a DataFrame to a temporary .xlsx file and return its path.
def create_excel_file(df):
    """Persist *df* as an Excel file in the system temp directory.

    The file is created with delete=False, so it survives this call;
    cleanup is the caller's responsibility.
    """
    tmp = tempfile.NamedTemporaryFile(suffix=".xlsx", delete=False)
    tmp.close()  # close the handle first; pandas reopens the path itself
    df.to_excel(tmp.name, index=False)
    return tmp.name
90
 
91
# Handle the multi-keyword input coming from the UI.
def process_keyword(keywords: str, include_related: bool):
    """
    1. Split the textbox input on newlines and query the Naver SearchAd
       API for each keyword's search-volume data.
    2. For every keyword, include the row for the typed keyword itself.
    3. When *include_related* is True, additionally append the related
       keywords (everything except the typed keyword) — first keyword only.
    4. Finally, add a "λΈ”λ‘œκ·Έλ¬Έμ„œμˆ˜" column by querying the Naver blog
       search API for each "μ •λ³΄ν‚€μ›Œλ“œ".

    Returns (result DataFrame sorted by total volume, path to xlsx file).
    """
    # Newline-separated keywords; blank lines are dropped.
    terms = [line.strip() for line in keywords.splitlines() if line.strip() != ""]
    frames = []

    for position, term in enumerate(terms):
        volume_df = fetch_related_keywords(term)
        if volume_df.empty:
            continue
        # Prefer the exact row matching the typed keyword...
        exact = volume_df[volume_df["μ •λ³΄ν‚€μ›Œλ“œ"] == term]
        if exact.empty:
            # ...falling back to the first returned row when absent.
            frames.append(volume_df.head(1))
        else:
            frames.append(exact)

        # Related keywords are appended only for the first input keyword
        # and only when the checkbox is ticked.
        if include_related and position == 0:
            related = volume_df[volume_df["μ •λ³΄ν‚€μ›Œλ“œ"] != term]
            if not related.empty:
                frames.append(related)

    if frames:
        result_df = pd.concat(frames, ignore_index=True)
        result_df.drop_duplicates(subset=["μ •λ³΄ν‚€μ›Œλ“œ"], inplace=True)
    else:
        # Nothing came back: keep the expected schema so downstream
        # column access still works.
        result_df = pd.DataFrame(columns=["μ •λ³΄ν‚€μ›Œλ“œ", "PCμ›”κ²€μƒ‰λŸ‰", "λͺ¨λ°”μΌμ›”κ²€μƒ‰λŸ‰", "ν† νƒˆμ›”κ²€μƒ‰λŸ‰"])

    # One blog-search API call per surviving keyword.
    result_df["λΈ”λ‘œκ·Έλ¬Έμ„œμˆ˜"] = result_df["μ •λ³΄ν‚€μ›Œλ“œ"].apply(fetch_blog_count)

    result_df.sort_values(by="ν† νƒˆμ›”κ²€μƒ‰λŸ‰", ascending=False, inplace=True)
    return result_df, create_excel_file(result_df)
132
+
133
# Gradio UI layout
with gr.Blocks() as demo:
    gr.Markdown("### 넀이버 연관검색어 및 κ²€μƒ‰λŸ‰, λΈ”λ‘œκ·Έ λ¬Έμ„œμˆ˜ 쑰회 μ•±")
    gr.Markdown(
        "μ—¬λŸ¬ ν‚€μ›Œλ“œλ₯Ό μ—”ν„°λ‘œ κ΅¬λΆ„ν•˜μ—¬ μž…λ ₯ν•˜λ©΄ 각 ν‚€μ›Œλ“œμ˜ κ²€μƒ‰λŸ‰ 정보λ₯Ό μ‘°νšŒν•˜κ³ , "
        "첫 번째 ν‚€μ›Œλ“œμ˜ 경우 '연관검색어 포함' 체크 μ‹œ 연관검색어도 ν•¨κ»˜ μ‘°νšŒν•©λ‹ˆλ‹€. "
        "λ˜ν•œ, 각 μ •λ³΄ν‚€μ›Œλ“œμ— λŒ€ν•œ 넀이버 λΈ”λ‘œκ·Έ λ¬Έμ„œμˆ˜λ„ ν•¨κ»˜ 좜λ ₯λ©λ‹ˆλ‹€."
    )

    # Input row: keyword textbox (one keyword per line), the
    # related-keywords toggle, and the search trigger button.
    with gr.Row():
        keyword_input = gr.Textbox(label="ν‚€μ›Œλ“œ μž…λ ₯ (μ—¬λŸ¬ 개일 경우 μ—”ν„°λ‘œ ꡬ뢄)", lines=5, placeholder="예:\nκ°•μ›λ„ν’€λΉŒλΌ\nμžλ°”μŠ€ν¬λ¦½νŠΈ")
        include_checkbox = gr.Checkbox(label="연관검색어 포함 (첫번째 ν‚€μ›Œλ“œμ— ν•œν•¨)", value=False)
        search_button = gr.Button("검색")

    # Output row: the result table and the generated Excel download.
    with gr.Row():
        df_output = gr.Dataframe(label="검색 κ²°κ³Ό")
        excel_output = gr.File(label="μ—‘μ…€ λ‹€μš΄λ‘œλ“œ")

    # Run process_keyword when the button is clicked.
    search_button.click(fn=process_keyword, inputs=[keyword_input, include_checkbox], outputs=[df_output, excel_output])

# Launch the app (deployable on Hugging Face Spaces)
demo.launch()