Update app.py
app.py CHANGED
@@ -100,12 +100,12 @@ def get_keywords_data_chunk(chunk):
 def get_blog_count_parallel(keyword):
     return (keyword, get_blog_count(keyword))
 
-def get_monthly_search_volumes(keywords, include_related):
+def get_monthly_search_volumes(keywords, include_related_keywords=True):
     all_data = []
     chunk_size = 10  # split the keywords into chunks of 10 per request
 
-    if include_related:
-        #
+    if include_related_keywords:
+        # Send the API requests in parallel
         with ThreadPoolExecutor(max_workers=5) as executor:
             futures = [executor.submit(get_keywords_data_chunk, keywords[i:i+chunk_size]) for i in range(0, len(keywords), chunk_size)]
             for future in futures:
@@ -116,8 +116,14 @@ def get_monthly_search_volumes(keywords, include_related):
             except Exception as e:
                 print(f"Error fetching keywords data chunk: {e}")
     else:
-        #
-
+        # Related keywords are excluded, so only the input keywords are processed
+        for keyword in keywords:
+            # Placeholder entry shaped like the API response data
+            all_data.append({
+                'relKeyword': keyword,
+                'monthlyPcQcCnt': '0',  # fetching the real value would require a separate API request
+                'monthlyMobileQcCnt': '0'
+            })
 
     if not all_data:
         return [("Error", "No data was returned or the API response is invalid.", "", "", "")]  # blog post count column added
@@ -139,19 +145,33 @@ def get_monthly_search_volumes(keywords, include_related):
             total_searches = monthly_pc + monthly_mobile
             results.append((keyword, monthly_pc, monthly_mobile, total_searches))
 
-            if len(results) >= 100:
+            if len(results) >= 100 and include_related_keywords:
                 break
 
-
-
-
-
-
-
-
-
-
-
+    if include_related_keywords:
+        # Fetch the blog post counts in parallel
+        with ThreadPoolExecutor(max_workers=5) as executor:
+            blog_futures = [executor.submit(get_blog_count_parallel, result[0]) for result in results]
+            for i, future in enumerate(blog_futures):
+                try:
+                    keyword, blog_count = future.result()
+                    results[i] = (results[i][0], results[i][1], results[i][2], results[i][3], blog_count)
+                except Exception as e:
+                    print(f"Error fetching blog count for keyword '{results[i][0]}': {e}")
+                    results[i] = (results[i][0], results[i][1], results[i][2], results[i][3], "Error")
+    else:
+        # Fetch the blog post counts in parallel (one request per keyword when related keywords are off)
+        with ThreadPoolExecutor(max_workers=5) as executor:
+            blog_futures = [executor.submit(get_blog_count_parallel, keyword) for keyword in results]
+            temp_results = []
+            for future in blog_futures:
+                try:
+                    keyword, blog_count = future.result()
+                    temp_results.append((keyword, 0, 0, 0, blog_count))
+                except Exception as e:
+                    print(f"Error fetching blog count for keyword '{keyword}': {e}")
+                    temp_results.append((keyword, 0, 0, 0, "Error"))
+            results = temp_results
 
     return results
 
@@ -166,7 +186,7 @@ def save_to_excel(results, keyword):
 
 def display_search_volumes(keywords, include_related):
     keyword_list = [keyword.strip() for keyword in keywords.split(',')]
-    results = get_monthly_search_volumes(keyword_list, include_related)
+    results = get_monthly_search_volumes(keyword_list, include_related_keywords=include_related)
     file_path = save_to_excel(results, keywords)
     return results, file_path
 
@@ -174,14 +194,14 @@ iface = gr.Interface(
     fn=display_search_volumes,
     inputs=[
         gr.Textbox(placeholder="Enter keywords"),
-        gr.Checkbox(label="Include related keywords", value=True)  # related keywords
+        gr.Checkbox(label="Include related keywords", value=True)  # related-keywords toggle added
     ],
     outputs=[
         gr.Dataframe(headers=["Keyword", "PC monthly searches", "Mobile monthly searches", "Total monthly searches", "Blog post count"]),
         gr.File(label="Download Excel file")
     ],
     title="Naver Monthly Search Volume Checker",
-    description="
+    description="Check the monthly search volume and blog post count for each keyword. Choose whether to include related keywords.",
 )
 
 iface.launch(share=True)  # share=True creates a public link
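The change leans on one pattern twice: split the work into fixed-size chunks, submit each chunk to a ThreadPoolExecutor, and collect results from the futures in order. Below is a minimal, self-contained sketch of that pattern; fetch_chunk is a hypothetical stand-in for the app's get_keywords_data_chunk and does no real network I/O, so the values are placeholders rather than actual Naver API responses.

from concurrent.futures import ThreadPoolExecutor

def fetch_chunk(chunk):
    # Stand-in: return rows shaped like the keyword-volume API response.
    return [{'relKeyword': kw, 'monthlyPcQcCnt': '0', 'monthlyMobileQcCnt': '0'} for kw in chunk]

def fetch_all(keywords, chunk_size=10, max_workers=5):
    all_data = []
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        # One future per chunk of up to chunk_size keywords.
        futures = [executor.submit(fetch_chunk, keywords[i:i + chunk_size])
                   for i in range(0, len(keywords), chunk_size)]
        for future in futures:
            try:
                all_data.extend(future.result())
            except Exception as e:
                print(f"Error fetching keywords data chunk: {e}")
    return all_data

if __name__ == "__main__":
    print(len(fetch_all([f"keyword{i}" for i in range(23)])))  # 23 rows collected from 3 chunks

The chunk size of 10 and max_workers=5 mirror the values hard-coded in app.py; the same executor pattern is reused there for the per-keyword blog-count lookups.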