Update app.py
app.py CHANGED
@@ -12,6 +12,10 @@ from concurrent.futures import ThreadPoolExecutor
 import os
 import tempfile
 from datetime import datetime
+from dotenv import load_dotenv  # dotenv added
+
+# Load environment variables from the .env file.
+load_dotenv()
 
 # Load the API key and secret key from environment variables.
 BASE_URL = "https://api.searchad.naver.com"
@@ -19,6 +23,14 @@ API_KEY = os.environ.get("NAVER_API_KEY")
 SECRET_KEY = os.environ.get("NAVER_SECRET_KEY")
 CUSTOMER_ID = 2666992
 
+# Load the client ID and client secret from environment variables.
+CLIENT_ID = os.environ.get("NAVER_CLIENT_ID")
+CLIENT_SECRET = os.environ.get("NAVER_CLIENT_SECRET")
+
+# Check that the environment variables were loaded.
+if not API_KEY or not SECRET_KEY or not CLIENT_ID or not CLIENT_SECRET:
+    raise ValueError("Required environment variables are not set. Check the .env file.")
+
 class NaverAPI:
     def __init__(self, base_url, api_key, secret_key, customer_id):
         self.base_url = base_url
@@ -58,20 +70,25 @@ class NaverAPI:
         return response.json()
 
 def get_blog_count(keyword):
-
-
+    # Load the client ID and client secret from environment variables.
+    client_id = CLIENT_ID
+    client_secret = CLIENT_SECRET
     encText = urllib.parse.quote(keyword)
     url = "https://openapi.naver.com/v1/search/blog?query=" + encText
     request = urllib.request.Request(url)
     request.add_header("X-Naver-Client-Id", client_id)
    request.add_header("X-Naver-Client-Secret", client_secret)
-
-
-
-
-
-
-
+    try:
+        response = urllib.request.urlopen(request)
+        rescode = response.getcode()
+        if rescode == 200:
+            response_body = response.read()
+            data = json.loads(response_body.decode('utf-8'))
+            return data['total']
+        else:
+            return 0
+    except Exception as e:
+        print(f"Error fetching blog count for keyword '{keyword}': {e}")
    return 0
 
 def get_keywords_data_chunk(chunk):
@@ -89,9 +106,12 @@ def get_monthly_search_volumes(keywords):
     with ThreadPoolExecutor(max_workers=5) as executor:
         futures = [executor.submit(get_keywords_data_chunk, keywords[i:i+chunk_size]) for i in range(0, len(keywords), chunk_size)]
         for future in futures:
-
-
-
+            try:
+                data = future.result()
+                if 'keywordList' in data:
+                    all_data.extend(data['keywordList'])
+            except Exception as e:
+                print(f"Error fetching keywords data chunk: {e}")
 
     if not all_data:
         return [("Error", "No data was returned, or the API response is invalid.", "", "", "")]  # blog post count column added
@@ -120,9 +140,13 @@ def get_monthly_search_volumes(keywords):
     with ThreadPoolExecutor(max_workers=5) as executor:
         blog_futures = [executor.submit(get_blog_count_parallel, result[0]) for result in results]
         for i, future in enumerate(blog_futures):
-
-
-
+            try:
+                keyword, blog_count = future.result()
+                results[i] = (results[i][0], results[i][1], results[i][2], results[i][3], blog_count)
+            except Exception as e:
+                print(f"Error fetching blog count for keyword '{results[i][0]}': {e}")
+                results[i] = (results[i][0], results[i][1], results[i][2], results[i][3], "Error")
+
     return results
 
 def save_to_excel(results, keyword):
@@ -150,4 +174,4 @@ iface = gr.Interface(
     title="Naver Monthly Search Volume Finder",
 )
 
-iface.launch()
+iface.launch(share=True)  # share=True added to create a public link
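The updated code reads four Naver credentials from a .env file (CUSTOMER_ID remains hardcoded). A minimal sketch of that file, with placeholder values; the variable names are the ones the os.environ.get() calls above expect:

NAVER_API_KEY=your-searchad-api-key
NAVER_SECRET_KEY=your-searchad-secret-key
NAVER_CLIENT_ID=your-openapi-client-id
NAVER_CLIENT_SECRET=your-openapi-client-secret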
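As a quick smoke test of the reworked get_blog_count (a hypothetical snippet, assuming the .env file above is in place and app.py's imports are loaded; the keyword is arbitrary):

if __name__ == "__main__":
    # Prints the 'total' field of the blog search response, or 0 on any error.
    print(get_blog_count("python"))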
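Both executor hunks now wrap future.result() in try/except, so a single failed request is logged instead of aborting the whole batch. A self-contained sketch of the pattern (illustrative names, not from app.py):

from concurrent.futures import ThreadPoolExecutor

def fetch(n):
    # Simulate one of five requests failing.
    if n == 2:
        raise RuntimeError("simulated API failure")
    return n * n

results = []
with ThreadPoolExecutor(max_workers=5) as executor:
    futures = [executor.submit(fetch, n) for n in range(5)]
    for future in futures:
        try:
            results.append(future.result())  # re-raises the worker's exception, if any
        except Exception as e:
            print(f"Error in worker: {e}")  # log and continue with the rest
print(results)  # [0, 1, 9, 16]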