Kims12 commited on
Commit
8dc8873
·
verified ·
1 Parent(s): a151030

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -40
app.py CHANGED
@@ -12,21 +12,12 @@ from concurrent.futures import ThreadPoolExecutor
12
  import os
13
  import tempfile
14
  from datetime import datetime
15
- from dotenv import load_dotenv # dotenv 추가
16
 
17
- # .env 파일의 환경 변수를 로드합니다.
18
- load_dotenv()
19
-
20
- # 환경 변수에서 API 키와 시크릿 키를 불러옵니다.
21
  BASE_URL = "https://api.searchad.naver.com"
22
- API_KEY = os.environ.get("NAVER_API_KEY")
23
- SECRET_KEY = os.environ.get("NAVER_SECRET_KEY")
24
  CUSTOMER_ID = 2666992
25
 
26
- # 환경 변수에서 클라이언트 ID와 시크릿을 불러옵니다.
27
- CLIENT_ID = os.environ.get("NAVER_CLIENT_ID")
28
- CLIENT_SECRET = os.environ.get("NAVER_CLIENT_SECRET")
29
-
30
  class NaverAPI:
31
  def __init__(self, base_url, api_key, secret_key, customer_id):
32
  self.base_url = base_url
@@ -66,25 +57,20 @@ class NaverAPI:
66
  return response.json()
67
 
68
  def get_blog_count(keyword):
69
- # ํด๋ผ์ด์–ธํŠธ ID์™€ ์‹œํฌ๋ฆฟ์„ ํ™˜๊ฒฝ ๋ณ€์ˆ˜์—์„œ ๋ถˆ๋Ÿฌ์˜ต๋‹ˆ๋‹ค.
70
- client_id = CLIENT_ID
71
- client_secret = CLIENT_SECRET
72
  encText = urllib.parse.quote(keyword)
73
  url = "https://openapi.naver.com/v1/search/blog?query=" + encText
74
  request = urllib.request.Request(url)
75
  request.add_header("X-Naver-Client-Id", client_id)
76
  request.add_header("X-Naver-Client-Secret", client_secret)
77
- try:
78
- response = urllib.request.urlopen(request)
79
- rescode = response.getcode()
80
- if rescode == 200:
81
- response_body = response.read()
82
- data = json.loads(response_body.decode('utf-8'))
83
- return data['total']
84
- else:
85
- return 0
86
- except Exception as e:
87
- print(f"Error fetching blog count for keyword '{keyword}': {e}")
88
  return 0
89
 
90
  def get_keywords_data_chunk(chunk):
@@ -102,16 +88,13 @@ def get_monthly_search_volumes(keywords):
102
  with ThreadPoolExecutor(max_workers=5) as executor:
103
  futures = [executor.submit(get_keywords_data_chunk, keywords[i:i+chunk_size]) for i in range(0, len(keywords), chunk_size)]
104
  for future in futures:
105
- try:
106
- data = future.result()
107
- if 'keywordList' in data:
108
- all_data.extend(data['keywordList'])
109
- except Exception as e:
110
- print(f"Error fetching keywords data chunk: {e}")
111
 
112
  if not all_data:
113
- return [("Error", "데이터가 반환되지 않았거나 API 응답이 유효하지 않습니다.", "", "", "")] # 블로그 문서 수 칼럼 추가
114
-
115
  results = []
116
  unique_keywords = set()
117
  for item in all_data:
@@ -136,13 +119,9 @@ def get_monthly_search_volumes(keywords):
136
  with ThreadPoolExecutor(max_workers=5) as executor:
137
  blog_futures = [executor.submit(get_blog_count_parallel, result[0]) for result in results]
138
  for i, future in enumerate(blog_futures):
139
- try:
140
- keyword, blog_count = future.result()
141
- results[i] = (results[i][0], results[i][1], results[i][2], results[i][3], blog_count)
142
- except Exception as e:
143
- print(f"Error fetching blog count for keyword '{results[i][0]}': {e}")
144
- results[i] = (results[i][0], results[i][1], results[i][2], results[i][3], "Error")
145
-
146
  return results
147
 
148
  def save_to_excel(results, keyword):
 
12
  import os
13
  import tempfile
14
  from datetime import datetime
 
15
 
 
 
 
 
16
  BASE_URL = "https://api.searchad.naver.com"
17
+ API_KEY = "010000000046604df3a0f6abf4c52824e0d5835c5cbeae279ced8b2bb9007b3cc566b190c7"
18
+ SECRET_KEY = "AQAAAABGYE3zoPar9MUoJODVg1xczNEcSuIBi66wWUy4p4gs/Q=="
19
  CUSTOMER_ID = 2666992
20
 
 
 
 
 
21
  class NaverAPI:
22
  def __init__(self, base_url, api_key, secret_key, customer_id):
23
  self.base_url = base_url
 
57
  return response.json()
58
 
59
  def get_blog_count(keyword):
60
+ client_id = "421ZKFMM5TS1xmvsF7C0"
61
+ client_secret = "h47UQHAOGV"
 
62
  encText = urllib.parse.quote(keyword)
63
  url = "https://openapi.naver.com/v1/search/blog?query=" + encText
64
  request = urllib.request.Request(url)
65
  request.add_header("X-Naver-Client-Id", client_id)
66
  request.add_header("X-Naver-Client-Secret", client_secret)
67
+ response = urllib.request.urlopen(request)
68
+ rescode = response.getcode()
69
+ if rescode == 200:
70
+ response_body = response.read()
71
+ data = json.loads(response_body.decode('utf-8'))
72
+ return data['total']
73
+ else:
 
 
 
 
74
  return 0
75
 
76
  def get_keywords_data_chunk(chunk):
 
88
  with ThreadPoolExecutor(max_workers=5) as executor:
89
  futures = [executor.submit(get_keywords_data_chunk, keywords[i:i+chunk_size]) for i in range(0, len(keywords), chunk_size)]
90
  for future in futures:
91
+ data = future.result()
92
+ if 'keywordList' in data:
93
+ all_data.extend(data['keywordList'])
 
 
 
94
 
95
  if not all_data:
96
+ return [("Error", "No data returned or invalid response from API", "", "", "")] # 블로그 문서 수 칼럼 추가
97
+
98
  results = []
99
  unique_keywords = set()
100
  for item in all_data:
 
119
  with ThreadPoolExecutor(max_workers=5) as executor:
120
  blog_futures = [executor.submit(get_blog_count_parallel, result[0]) for result in results]
121
  for i, future in enumerate(blog_futures):
122
+ keyword, blog_count = future.result()
123
+ results[i] = (results[i][0], results[i][1], results[i][2], results[i][3], blog_count)
124
+
 
 
 
 
125
  return results
126
 
127
  def save_to_excel(results, keyword):