broadfield-dev committed
Commit e9d9741 · verified · Parent: fefd925

Update app.py

Files changed (1):
  1. app.py  +104 -257
app.py CHANGED
@@ -1,32 +1,41 @@
  import os
  import threading
  from flask import Flask, render_template, request, jsonify
- from rss_processor import fetch_rss_feeds, process_and_store_articles, download_from_hf_hub, upload_to_hf_hub, clean_text
  import logging
  import time
  from datetime import datetime
  import hashlib
- import glob
  from langchain.vectorstores import Chroma
  from langchain.embeddings import HuggingFaceEmbeddings

  app = Flask(__name__)

- # Setup logging
  logging.basicConfig(level=logging.INFO)
  logger = logging.getLogger(__name__)

- # Global flag to track background loading
- loading_complete = True  # Start as True to allow initial rendering
  last_update_time = time.time()
- last_data_hash = None  # Track the hash of the last data to detect changes

  def get_embedding_model():
-     """Returns a singleton instance of the embedding model to avoid reloading."""
      if not hasattr(get_embedding_model, "model"):
          get_embedding_model.model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
      return get_embedding_model.model

  def load_feeds_in_background():
      global loading_complete, last_update_time
      try:
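The `get_embedding_model` helper above (unchanged by this commit) caches the embedding model as an attribute on the function itself, so every caller shares one instance. A minimal, self-contained sketch of that pattern, using a cheap stand-in object instead of `HuggingFaceEmbeddings` so it runs without downloading a model:

```python
# Sketch of the function-attribute singleton used by get_embedding_model().
# The real code builds HuggingFaceEmbeddings; an object() stands in here.
def get_expensive_resource():
    if not hasattr(get_expensive_resource, "instance"):
        print("Loading resource once...")
        get_expensive_resource.instance = object()  # stand-in for the embedding model
    return get_expensive_resource.instance

if __name__ == "__main__":
    a = get_expensive_resource()
    b = get_expensive_resource()
    assert a is b  # both calls return the same cached instance
```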
@@ -42,196 +51,117 @@ def load_feeds_in_background():
      finally:
          loading_complete = True

- def get_all_docs_from_dbs():
-     """Aggregate documents and metadata from all Chroma DB folders."""
-     all_docs = {'documents': [], 'metadatas': []}
-     seen_ids = set()
-     embedding_function = get_embedding_model()
-
-     for db_path in glob.glob("chroma_db_*"):
-         if not os.path.isdir(db_path):
-             continue
-         try:
-             temp_vector_db = Chroma(
-                 persist_directory=db_path,
-                 embedding_function=embedding_function,
-                 collection_name="news_articles"
-             )
-             # Skip empty databases
-             if temp_vector_db._collection.count() == 0:
-                 continue
-
-             db_data = temp_vector_db.get(include=['documents', 'metadatas'])
-             if db_data.get('documents') and db_data.get('metadatas'):
-                 for doc, meta in zip(db_data['documents'], db_data['metadatas']):
-                     # Use a more robust unique identifier
-                     doc_id = f"{meta.get('title', 'No Title')}|{meta.get('link', '')}|{meta.get('published', 'Unknown Date')}"
-                     if doc_id not in seen_ids:
-                         seen_ids.add(doc_id)
-                         all_docs['documents'].append(doc)
-                         all_docs['metadatas'].append(meta)
-         except Exception as e:
-             logger.error(f"Error loading DB {db_path}: {e}")
-
-     return all_docs

  def compute_data_hash(categorized_articles):
-     """Compute a hash of the current articles to detect changes."""
-     if not categorized_articles:
-         return ""
-     # Create a sorted string representation of the articles for consistent hashing
      data_str = ""
      for cat, articles in sorted(categorized_articles.items()):
          for article in sorted(articles, key=lambda x: x["published"]):
              data_str += f"{cat}|{article['title']}|{article['link']}|{article['published']}|"
      return hashlib.sha256(data_str.encode('utf-8')).hexdigest()

  @app.route('/')
  def index():
      global loading_complete, last_update_time, last_data_hash

-     # Check if any DB exists; if not, download from Hugging Face
-     if not glob.glob("chroma_db_*"):
-         logger.info("No Chroma DBs found, downloading from Hugging Face Hub...")
          download_from_hf_hub()

-     # Start background RSS feed update
      loading_complete = False
      threading.Thread(target=load_feeds_in_background, daemon=True).start()

-     # Load existing data immediately
      try:
-         all_docs = get_all_docs_from_dbs()
-         total_docs = len(all_docs['documents'])
-         logger.info(f"Total articles across all DBs at startup: {total_docs}")
-         if not all_docs.get('metadatas'):
-             logger.info("No articles in any DB yet")
              return render_template("index.html", categorized_articles={}, has_articles=False, loading=True)

-         # Process and categorize articles with deduplication
-         enriched_articles = []
-         seen_keys = set()
-         for doc, meta in zip(all_docs['documents'], all_docs['metadatas']):
-             if not meta:
-                 continue
-             title = meta.get("title", "No Title")
-             link = meta.get("link", "")
-             description = meta.get("original_description", "No Description")
-             published = meta.get("published", "Unknown Date").strip()
-
-             title = clean_text(title)
-             link = clean_text(link)
-             description = clean_text(description)
-
-             description_hash = hashlib.sha256(description.encode('utf-8')).hexdigest()
-             key = f"{title}|{link}|{published}|{description_hash}"
-             if key not in seen_keys:
-                 seen_keys.add(key)
-                 try:
-                     published = datetime.strptime(published, "%Y-%m-%d %H:%M:%S").isoformat() if "Unknown" not in published else published
-                 except (ValueError, TypeError):
-                     published = "1970-01-01T00:00:00"
-                 enriched_articles.append({
-                     "title": title,
-                     "link": link,
-                     "description": description,
-                     "category": meta.get("category", "Uncategorized"),
-                     "published": published,
-                     "image": meta.get("image", "svg"),
-                 })
-
          enriched_articles.sort(key=lambda x: x["published"], reverse=True)
-
          categorized_articles = {}
          for article in enriched_articles:
              cat = article["category"]
-             if cat not in categorized_articles:
-                 categorized_articles[cat] = []
-             categorized_articles[cat].append(article)
-
-         categorized_articles = dict(sorted(categorized_articles.items(), key=lambda x: x[0].lower()))

          for cat in categorized_articles:
-             categorized_articles[cat] = sorted(categorized_articles[cat], key=lambda x: x["published"], reverse=True)[:10]
-             if len(categorized_articles[cat]) >= 2:
-                 logger.debug(f"Category {cat} top 2: {categorized_articles[cat][0]['title']} | {categorized_articles[cat][1]['title']}")

-         # Compute initial data hash
          last_data_hash = compute_data_hash(categorized_articles)
-
-         logger.info(f"Displaying articles at startup: {sum(len(articles) for articles in categorized_articles.values())} total")
-         return render_template("index.html",
-                                categorized_articles=categorized_articles,
-                                has_articles=True,
-                                loading=True)
      except Exception as e:
-         logger.error(f"Error retrieving articles at startup: {e}")
          return render_template("index.html", categorized_articles={}, has_articles=False, loading=True)

  @app.route('/search', methods=['POST'])
  def search():
      query = request.form.get('search')
      if not query:
-         logger.info("Empty search query received")
          return jsonify({"categorized_articles": {}, "has_articles": False, "loading": False})

      try:
          logger.info(f"Performing semantic search for: '{query}'")

-         embedding_function = get_embedding_model()
          enriched_articles = []
          seen_keys = set()
-         db_paths = glob.glob("chroma_db_*")
-
-         if not db_paths:
-             logger.warning("No Chroma DBs found for search.")
-             return jsonify({"categorized_articles": {}, "has_articles": False, "loading": False})
-
-         all_search_results = []
-         for db_path in db_paths:
-             if not os.path.isdir(db_path): continue
-             try:
-                 vector_db = Chroma(
-                     persist_directory=db_path,
-                     embedding_function=embedding_function,
-                     collection_name="news_articles"
-                 )
-                 if vector_db._collection.count() > 0:
-                     results = vector_db.similarity_search_with_relevance_scores(query, k=20)
-                     all_search_results.extend(results)
-             except Exception as e:
-                 logger.error(f"Error searching in DB {db_path}: {e}")
-
-         # Sort all results by relevance score (higher is better)
-         all_search_results.sort(key=lambda x: x[1], reverse=True)
-
-         # Process and deduplicate top results
-         for doc, score in all_search_results:
              meta = doc.metadata
-             title = clean_text(meta.get("title", "No Title"))
-             link = clean_text(meta.get("link", ""))
-             description = clean_text(meta.get("original_description", "No Description"))
-             published = meta.get("published", "Unknown Date").strip()
-
-             description_hash = hashlib.sha256(description.encode('utf-8')).hexdigest()
-             key = f"{title}|{link}|{published}|{description_hash}"
-
              if key not in seen_keys:
                  seen_keys.add(key)
                  enriched_articles.append({
-                     "title": meta.get("title", "No Title"),
-                     "link": meta.get("link", ""),
                      "description": meta.get("original_description", "No Description"),
                      "category": meta.get("category", "Uncategorized"),
-                     "published": published,
                      "image": meta.get("image", "svg"),
                  })

-         logger.info(f"Found {len(enriched_articles)} unique articles from semantic search.")
-         if not enriched_articles:
-             return jsonify({"categorized_articles": {}, "has_articles": False, "loading": False})
-
-         # Categorize the articles
          categorized_articles = {}
          for article in enriched_articles:
              cat = article["category"]
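The removed code above builds a deduplication key from title, link, published date, and a SHA-256 of the description, and normalizes the date to ISO format with an epoch fallback. A standalone sketch of that key/normalization logic (the sample article is invented for illustration):

```python
import hashlib
from datetime import datetime

def make_key(title, link, published, description):
    # Same idea as the removed index()/search() code: hash the description
    # so entries that differ only in body text still get distinct keys.
    description_hash = hashlib.sha256(description.encode("utf-8")).hexdigest()
    return f"{title}|{link}|{published}|{description_hash}"

def normalize_published(published):
    # "YYYY-MM-DD HH:MM:SS" -> ISO 8601; anything unparseable falls back to epoch.
    try:
        return datetime.strptime(published, "%Y-%m-%d %H:%M:%S").isoformat()
    except (ValueError, TypeError):
        return "1970-01-01T00:00:00"

if __name__ == "__main__":
    key = make_key("Example title", "https://example.com/a", "2024-05-01 12:00:00", "Body text")
    print(key)
    print(normalize_published("2024-05-01 12:00:00"))  # 2024-05-01T12:00:00
    print(normalize_published("Unknown Date"))         # 1970-01-01T00:00:00
```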
@@ -246,141 +176,58 @@ def search():
          logger.error(f"Semantic search error: {e}", exc_info=True)
          return jsonify({"categorized_articles": {}, "has_articles": False, "loading": False}), 500


  @app.route('/check_loading')
  def check_loading():
      global loading_complete, last_update_time
-     if loading_complete:
-         return jsonify({"status": "complete", "last_update": last_update_time})
-     return jsonify({"status": "loading"}), 202

  @app.route('/get_updates')
  def get_updates():
      global last_update_time, last_data_hash
      try:
-         all_docs = get_all_docs_from_dbs()
-         if not all_docs.get('metadatas'):
-             return jsonify({"articles": [], "last_update": last_update_time, "has_updates": False})
-
-         enriched_articles = []
-         seen_keys = set()
-         for doc, meta in zip(all_docs['documents'], all_docs['metadatas']):
-             if not meta:
-                 continue
-             title = meta.get("title", "No Title")
-             link = meta.get("link", "")
-             description = meta.get("original_description", "No Description")
-             published = meta.get("published", "Unknown Date").strip()
-
-             title = clean_text(title)
-             link = clean_text(link)
-             description = clean_text(description)
-
-             description_hash = hashlib.sha256(description.encode('utf-8')).hexdigest()
-             key = f"{title}|{link}|{published}|{description_hash}"
-             if key not in seen_keys:
-                 seen_keys.add(key)
-                 try:
-                     published = datetime.strptime(published, "%Y-%m-%d %H:%M:%S").isoformat() if "Unknown" not in published else published
-                 except (ValueError, TypeError):
-                     published = "1970-01-01T00:00:00"
-                 enriched_articles.append({
-                     "title": title,
-                     "link": link,
-                     "description": description,
-                     "category": meta.get("category", "Uncategorized"),
-                     "published": published,
-                     "image": meta.get("image", "svg"),
-                 })
-
-         enriched_articles.sort(key=lambda x: x["published"], reverse=True)
          categorized_articles = {}
          for article in enriched_articles:
              cat = article["category"]
-             if cat not in categorized_articles:
-                 categorized_articles[cat] = []
-             key = f"{article['title']}|{article['link']}|{article['published']}"
-             if key not in [f"{a['title']}|{a['link']}|{a['published']}" for a in categorized_articles[cat]]:
-                 categorized_articles[cat].append(article)
-
          for cat in categorized_articles:
-             unique_articles = []
-             seen_cat_keys = set()
-             for article in sorted(categorized_articles[cat], key=lambda x: x["published"], reverse=True):
-                 key = f"{clean_text(article['title'])}|{clean_text(article['link'])}|{article['published']}"
-                 if key not in seen_cat_keys:
-                     seen_cat_keys.add(key)
-                     unique_articles.append(article)
-             categorized_articles[cat] = unique_articles[:10]

-         # Compute hash of new data
          current_data_hash = compute_data_hash(categorized_articles)
-
-         # Compare with last data hash to determine if there are updates
          has_updates = last_data_hash != current_data_hash
          if has_updates:
              logger.info("New RSS data detected, sending updates to frontend")
              last_data_hash = current_data_hash
-             return jsonify({
-                 "articles": categorized_articles,
-                 "last_update": last_update_time,
-                 "has_updates": True
-             })
          else:
-             logger.info("No new RSS data, skipping update")
-             return jsonify({
-                 "articles": {},
-                 "last_update": last_update_time,
-                 "has_updates": False
-             })
      except Exception as e:
          logger.error(f"Error fetching updates: {e}")
          return jsonify({"articles": {}, "last_update": last_update_time, "has_updates": False}), 500

- @app.route('/get_all_articles/<category>')
- def get_all_articles(category):
-     try:
-         all_docs = get_all_docs_from_dbs()
-         if not all_docs.get('metadatas'):
-             return jsonify({"articles": [], "category": category})
-
-         enriched_articles = []
-         seen_keys = set()
-         for doc, meta in zip(all_docs['documents'], all_docs['metadatas']):
-             if not meta or meta.get("category") != category:
-                 continue
-             title = meta.get("title", "No Title")
-             link = meta.get("link", "")
-             description = meta.get("original_description", "No Description")
-             published = meta.get("published", "Unknown Date").strip()
-
-             title = clean_text(title)
-             link = clean_text(link)
-             description = clean_text(description)
-
-             description_hash = hashlib.sha256(description.encode('utf-8')).hexdigest()
-             key = f"{title}|{link}|{published}|{description_hash}"
-             if key not in seen_keys:
-                 seen_keys.add(key)
-                 try:
-                     published = datetime.strptime(published, "%Y-%m-%d %H:%M:%S").isoformat() if "Unknown" not in published else published
-                 except (ValueError, TypeError):
-                     published = "1970-01-01T00:00:00"
-                 enriched_articles.append({
-                     "title": title,
-                     "link": link,
-                     "description": description,
-                     "category": meta.get("category", "Uncategorized"),
-                     "published": published,
-                     "image": meta.get("image", "svg"),
-                 })
-
-         enriched_articles.sort(key=lambda x: x["published"], reverse=True)
-         return jsonify({"articles": enriched_articles, "category": category})
-     except Exception as e:
-         logger.error(f"Error fetching all articles for category {category}: {e}")
-         return jsonify({"articles": [], "category": category}), 500
-
  @app.route('/card')
  def card_load():
      return render_template("card.html")
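Both the old and new versions detect changes by hashing a sorted, pipe-delimited rendering of the categorized articles and comparing it against the previous hash (`compute_data_hash` and `last_data_hash`). A self-contained sketch of that change check, with invented sample data; the updated app.py follows, with its added lines marked `+`:

```python
import hashlib

def compute_data_hash(categorized_articles):
    # Mirrors the app's change-detection hash: a stable, sorted string of
    # category/title/link/published fields, hashed with SHA-256.
    if not categorized_articles:
        return ""
    data_str = ""
    for cat, articles in sorted(categorized_articles.items()):
        for article in sorted(articles, key=lambda x: x["published"]):
            data_str += f"{cat}|{article['title']}|{article['link']}|{article['published']}|"
    return hashlib.sha256(data_str.encode("utf-8")).hexdigest()

if __name__ == "__main__":
    # Invented sample data for illustration.
    snapshot = {"Tech": [{"title": "A", "link": "https://example.com/a", "published": "2024-05-01T12:00:00"}]}
    before = compute_data_hash(snapshot)
    snapshot["Tech"].append({"title": "B", "link": "https://example.com/b", "published": "2024-05-02T09:30:00"})
    after = compute_data_hash(snapshot)
    print(before != after)  # True -> /get_updates would report has_updates
```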
 
  import os
  import threading
  from flask import Flask, render_template, request, jsonify
+ from rss_processor import fetch_rss_feeds, process_and_store_articles, download_from_hf_hub, upload_to_hf_hub, clean_text, LOCAL_DB_DIR
  import logging
  import time
  from datetime import datetime
  import hashlib
  from langchain.vectorstores import Chroma
  from langchain.embeddings import HuggingFaceEmbeddings

  app = Flask(__name__)

  logging.basicConfig(level=logging.INFO)
  logger = logging.getLogger(__name__)

+ loading_complete = True
  last_update_time = time.time()
+ last_data_hash = None

  def get_embedding_model():
      if not hasattr(get_embedding_model, "model"):
          get_embedding_model.model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
      return get_embedding_model.model

+ def get_vector_db():
+     if not os.path.exists(LOCAL_DB_DIR):
+         return None
+     try:
+         return Chroma(
+             persist_directory=LOCAL_DB_DIR,
+             embedding_function=get_embedding_model(),
+             collection_name="news_articles"
+         )
+     except Exception as e:
+         logger.error(f"Failed to load vector DB: {e}")
+         return None
+
  def load_feeds_in_background():
      global loading_complete, last_update_time
      try:
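The new `get_vector_db` helper replaces the old per-folder `glob("chroma_db_*")` scan with a single persistent Chroma directory (`LOCAL_DB_DIR`, imported from `rss_processor`). A hedged, self-contained sketch of the same access pattern; it assumes the same dependencies as app.py (langchain, chromadb, sentence-transformers), a DB already populated on disk, and uses `"chroma_db"` as a placeholder for the real `LOCAL_DB_DIR` value, which is not shown in this diff:

```python
# Sketch of the consolidated, single-directory Chroma access used by app.py.
import os
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceEmbeddings

LOCAL_DB_DIR = "chroma_db"  # assumption: stand-in for the value exported by rss_processor

def open_db():
    # Same guard as get_vector_db(): return None when nothing has been persisted yet.
    if not os.path.exists(LOCAL_DB_DIR):
        return None
    return Chroma(
        persist_directory=LOCAL_DB_DIR,
        embedding_function=HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2"),
        collection_name="news_articles",
    )

if __name__ == "__main__":
    db = open_db()
    if db is None or db._collection.count() == 0:
        print("No articles stored yet")
    else:
        data = db.get(include=["documents", "metadatas"])  # same shape app.py consumes
        print(f"{len(data['metadatas'])} stored articles")
        for doc, score in db.similarity_search_with_relevance_scores("climate policy", k=3):
            print(round(score, 3), doc.metadata.get("title", "No Title"))
```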
 
      finally:
          loading_complete = True

+ def get_all_docs_from_db():
+     vector_db = get_vector_db()
+     if not vector_db or vector_db._collection.count() == 0:
+         return {'documents': [], 'metadatas': []}
+     return vector_db.get(include=['documents', 'metadatas'])

  def compute_data_hash(categorized_articles):
+     if not categorized_articles: return ""
      data_str = ""
      for cat, articles in sorted(categorized_articles.items()):
          for article in sorted(articles, key=lambda x: x["published"]):
              data_str += f"{cat}|{article['title']}|{article['link']}|{article['published']}|"
      return hashlib.sha256(data_str.encode('utf-8')).hexdigest()

+ def process_docs_into_articles(docs_data):
+     enriched_articles = []
+     seen_keys = set()
+     for doc, meta in zip(docs_data['documents'], docs_data['metadatas']):
+         if not meta: continue
+         title = meta.get("title", "No Title")
+         link = meta.get("link", "")
+         description = meta.get("original_description", "No Description")
+         published = meta.get("published", "Unknown Date").strip()
+
+         key = f"{title}|{link}|{published}"
+         if key not in seen_keys:
+             seen_keys.add(key)
+             try:
+                 published_iso = datetime.strptime(published, "%Y-%m-%d %H:%M:%S").isoformat()
+             except (ValueError, TypeError):
+                 published_iso = "1970-01-01T00:00:00"
+
+             enriched_articles.append({
+                 "title": title,
+                 "link": link,
+                 "description": description,
+                 "category": meta.get("category", "Uncategorized"),
+                 "published": published_iso,
+                 "image": meta.get("image", "svg"),
+             })
+     return enriched_articles
+
  @app.route('/')
  def index():
      global loading_complete, last_update_time, last_data_hash

+     if not os.path.exists(LOCAL_DB_DIR):
+         logger.info(f"No Chroma DB found at '{LOCAL_DB_DIR}', downloading from Hugging Face Hub...")
          download_from_hf_hub()

      loading_complete = False
      threading.Thread(target=load_feeds_in_background, daemon=True).start()

      try:
+         all_docs = get_all_docs_from_db()
+         if not all_docs['metadatas']:
+             logger.info("No articles in the DB yet")
              return render_template("index.html", categorized_articles={}, has_articles=False, loading=True)

+         enriched_articles = process_docs_into_articles(all_docs)
          enriched_articles.sort(key=lambda x: x["published"], reverse=True)
+
          categorized_articles = {}
          for article in enriched_articles:
              cat = article["category"]
+             categorized_articles.setdefault(cat, []).append(article)
+
+         categorized_articles = dict(sorted(categorized_articles.items()))

          for cat in categorized_articles:
+             categorized_articles[cat] = categorized_articles[cat][:10]

          last_data_hash = compute_data_hash(categorized_articles)
+
+         return render_template("index.html", categorized_articles=categorized_articles, has_articles=True, loading=True)
      except Exception as e:
+         logger.error(f"Error retrieving articles at startup: {e}", exc_info=True)
          return render_template("index.html", categorized_articles={}, has_articles=False, loading=True)

  @app.route('/search', methods=['POST'])
  def search():
      query = request.form.get('search')
      if not query:
          return jsonify({"categorized_articles": {}, "has_articles": False, "loading": False})

      try:
          logger.info(f"Performing semantic search for: '{query}'")
+         vector_db = get_vector_db()
+         if not vector_db:
+             return jsonify({"categorized_articles": {}, "has_articles": False, "loading": False})
+
+         results = vector_db.similarity_search_with_relevance_scores(query, k=50)

          enriched_articles = []
          seen_keys = set()
+         for doc, score in results:
              meta = doc.metadata
+             title = meta.get("title", "No Title")
+             link = meta.get("link", "")
+             key = f"{title}|{link}|{meta.get('published', '')}"
              if key not in seen_keys:
                  seen_keys.add(key)
                  enriched_articles.append({
+                     "title": title,
+                     "link": link,
                      "description": meta.get("original_description", "No Description"),
                      "category": meta.get("category", "Uncategorized"),
+                     "published": meta.get("published", "Unknown Date"),
                      "image": meta.get("image", "svg"),
                  })

          categorized_articles = {}
          for article in enriched_articles:
              cat = article["category"]
 
          logger.error(f"Semantic search error: {e}", exc_info=True)
          return jsonify({"categorized_articles": {}, "has_articles": False, "loading": False}), 500

+ @app.route('/get_all_articles/<category>')
+ def get_all_articles(category):
+     try:
+         all_docs = get_all_docs_from_db()
+         enriched_articles = process_docs_into_articles(all_docs)
+
+         category_articles = [
+             article for article in enriched_articles if article["category"] == category
+         ]
+
+         category_articles.sort(key=lambda x: x["published"], reverse=True)
+         return jsonify({"articles": category_articles, "category": category})
+     except Exception as e:
+         logger.error(f"Error fetching all articles for category {category}: {e}")
+         return jsonify({"articles": [], "category": category}), 500

  @app.route('/check_loading')
  def check_loading():
      global loading_complete, last_update_time
+     return jsonify({"status": "complete" if loading_complete else "loading", "last_update": last_update_time})

  @app.route('/get_updates')
  def get_updates():
      global last_update_time, last_data_hash
      try:
+         all_docs = get_all_docs_from_db()
+         if not all_docs['metadatas']:
+             return jsonify({"articles": {}, "last_update": last_update_time, "has_updates": False})
+
+         enriched_articles = process_docs_into_articles(all_docs)
          categorized_articles = {}
          for article in enriched_articles:
              cat = article["category"]
+             categorized_articles.setdefault(cat, []).append(article)
+
          for cat in categorized_articles:
+             categorized_articles[cat].sort(key=lambda x: x["published"], reverse=True)
+             categorized_articles[cat] = categorized_articles[cat][:10]

          current_data_hash = compute_data_hash(categorized_articles)
          has_updates = last_data_hash != current_data_hash
+
          if has_updates:
              logger.info("New RSS data detected, sending updates to frontend")
              last_data_hash = current_data_hash
+             return jsonify({"articles": categorized_articles, "last_update": last_update_time, "has_updates": True})
          else:
+             return jsonify({"articles": {}, "last_update": last_update_time, "has_updates": False})
      except Exception as e:
          logger.error(f"Error fetching updates: {e}")
          return jsonify({"articles": {}, "last_update": last_update_time, "has_updates": False}), 500

  @app.route('/card')
  def card_load():
      return render_template("card.html")
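A quick way to exercise the refactored routes is Flask's built-in test client. This is only a sketch under assumptions: it runs next to app.py with its dependencies installed, imports the `app` object defined there, and relies on the routes' own tolerance for an empty or missing Chroma DB:

```python
# Sketch: poking the updated endpoints with Flask's test client.
from app import app  # assumes app.py is importable as a module

def main():
    client = app.test_client()

    status = client.get("/check_loading").get_json()
    print("loading status:", status["status"])            # "loading" or "complete"

    updates = client.get("/get_updates").get_json()
    print("has_updates:", updates["has_updates"])

    search = client.post("/search", data={"search": "open source"}).get_json()
    print("categories returned:", list(search["categorized_articles"].keys()))

if __name__ == "__main__":
    main()
```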