import os
import logging
import hashlib
import re
import shutil
from datetime import datetime, date

import dateutil.parser
import feedparser
from huggingface_hub import HfApi, login, snapshot_download
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.docstore.document import Document

import rss_feeds

# Setup logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Constants
MAX_ARTICLES_PER_FEED = 10
RSS_FEEDS = rss_feeds.RSS_FEEDS
COLLECTION_NAME = "news_articles"
HF_API_TOKEN = os.getenv("DEMO_HF_API_TOKEN", "YOUR_HF_API_TOKEN")
REPO_ID = "broadfield-dev/news-rag-db"

# Initialize Hugging Face API
login(token=HF_API_TOKEN)
hf_api = HfApi()


def get_embedding_model():
    """Returns a singleton instance of the embedding model to avoid reloading."""
    if not hasattr(get_embedding_model, "model"):
        get_embedding_model.model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    return get_embedding_model.model


def get_daily_db_dir():
    """Returns the path for today's Chroma DB."""
    return f"chroma_db_{date.today().isoformat()}"


def clean_text(text):
    """Clean text by removing HTML tags and extra whitespace."""
    if not text or not isinstance(text, str):
        return ""
    text = re.sub(r'<.*?>', '', text)
    text = ' '.join(text.split())
    return text.strip().lower()


def fetch_rss_feeds():
    """Fetch up to MAX_ARTICLES_PER_FEED articles from each configured feed, deduplicated by title/link/date/content."""
    articles = []
    seen_keys = set()
    for feed_url in RSS_FEEDS:
        try:
            logger.info(f"Fetching {feed_url}")
            feed = feedparser.parse(feed_url)
            if feed.bozo:
                logger.warning(f"Parse error for {feed_url}: {feed.bozo_exception}")
                continue
            article_count = 0
            for entry in feed.entries:
                if article_count >= MAX_ARTICLES_PER_FEED:
                    break
                title = entry.get("title", "No Title")
                link = entry.get("link", "")
                description = entry.get("summary", entry.get("description", ""))
                title = clean_text(title)
                link = clean_text(link)
                description = clean_text(description)

                # Try the common date fields in order until one parses.
                published = "Unknown Date"
                for date_field in ["published", "updated", "created", "pubDate"]:
                    if date_field in entry:
                        try:
                            parsed_date = dateutil.parser.parse(entry[date_field])
                            published = parsed_date.strftime("%Y-%m-%d %H:%M:%S")
                            break
                        except (ValueError, TypeError) as e:
                            logger.debug(f"Failed to parse {date_field} '{entry[date_field]}': {e}")
                            continue

                description_hash = hashlib.sha256(description.encode('utf-8')).hexdigest()
                key = f"{title}|{link}|{published}|{description_hash}"
                if key not in seen_keys:
                    seen_keys.add(key)
                    # Try several possible image sources; fall back to the "svg" placeholder.
                    image = "svg"
                    for img_source in [
                        lambda e: clean_text(e.get("media_content", [{}])[0].get("url")) if e.get("media_content") else "",
                        lambda e: clean_text(e.get("media_thumbnail", [{}])[0].get("url")) if e.get("media_thumbnail") else "",
                        lambda e: clean_text(e.get("enclosure", {}).get("url")) if e.get("enclosure") else "",
                        lambda e: clean_text(next((lnk.get("href") for lnk in e.get("links", []) if lnk.get("type", "").startswith("image")), "")),
                    ]:
                        try:
                            img = img_source(entry)
                            if img and img.strip():
                                image = img
                                break
                        except (IndexError, AttributeError, TypeError):
                            continue

                    articles.append({
                        "title": title,
                        "link": link,
                        "description": description,
                        "published": published,
                        "category": categorize_feed(feed_url),
                        "image": image,
                    })
                    article_count += 1
        except Exception as e:
            logger.error(f"Error fetching {feed_url}: {e}")
    logger.info(f"Total articles fetched: {len(articles)}")
    return articles


def categorize_feed(url):
    """Categorize an RSS feed based on its URL."""
    if not url or not isinstance(url, str):
        logger.warning(f"Invalid URL provided for categorization: {url}")
        return "Uncategorized"
"Uncategorized" url = url.lower().strip() logger.debug(f"Categorizing URL: {url}") if any(keyword in url for keyword in ["nature", "science.org", "arxiv.org", "plos.org", "annualreviews.org", "journals.uchicago.edu", "jneurosci.org", "cell.com", "nejm.org", "lancet.com"]): return "Academic Papers" elif any(keyword in url for keyword in ["reuters.com/business", "bloomberg.com", "ft.com", "marketwatch.com", "cnbc.com", "foxbusiness.com", "wsj.com", "bworldonline.com", "economist.com", "forbes.com"]): return "Business" elif any(keyword in url for keyword in ["investing.com", "cnbc.com/market", "marketwatch.com/market", "fool.co.uk", "zacks.com", "seekingalpha.com", "barrons.com", "yahoofinance.com"]): return "Stocks & Markets" elif any(keyword in url for keyword in ["whitehouse.gov", "state.gov", "commerce.gov", "transportation.gov", "ed.gov", "dol.gov", "justice.gov", "federalreserve.gov", "occ.gov", "sec.gov", "bls.gov", "usda.gov", "gao.gov", "cbo.gov", "fema.gov", "defense.gov", "hhs.gov", "energy.gov", "interior.gov"]): return "Federal Government" elif any(keyword in url for keyword in ["weather.gov", "metoffice.gov.uk", "accuweather.com", "weatherunderground.com", "noaa.gov", "wunderground.com", "climate.gov", "ecmwf.int", "bom.gov.au"]): return "Weather" elif any(keyword in url for keyword in ["data.worldbank.org", "imf.org", "un.org", "oecd.org", "statista.com", "kff.org", "who.int", "cdc.gov", "bea.gov", "census.gov", "fdic.gov"]): return "Data & Statistics" elif any(keyword in url for keyword in ["nasa", "spaceweatherlive", "space", "universetoday", "skyandtelescope", "esa"]): return "Space" elif any(keyword in url for keyword in ["sciencedaily", "quantamagazine", "smithsonianmag", "popsci", "discovermagazine", "scientificamerican", "newscientist", "livescience", "atlasobscura"]): return "Science" elif any(keyword in url for keyword in ["wired", "techcrunch", "arstechnica", "gizmodo", "theverge"]): return "Tech" elif any(keyword in url for keyword in ["horoscope", "astrostyle"]): return "Astrology" elif any(keyword in url for keyword in ["cnn_allpolitics", "bbci.co.uk/news/politics", "reuters.com/arc/outboundfeeds/newsletter-politics", "politico.com/rss/politics", "thehill"]): return "Politics" elif any(keyword in url for keyword in ["weather", "swpc.noaa.gov", "foxweather"]): return "Earth Weather" elif "vogue" in url: return "Lifestyle" elif any(keyword in url for keyword in ["phys.org", "aps.org", "physicsworld"]): return "Physics" else: logger.warning(f"No matching category found for URL: {url}") return "Uncategorized" def process_and_store_articles(articles): db_path = get_daily_db_dir() vector_db = Chroma( persist_directory=db_path, embedding_function=get_embedding_model(), collection_name=COLLECTION_NAME ) try: existing_ids = set(vector_db.get(include=[])["ids"]) except Exception: existing_ids = set() docs_to_add = [] ids_to_add = [] for article in articles: try: title = clean_text(article["title"]) link = clean_text(article["link"]) description = clean_text(article["description"]) published = article["published"] description_hash = hashlib.sha256(description.encode('utf-8')).hexdigest() doc_id = f"{title}|{link}|{published}|{description_hash}" if doc_id in existing_ids: logger.debug(f"Skipping duplicate in DB {db_path}: {doc_id}") continue metadata = { "title": article["title"], "link": article["link"], "original_description": article["description"], "published": article["published"], "category": article["category"], "image": article["image"], } doc = 
            docs_to_add.append(doc)
            ids_to_add.append(doc_id)
            existing_ids.add(doc_id)
        except Exception as e:
            logger.error(f"Error processing article {article.get('title', 'N/A')}: {e}")

    if docs_to_add:
        try:
            vector_db.add_documents(documents=docs_to_add, ids=ids_to_add)
            vector_db.persist()
            logger.info(f"Added {len(docs_to_add)} new articles to DB {db_path}. Total in DB: {vector_db._collection.count()}")
        except Exception as e:
            logger.error(f"Error storing articles in {db_path}: {e}")


def download_from_hf_hub():
    """Download all existing daily Chroma DB folders from the Hugging Face dataset repo."""
    try:
        hf_api.create_repo(repo_id=REPO_ID, repo_type="dataset", exist_ok=True, token=HF_API_TOKEN)
        logger.info(f"Downloading all DBs from {REPO_ID}...")
        snapshot_download(
            repo_id=REPO_ID,
            repo_type="dataset",
            local_dir=".",
            local_dir_use_symlinks=False,
            allow_patterns="chroma_db_*/**",
            token=HF_API_TOKEN
        )
        logger.info("Finished downloading DBs.")
    except Exception as e:
        logger.error(f"Error downloading from Hugging Face Hub: {e}")


def upload_to_hf_hub():
    """Upload today's Chroma DB folder to the Hugging Face dataset repo."""
    db_path = get_daily_db_dir()
    if os.path.exists(db_path):
        try:
            logger.info(f"Uploading updated Chroma DB '{db_path}' to {REPO_ID}...")
            hf_api.upload_folder(
                folder_path=db_path,
                path_in_repo=db_path,
                repo_id=REPO_ID,
                repo_type="dataset",
                token=HF_API_TOKEN
            )
            logger.info(f"Database folder '{db_path}' uploaded to: {REPO_ID}")
        except Exception as e:
            logger.error(f"Error uploading to Hugging Face Hub: {e}")


if __name__ == "__main__":
    download_from_hf_hub()
    articles = fetch_rss_feeds()
    process_and_store_articles(articles)
    upload_to_hf_hub()