# MetaDiscovery Agent - LOC API with Enhanced Completeness and Quality Analysis

import requests
import pandas as pd
import numpy as np
import streamlit as st
import matplotlib
import plotly.express as px
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# Custom CSS for white background, styled sidebar, banner, and dark grey font
# (CSS rules go inside the triple-quoted string below)
st.markdown("""
""", unsafe_allow_html=True)

# OPTION 1: Use an image from a URL for the banner
st.image(
    "https://cdn-uploads.huggingface.co/production/uploads/67351c643fe51cb1aa28f2e5/7ThcAOjbuM8ajrP85bGs4.jpeg",
    use_container_width=True
)

# Streamlit app header
st.title("MetaDiscovery Agent for Library of Congress Collections")
st.markdown("""
This tool connects to the LOC API, retrieves metadata from a selected collection,
analyzes metadata completeness, suggests enhancements, and identifies authority gaps.
""")

# Collection search queries in the format expected by the LOC search API
collections = {
    "American Revolutionary War Maps": "american+revolutionary+war+maps",
    "Civil War Maps": "civil+war+maps",
    "Women's Suffrage": "women+suffrage",
    "World War I Posters": "world+war+posters"
}

# Sidebar for selecting collection
# st.sidebar.markdown("## Settings")

# Create empty DataFrames so these names exist before any data is fetched
metadata_df = pd.DataFrame()
completeness_table = pd.DataFrame()

# Add a key to the selectbox to ensure it refreshes properly
selected = st.sidebar.selectbox("Select a collection", list(collections.keys()), key="collection_selector")
search_query = collections[selected]

# Define the collection URL
collection_url = f"https://www.loc.gov/search/?q={search_query}&fo=json"

# 📊 Field Completeness Breakdown (green-styled, placed above links)
st.sidebar.markdown("### 📊 Field Completeness Breakdown", unsafe_allow_html=True)
if not completeness_table.empty:
    try:
        styled_table = completeness_table.style.background_gradient(cmap="Greens").format("{:.1f}%")
        st.sidebar.dataframe(styled_table, use_container_width=True, height=200)
    except Exception:
        st.sidebar.warning("Could not style completeness table.")
        st.sidebar.dataframe(completeness_table)

# Create an empty placeholder for Quick Stats
stats_placeholder = st.sidebar.empty()

# Helpful Resources (styled and moved below dropdown)
st.sidebar.markdown("### Helpful Resources", unsafe_allow_html=True)

# Helpful Resources styled section (HTML links go inside the triple-quoted string below)
st.sidebar.markdown("""
""", unsafe_allow_html=True)
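# --- Optional cached fetch helper (illustrative sketch; not wired into the flow below) ---
# Streamlit reruns the whole script on every widget interaction, so wrapping the API call
# in st.cache_data would avoid re-querying the LOC API for a collection that was already
# fetched. The helper name `fetch_loc_records` and the generic User-Agent string are
# assumptions for illustration only; the inline fetch logic below is left unchanged.
@st.cache_data(show_spinner=False)
def fetch_loc_records(url: str) -> list:
    """Return the list of result records from a LOC JSON search URL (raises on HTTP errors)."""
    resp = requests.get(url, headers={"User-Agent": "Mozilla/5.0"}, timeout=30)
    resp.raise_for_status()
    payload = resp.json()
    # The code below expects matches under "results"; fall back to "items" if present.
    return payload.get("results", payload.get("items", []))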
""", unsafe_allow_html=True) # Add a fetch button to make the action explicit fetch_data = True if fetch_data: # Display a loading spinner while fetching data with st.spinner(f"Fetching data for {selected}..."): # Fetch data from LOC API with spoofed User-Agent header headers = { "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 Chrome/110.0.0.0 Safari/537.36" } try: response = requests.get(collection_url, headers=headers) response.raise_for_status() data = response.json() if "results" in data: records = data.get("results", []) elif "items" in data: records = data.get("items", []) else: records = [] st.error("Unexpected API response structure. No records found.") st.write(f"Retrieved {len(records)} records") except requests.exceptions.RequestException as e: st.error(f"API Connection Error: {e}") records = [] except ValueError: st.error("Failed to parse API response as JSON") records = [] # Extract selected metadata fields items = [] for record in records: if isinstance(record, dict): description = record.get("description", "") if isinstance(description, list): description = " ".join([str(d) for d in description]) item = { "id": record.get("id", ""), "title": record.get("title", ""), "date": record.get("date", ""), "subject": ", ".join(record.get("subject", [])) if isinstance(record.get("subject"), list) else record.get("subject", ""), "creator": record.get("creator", ""), "description": description } if not item["title"] and "item" in record: item["title"] = record.get("item", {}).get("title", "") if not item["date"] and "item" in record: item["date"] = record.get("item", {}).get("date", "") items.append(item) metadata_df = pd.DataFrame(items) # Define custom completeness check def is_incomplete(value): return pd.isna(value) or value in ["", "N/A", "null", None] if not metadata_df.empty: # Incomplete record detection incomplete_mask = metadata_df.apply(lambda row: row.map(is_incomplete), axis=1).any(axis=1) incomplete_count = incomplete_mask.sum() # Overall completeness total_fields = metadata_df.size filled_fields = metadata_df.apply(lambda row: row.map(lambda x: not is_incomplete(x)), axis=1).sum().sum() overall_percent = (filled_fields / total_fields) * 100 # Field-by-field completeness completeness = metadata_df.applymap(lambda x: not is_incomplete(x)).mean() * 100 completeness_table = completeness.round(1).to_frame(name="Completeness (%)") # Render stats summary in sidebar stats_html = f""" """ stats_placeholder.markdown(stats_html, unsafe_allow_html=True) # đ Field Completeness Breakdown st.sidebar.markdown("### đ Field Completeness Breakdown", unsafe_allow_html=True) st.sidebar.dataframe(completeness_table.style.format("{:.1f}%")) # Utility functions for deeper metadata quality analysis def is_incomplete(value): return pd.isna(value) or value in ["", "N/A", "null", None] def is_valid_date(value): try: pd.to_datetime(value) return True except: return False if not metadata_df.empty: st.subheader("Retrieved Metadata Sample") st.dataframe(metadata_df.head()) # Metadata completeness analysis (enhanced) st.subheader("Metadata Completeness Analysis") completeness = metadata_df.map(lambda x: not is_incomplete(x)).mean() * 100 completeness_df = pd.DataFrame({ "Field": completeness.index, "Completeness (%)": completeness.values }) # â Add this line to create the table used in the sidebar: completeness_table = completeness_df.set_index("Field") # Main bar chart display fig = px.bar(completeness_df, x="Field", y="Completeness (%)", title="Metadata Completeness by 
Field") st.plotly_chart(fig) # Identify incomplete records incomplete_mask = metadata_df.map(is_incomplete).any(axis=1) incomplete_records = metadata_df[incomplete_mask] st.subheader("⨠Suggested Metadata Enhancements") incomplete_with_desc = incomplete_records[incomplete_records['description'].notnull()] reference_df = metadata_df[metadata_df['subject'].notnull() & metadata_df['description'].notnull()] tfidf = TfidfVectorizer(stop_words='english') if len(incomplete_with_desc) > 1 and len(reference_df) > 1: try: suggestions = [] tfidf_matrix = tfidf.fit_transform(reference_df['description']) for idx, row in incomplete_with_desc.iterrows(): if pd.isna(row['subject']) and pd.notna(row['description']): desc_vec = tfidf.transform([str(row['description'])]) sims = cosine_similarity(desc_vec, tfidf_matrix).flatten() top_idx = sims.argmax() suggested_subject = reference_df.iloc[top_idx]['subject'] if pd.notna(suggested_subject) and suggested_subject: suggestions.append((row['title'], suggested_subject)) if suggestions: suggestions_df = pd.DataFrame(suggestions, columns=["Title", "Suggested Subject"]) st.markdown("