import requests
import pandas as pd
import numpy as np
import streamlit as st
import matplotlib
import plotly.express as px
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
# --- Page chrome -----------------------------------------------------------
# NOTE(review): the custom CSS payload appears to have been stripped from
# this st.markdown call — restore the style block here if needed.
st.markdown("""
""", unsafe_allow_html=True)

# Banner image hosted on the Hugging Face CDN.
st.image(
    "https://cdn-uploads.huggingface.co/production/uploads/67351c643fe51cb1aa28f2e5/7ThcAOjbuM8ajrP85bGs4.jpeg",
    use_container_width=True,
)

# App header and short description.
st.title("MetaDiscovery Agent for Library of Congress Collections")
st.markdown("""
This tool connects to the LOC API, retrieves metadata from a selected collection, and performs
an analysis of metadata completeness, suggests enhancements, and identifies authority gaps.
""")

# Display name -> URL-encoded search query for the LOC search API.
collections = {
    "American Revolutionary War Maps": "american+revolutionary+war+maps",
    "Civil War Maps": "civil+war+maps",
    "Women's Suffrage": "women+suffrage",
    "World War I Posters": "world+war+posters",
}

# Ensure metadata_df exists before any later emptiness checks run.
metadata_df = pd.DataFrame()

# Sidebar: collection picker (keyed so Streamlit refreshes it reliably).
with st.sidebar:
    st.markdown("""
    """, unsafe_allow_html=True)
    selected = st.radio("Select a Collection", list(collections.keys()), key="collection_selector")
    # FIX: the original single-quoted string here spanned a line break (a
    # syntax error — its HTML content appears stripped); an empty markdown
    # call preserves the apparent intent.
    st.markdown("", unsafe_allow_html=True)

search_query = collections[selected]
# LOC search endpoint; fo=json requests a JSON response.
collection_url = f"https://www.loc.gov/search/?q={search_query}&fo=json"

# Placeholder reserved in the sidebar for Quick Stats.
stats_placeholder = st.sidebar.empty()

# Fetch unconditionally (explicit fetch button is hard-coded on).
fetch_data = True
if fetch_data:
    # Show a spinner while the LOC API request is in flight.
    with st.spinner(f"Fetching data for {selected}..."):
        # Browser-like User-Agent header; the LOC API can reject default
        # client user agents.
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 Chrome/110.0.0.0 Safari/537.36"
        }
        try:
            # FIX: added a timeout so a stalled connection cannot hang the
            # app forever; requests.Timeout is a RequestException and is
            # handled by the except clause below.
            response = requests.get(collection_url, headers=headers, timeout=30)
            response.raise_for_status()
            data = response.json()
            # The API may expose records under "results" or "items".
            if "results" in data:
                records = data.get("results", [])
            elif "items" in data:
                records = data.get("items", [])
            else:
                records = []
                st.error("Unexpected API response structure. No records found.")
            st.write(f"Retrieved {len(records)} records")
        except requests.exceptions.RequestException as e:
            st.error(f"API Connection Error: {e}")
            records = []
        except ValueError:
            # response.json() raises ValueError on non-JSON bodies.
            st.error("Failed to parse API response as JSON")
            records = []

        # Flatten each raw record into the metadata fields we analyze.
        items = []
        for record in records:
            if isinstance(record, dict):
                description = record.get("description", "")
                # Descriptions are sometimes lists of fragments; join them.
                if isinstance(description, list):
                    description = " ".join([str(d) for d in description])
                item = {
                    "id": record.get("id", ""),
                    "title": record.get("title", ""),
                    "date": record.get("date", ""),
                    "subject": ", ".join(record.get("subject", [])) if isinstance(record.get("subject"), list) else record.get("subject", ""),
                    "creator": record.get("creator", ""),
                    "description": description
                }
                # Fall back to the nested "item" object for title/date.
                if not item["title"] and "item" in record:
                    item["title"] = record.get("item", {}).get("title", "")
                if not item["date"] and "item" in record:
                    item["date"] = record.get("item", {}).get("date", "")
                items.append(item)
        metadata_df = pd.DataFrame(items)
# Completeness predicate shared by the analysis below.
def is_incomplete(value):
    """Return True when *value* counts as missing metadata.

    A cell is incomplete when pandas reports it as NA (NaN/NaT/None) or
    when it equals one of the placeholder markers below.
    """
    placeholder_values = ["", "N/A", "null", None]
    return pd.isna(value) or value in placeholder_values
if not metadata_df.empty:
    # Record-level completeness: a record is incomplete if ANY field is.
    incomplete_mask = metadata_df.map(is_incomplete).any(axis=1)
    incomplete_count = incomplete_mask.sum()
    total_fields = metadata_df.size
    filled_fields = (~metadata_df.map(is_incomplete)).sum().sum()
    overall_percent = (filled_fields / total_fields) * 100

    # Field-level completeness: % of non-missing cells per column.
    completeness = (~metadata_df.map(is_incomplete)).mean() * 100
    completeness_df = pd.DataFrame({"Field": completeness.index, "Completeness (%)": completeness.values})
    completeness_table = completeness_df.set_index("Field")

    # Sidebar "Quick Stats" summary table.
    quick_stats_df = pd.DataFrame({
        "Metric": ["Total Records", "Incomplete Records", "Overall Completeness (%)"],
        "Value": [len(metadata_df), incomplete_count, round(overall_percent, 1)]
    })

    # Card-like heading (original HTML wrapper appears stripped).
    st.sidebar.markdown("""
    Quick Stats
    """, unsafe_allow_html=True)

    # Drop the index so row numbers are hidden in the display.
    quick_stats_df_reset = quick_stats_df.reset_index(drop=True)

    # Orange gradient highlights larger values in the Value column.
    styled_stats = (
        quick_stats_df_reset.style
        .background_gradient(cmap="Oranges", subset=["Value"])
        .format({"Value": "{:.1f}"})
    )
    # Display the styled dataframe without its index.
    st.sidebar.dataframe(
        styled_stats,
        use_container_width=False,
        height=240
    )

    # Top 10 subjects: subject cells are comma-joined strings, so split,
    # explode to one row per subject, and count.
    if 'subject' in metadata_df.columns:
        top_subjects = (
            metadata_df['subject']
            .dropna()
            .str.split(',')
            .explode()
            .str.strip()
            .value_counts()
            .head(10)
            .to_frame(name="Count")
        )
        # FIX: the expander now lives inside the column check — previously
        # a missing 'subject' column left top_subjects undefined and the
        # display raised a NameError.
        with st.sidebar.expander("Top 10 Most Common Subjects", expanded=True):
            st.dataframe(
                top_subjects.style.background_gradient(cmap="Greens").format("{:.0f}"),
                use_container_width=True,
                height=240
            )

    with st.sidebar.expander("Helpful Resources", expanded=False):
        # NOTE(review): the resource links appear to have been stripped
        # from this markdown call — restore them if needed.
        st.markdown("""
        """, unsafe_allow_html=True)
# Utility predicates for deeper metadata quality analysis.
# NOTE(review): is_incomplete is re-defined here identically to the earlier
# definition; kept so this section stands alone, but the duplication could
# be consolidated.
def is_incomplete(value):
    """Return True when *value* is NA or a placeholder ("", "N/A", "null")."""
    return pd.isna(value) or value in ["", "N/A", "null", None]

def is_valid_date(value):
    """Return True when pandas can parse *value* as a datetime."""
    try:
        pd.to_datetime(value)
        return True
    # FIX: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; catch only ordinary parser failures.
    except Exception:
        return False
if not metadata_df.empty:
    st.subheader("Retrieved Metadata Sample")
    st.dataframe(metadata_df.head())

    # Field-level completeness table (completeness_table computed above).
    st.subheader("Field Completeness Breakdown")
    st.markdown("""
    """, unsafe_allow_html=True)
    st.dataframe(
        completeness_table.style.background_gradient(cmap="Greens").format("{:.1f}%"),
        use_container_width=True,
        height=240
    )
    # FIX: the original single-quoted string here spanned a line break (a
    # syntax error — its HTML content appears stripped); an empty markdown
    # call preserves the apparent intent.
    st.markdown("", unsafe_allow_html=True)

    # Records with at least one missing/placeholder field.
    incomplete_mask = metadata_df.map(is_incomplete).any(axis=1)
    incomplete_records = metadata_df[incomplete_mask]

    st.subheader("Suggested Metadata Enhancements")
    # Candidates: incomplete records that still carry a description to mine.
    incomplete_with_desc = incomplete_records[incomplete_records['description'].notnull()]
    # Reference corpus: records with both subject and description present.
    reference_df = metadata_df[metadata_df['subject'].notnull() & metadata_df['description'].notnull()]
    tfidf = TfidfVectorizer(stop_words='english')
    if len(incomplete_with_desc) > 1 and len(reference_df) > 1:
        try:
            suggestions = []
            tfidf_matrix = tfidf.fit_transform(reference_df['description'])
            for idx, row in incomplete_with_desc.iterrows():
                # Suggest a subject only when it is missing and there is a
                # description to compare against the corpus.
                if pd.isna(row['subject']) and pd.notna(row['description']):
                    desc_vec = tfidf.transform([str(row['description'])])
                    sims = cosine_similarity(desc_vec, tfidf_matrix).flatten()
                    # Borrow the subject from the most similar reference record.
                    top_idx = sims.argmax()
                    suggested_subject = reference_df.iloc[top_idx]['subject']
                    if pd.notna(suggested_subject) and suggested_subject:
                        suggestions.append((row['title'], suggested_subject))
            if suggestions:
                suggestions_df = pd.DataFrame(suggestions, columns=["Title", "Suggested Subject"])
                # FIX: rebuilt from an unterminated string literal that
                # spanned a line break; renders the table as markdown.
                st.markdown(suggestions_df.to_markdown(index=False), unsafe_allow_html=True)
            else:
                st.markdown("""
                No metadata enhancement suggestions available.
                """, unsafe_allow_html=True)
        except Exception as e:
            # Broad catch is deliberate at this UI boundary: any failure in
            # vectorization/similarity is reported to the user, not fatal.
            st.error(f"Error generating metadata suggestions: {e}")
    else:
        st.markdown("""
        Not enough descriptive data to generate metadata suggestions.
        """, unsafe_allow_html=True)
else:
    st.warning("No metadata records found for this collection. Try selecting another one.")