import requests
import pandas as pd
import numpy as np
import streamlit as st
import plotly.express as px
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
# Custom CSS for styling to match the screenshot
# NOTE(review): the markdown body below is empty — the CSS appears to have
# been stripped from this copy of the file; confirm against the original.
st.markdown("""
""", unsafe_allow_html=True)
# Banner image
st.image("https://cdn-uploads.huggingface.co/production/uploads/67351c643fe51cb1aa28f2e5/7ThcAOjbuM8ajrP85bGs4.jpeg", use_container_width=True)
# App header
st.title("MetaDiscovery Agent for Library of Congress Collections")
st.markdown("""
This tool connects to the LOC API, retrieves metadata from a selected collection, and performs an
analysis of metadata completeness, suggests enhancements, and identifies authority gaps.
""")
# Display names mapped to URL-ready LOC search queries ("+" separators are
# the URL encoding of spaces for the loc.gov search endpoint).
collections = {
"American Revolutionary War Maps": "american+revolutionary+war+maps",
"Civil War Maps": "civil+war+maps",
"Women's Suffrage": "women+suffrage",
"World War I Posters": "world+war+posters"
}
# Initialize metadata_df so later code can safely test .empty even when the
# fetch fails or returns nothing.
metadata_df = pd.DataFrame()
# Add collection selector to sidebar
selected = st.sidebar.selectbox("Select a collection", list(collections.keys()), key="collection_selector")
search_query = collections[selected]
# JSON search URL for the chosen collection (fo=json requests a JSON body)
collection_url = f"https://www.loc.gov/search/?q={search_query}&fo=json"
# Placeholders reserve sidebar slots that are filled after the data loads
stats_placeholder = st.sidebar.empty()
completeness_placeholder = st.sidebar.empty()
# Helpful Resources (styled section in sidebar)
# NOTE(review): this markdown body is also empty — HTML likely stripped.
st.sidebar.markdown("""
""", unsafe_allow_html=True)
# Fetch runs on every Streamlit rerun; kept as a flag so it can be toggled
# off manually during development.
fetch_data = True
if fetch_data:
    # Show progress while the API round-trip and extraction run.
    with st.spinner(f"Fetching data for {selected}..."):
        # loc.gov rejects the default python-requests User-Agent, so send a
        # browser-like one.
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 Chrome/110.0.0.0 Safari/537.36"
        }
        try:
            # FIX: requests has no default timeout — without one a stalled
            # connection hangs the Streamlit script indefinitely.
            response = requests.get(collection_url, headers=headers, timeout=30)
            response.raise_for_status()
            data = response.json()
            # Search responses carry "results"; item-list responses carry
            # "items"; anything else is reported and treated as empty.
            if "results" in data:
                records = data.get("results", [])
            elif "items" in data:
                records = data.get("items", [])
            else:
                records = []
                st.error("Unexpected API response structure. No records found.")
            st.write(f"Retrieved {len(records)} records")
        except requests.exceptions.RequestException as e:
            st.error(f"API Connection Error: {e}")
            records = []
        except ValueError:
            # response.json() raises ValueError for non-JSON bodies.
            st.error("Failed to parse API response as JSON")
            records = []

        # Flatten each raw record into a row of scalar strings.
        items = []
        for record in records:
            if not isinstance(record, dict):
                continue
            description = record.get("description", "")
            if isinstance(description, list):
                description = " ".join([str(d) for d in description])
            subject = record.get("subject", "")
            if isinstance(subject, list):
                # FIX: coerce entries to str so a non-string subject entry
                # cannot crash the join.
                subject = ", ".join(str(s) for s in subject)
            item = {
                "id": record.get("id", ""),
                "title": record.get("title", ""),
                "date": record.get("date", ""),
                "subject": subject,
                "creator": record.get("creator", ""),
                "description": description
            }
            # Some records nest title/date under an "item" sub-object.
            if not item["title"] and "item" in record:
                item["title"] = record.get("item", {}).get("title", "")
            if not item["date"] and "item" in record:
                item["date"] = record.get("item", {}).get("date", "")
            items.append(item)
        metadata_df = pd.DataFrame(items)
# Placeholder values the LOC feed uses for absent metadata fields.
_MISSING_SENTINELS = ("", "N/A", "null", None)

def is_incomplete(value):
    """Return True when *value* represents a missing metadata field.

    A cell counts as incomplete when pandas sees it as NA (None/NaN) or
    when it holds one of the placeholder strings emitted by the API.
    """
    if pd.isna(value):
        return True
    return value in _MISSING_SENTINELS
if not metadata_df.empty:
    # Boolean frame: True wherever a cell is missing or a placeholder.
    # (DataFrame.map is the idiom already used below; the original
    # apply(lambda row: row.map(...), axis=1) did the same work row by row.)
    missing_cells = metadata_df.map(is_incomplete)

    # Records with at least one incomplete field.
    incomplete_mask = missing_cells.any(axis=1)
    # NOTE(review): incomplete_count / completeness_table appear unused —
    # presumably referenced by the stats_html template whose HTML body was
    # stripped from this copy; kept for that reason.
    incomplete_count = incomplete_mask.sum()

    # Overall completeness = filled cells / all cells.
    total_fields = metadata_df.size
    filled_fields = (~missing_cells).sum().sum()
    overall_percent = (filled_fields / total_fields) * 100

    # "Overall Metadata Completeness" indicator in the sidebar.
    st.sidebar.markdown(
        f"""
Overall Metadata Completeness:
{overall_percent:.1f}%
""",
        unsafe_allow_html=True
    )

    # Per-field completeness percentages (share of filled cells per column).
    completeness = (~missing_cells).mean() * 100
    completeness_table = completeness.round(1).to_frame(name="Completeness (%)")

    # Sidebar stats summary (HTML body stripped from this copy).
    stats_html = f"""
"""
    stats_placeholder.markdown(stats_html, unsafe_allow_html=True)

    # Fill the Field Completeness Breakdown placeholder.
    with completeness_placeholder:
        st.markdown("""
Field Completeness Breakdown
""", unsafe_allow_html=True)
        completeness_df = pd.DataFrame({
            "Field": completeness.index,
            "Completeness (%)": completeness.values
        })
        # FIX: format only the numeric column — applying "{:.1f}%" to the
        # string "Field" column raises ValueError when the Styler renders.
        st.dataframe(
            completeness_df.style.background_gradient(cmap="Greens").format({"Completeness (%)": "{:.1f}%"}),
            use_container_width=True,
            height=240
        )
        st.markdown("", unsafe_allow_html=True)

    # Main panel: sample of the retrieved metadata.
    st.subheader("Retrieved Metadata Sample")
    st.dataframe(metadata_df.head())

    # Bar chart of per-field completeness, themed for the dark UI.
    st.subheader("Metadata Completeness Analysis")
    fig = px.bar(
        completeness_df,
        x="Field",
        y="Completeness (%)",
        title="Metadata Completeness by Field",
        color="Completeness (%)",
        color_continuous_scale="Greens"
    )
    fig.update_layout(
        plot_bgcolor="#1A1A1A",
        paper_bgcolor="#1A1A1A",
        font_color="white",
        title_font_color="white",
        margin=dict(l=10, r=10, t=40, b=10),
        coloraxis_showscale=False
    )
    fig.update_xaxes(title_font_color="white", tickfont_color="white", gridcolor="#333333")
    fig.update_yaxes(title_font_color="white", tickfont_color="white", gridcolor="#333333")
    st.plotly_chart(fig, use_container_width=True)

    # --- Suggested metadata enhancements via TF-IDF similarity ---
    st.subheader("✨ Suggested Metadata Enhancements")

    # FIX: the extractor fills absent fields with "" (never NaN), so the
    # original .notnull() filters matched every row; use is_incomplete so
    # empty/placeholder strings are excluded too.
    incomplete_records = metadata_df[incomplete_mask]
    incomplete_with_desc = incomplete_records[~incomplete_records['description'].map(is_incomplete)]
    reference_df = metadata_df[
        ~metadata_df['subject'].map(is_incomplete)
        & ~metadata_df['description'].map(is_incomplete)
    ]

    tfidf = TfidfVectorizer(stop_words='english')
    if len(incomplete_with_desc) > 1 and len(reference_df) > 1:
        try:
            suggestions = []
            # Fit the vocabulary on descriptions whose subjects are known.
            tfidf_matrix = tfidf.fit_transform(reference_df['description'])
            for idx, row in incomplete_with_desc.iterrows():
                # FIX: missing subjects are "" (not NaN), so the original
                # pd.isna(row['subject']) was always False and no suggestion
                # was ever produced.
                if is_incomplete(row['subject']) and not is_incomplete(row['description']):
                    desc_vec = tfidf.transform([str(row['description'])])
                    sims = cosine_similarity(desc_vec, tfidf_matrix).flatten()
                    top_idx = sims.argmax()
                    # Skip records with no lexical overlap at all — argmax of
                    # an all-zero vector would pick an arbitrary row 0.
                    if sims[top_idx] <= 0:
                        continue
                    suggested_subject = reference_df.iloc[top_idx]['subject']
                    if not is_incomplete(suggested_subject):
                        suggestions.append((row['title'], suggested_subject))
            if suggestions:
                suggestions_df = pd.DataFrame(suggestions, columns=["Title", "Suggested Subject"])
                # FIX: the original concatenated a single-quoted string across
                # a raw newline — a SyntaxError. NOTE(review): to_markdown
                # needs the optional 'tabulate' package at runtime.
                st.markdown(suggestions_df.to_markdown(index=False), unsafe_allow_html=True)
            else:
                st.markdown("""
No metadata enhancement suggestions available.
""", unsafe_allow_html=True)
        except Exception as e:
            st.error(f"Error generating metadata suggestions: {e}")
    else:
        st.markdown("""
Not enough descriptive data to generate metadata suggestions.
""", unsafe_allow_html=True)
else:
    st.warning("⚠️ No metadata records found for this collection. Try selecting another one.")