# NOTE: removed stray "Spaces: / Running / Running" text — Hugging Face Spaces
# page residue accidentally pasted into the top of this source file.
# MetaDiscovery Agent - LOC API with Collection Selector and Search Endpoint + Enhanced Features
#
# Streamlit app: fetches item metadata from the Library of Congress JSON API
# for a user-selected collection, then analyzes completeness and suggests
# subject enhancements via TF-IDF similarity over item descriptions.
import requests
import pandas as pd
import numpy as np
import streamlit as st
import plotly.express as px
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# Streamlit app header
st.title("MetaDiscovery Agent for Library of Congress Collections")
st.markdown("""
This tool connects to the LOC API, retrieves metadata from a selected collection, and performs
an analysis of metadata completeness, suggests enhancements, and identifies authority gaps.
""")
# Updated collection URLs using the correct LOC API format.
# Each entry maps a display name to the API path segment and a pre-encoded
# ('+'-joined) query string used to build the search URL below.
collections = {
    "American Revolutionary War Maps": {"path": "maps", "query": "american+revolutionary+war"},
    "Civil War Maps": {"path": "maps", "query": "civil+war"},
    "Women's Suffrage": {"path": "collection", "query": "women+suffrage"},
    "World War I Posters": {"path": "pictures", "query": "world+war+I+posters"}
}

# Sidebar for selecting collection
st.sidebar.markdown("## Settings")
selected = st.sidebar.selectbox("Select a collection", list(collections.keys()))
collection_info = collections[selected]

# Correct URL format for LOC API: /{path}/search/?q=...&fo=json returns JSON.
collection_url = f"https://www.loc.gov/{collection_info['path']}/search/?q={collection_info['query']}&fo=json"

st.sidebar.write(f"Selected Collection: {selected}")
st.sidebar.write(f"API URL: {collection_url}")
# Fetch data from LOC API with error handling.
# On any failure (network, HTTP error, bad JSON) `records` degrades to an
# empty list so the rest of the app renders a friendly empty state.
try:
    # Explicit timeout so a slow or unresponsive LOC endpoint cannot hang
    # the Streamlit app indefinitely. Timeouts surface as RequestException.
    response = requests.get(collection_url, timeout=30)
    response.raise_for_status()  # Raise exception for 4XX/5XX responses
    data = response.json()

    # Handle both possible response structures ("results" vs "items").
    if "results" in data:
        records = data.get("results", [])
    elif "items" in data:
        records = data.get("items", [])
    else:
        records = []
        st.error("Unexpected API response structure. No records found.")

    st.write(f"Retrieved {len(records)} records")
except requests.exceptions.RequestException as e:
    st.error(f"API Connection Error: {e}")
    records = []
except ValueError:
    # response.json() raises ValueError when the body is not valid JSON.
    st.error("Failed to parse API response as JSON")
    records = []
# Extract selected metadata fields with proper path traversal.
# Builds one flat dict per record; fields absent from the API payload
# default to "" so every row has the same columns.
items = []
for record in records:
    # Skip anything that isn't a dict (defensive: API list entries vary).
    if isinstance(record, dict):
        # For direct field access
        item = {
            "id": record.get("id", ""),
            "title": record.get("title", ""),
            "date": record.get("date", ""),
            # LOC sometimes returns subjects as a list; flatten to one string.
            "subject": ", ".join(record.get("subject", [])) if isinstance(record.get("subject"), list) else record.get("subject", ""),
            "creator": record.get("creator", ""),
            "description": record.get("description", "")
        }
        # For nested field access (common in LOC API): some payloads nest
        # title/date under an "item" sub-object — fall back to those.
        if not item["title"] and "item" in record:
            item["title"] = record.get("item", {}).get("title", "")
        if not item["date"] and "item" in record:
            item["date"] = record.get("item", {}).get("date", "")
        items.append(item)

# Create DataFrame (empty DataFrame when no records were retrieved).
metadata_df = pd.DataFrame(items)
if not metadata_df.empty:
    # FIX: the extraction step fills absent fields with "" rather than NaN,
    # which made every column look 100% complete and prevented the
    # pd.isna(row['subject']) check below from ever firing. Normalize empty
    # strings to NaN so missing values are actually detected.
    metadata_df = metadata_df.replace("", np.nan)

    # NOTE(review): the emoji in the section headers below appear mojibake'd
    # (bad encoding round-trip) — confirm the intended glyphs. Preserved as-is.
    st.subheader("π¦ Retrieved Metadata Sample")
    st.dataframe(metadata_df.head())

    # Metadata completeness analysis: percentage of non-missing values per field.
    st.subheader("π§ Metadata Completeness Analysis")
    completeness = metadata_df.notnull().mean() * 100
    completeness_df = pd.DataFrame({"Field": completeness.index, "Completeness (%)": completeness.values})

    # Plot completeness
    fig = px.bar(completeness_df, x="Field", y="Completeness (%)", title="Metadata Completeness by Field")
    st.plotly_chart(fig)

    # List records with at least one missing value.
    st.subheader("β οΈ Records with Incomplete Metadata")
    incomplete_records = metadata_df[metadata_df.isnull().any(axis=1)]
    if not incomplete_records.empty:
        st.dataframe(incomplete_records)
    else:
        st.success("All metadata fields are complete in this collection!")

    # Show exact items that need updates
    st.subheader("π Identifiers of Items Needing Metadata Updates")
    if not incomplete_records.empty:
        st.write(incomplete_records[['id', 'title']])
    else:
        st.success("All records are complete!")

    # Suggest subjects for records that have a description but no subject,
    # by borrowing the subject of the most similar described record.
    st.subheader("β¨ Suggested Metadata Enhancements")

    # Only process if we have descriptions and enough data
    filled_descriptions = metadata_df[metadata_df['description'].notnull()]['description'].astype(str)
    if len(filled_descriptions) > 1:
        try:
            tfidf = TfidfVectorizer(stop_words='english')
            tfidf_matrix = tfidf.fit_transform(filled_descriptions)

            suggestions = []
            for idx, row in incomplete_records.iterrows():
                if pd.isna(row['subject']) and pd.notna(row['description']):
                    desc_vec = tfidf.transform([str(row['description'])])
                    sims = cosine_similarity(desc_vec, tfidf_matrix).flatten()
                    # FIX: argmax is a position within the *filtered*
                    # filled_descriptions corpus, not a position in
                    # metadata_df — map it back through the subset's index
                    # before looking up the suggested subject.
                    top_label = filled_descriptions.index[sims.argmax()]
                    # NOTE(review): the best match may be the record itself
                    # when its own description is in the corpus; in that case
                    # its subject is NaN and the guard below drops it.
                    suggested_subject = metadata_df.loc[top_label, 'subject']
                    if pd.notna(suggested_subject) and suggested_subject:  # Only add valid suggestions
                        suggestions.append((row['title'], suggested_subject))

            if suggestions:
                suggestions_df = pd.DataFrame(suggestions, columns=["Title", "Suggested Subject"])
                st.dataframe(suggestions_df)
            else:
                st.info("No metadata enhancement suggestions available.")
        except Exception as e:
            st.error(f"Error generating metadata suggestions: {e}")
    else:
        st.info("Not enough descriptive data to generate metadata suggestions.")
else:
    st.warning("No metadata records found for this collection. Try selecting another one.")