import gradio as gr
import requests
from bs4 import BeautifulSoup
import os
import json
import logging
from typing import Any, Dict, List, Optional
import pandas as pd  # Used to build the summary dataframe output
# ------------------------
# Configuration
# ------------------------
WORDLIFT_API_URL = "https://api.wordlift.io/content-evaluations"
WORDLIFT_API_KEY = os.getenv("WORDLIFT_API_KEY") # Get API key from environment variable
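# Optional sketch: during local development the key can instead come from a
# .env file via python-dotenv (pip install python-dotenv), loaded before the
# os.getenv() call above:
#   from dotenv import load_dotenv
#   load_dotenv()  # reads WORDLIFT_API_KEY=... from a .env file in the CWD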
# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# ------------------------
# Custom CSS & Theme
# ------------------------
css = """
@import url('https://fonts.googleapis.com/css2?family=Open+Sans:wght@300;400;600;700&display=swap');
body {
font-family: 'Open Sans', sans-serif !important;
}
.primary-btn {
background-color: #3452db !important;
color: white !important;
}
.primary-btn:hover {
background-color: #2a41af !important;
}
.gradio-container {
max-width: 1200px; /* Limit width for better readability */
margin: auto;
}
"""
theme = gr.themes.Soft(
primary_hue=gr.themes.colors.Color(
name="blue",
c50="#eef1ff",
c100="#e0e5ff",
c200="#c3cbff",
c300="#a5b2ff",
c400="#8798ff",
c500="#6a7eff",
c600="#3452db",
c700="#2a41af",
c800="#1f3183",
c900="#152156",
c950="#0a102b",
)
)
# ------------------------
# Content Fetching Logic
# ------------------------
def fetch_content_from_url(url: str, timeout: int = 15) -> Optional[str]:
"""Fetches main text content from a URL."""
logger.info(f"Fetching content from: {url}")
try:
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
}
response = requests.get(url, headers=headers, timeout=timeout)
response.raise_for_status() # Raise an exception for bad status codes
soup = BeautifulSoup(response.content, 'html.parser')
# Attempt to find main content block
main_content = soup.find('main') or soup.find('article')
if main_content:
# Extract text from common text-containing tags within the main block
text_elements = main_content.find_all(['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'li', 'blockquote'])
text = ' '.join([elem.get_text() for elem in text_elements])
        else:
            # Fall back to the whole document when no <main> or <article> is found;
            # soup.body can be None for malformed pages, so fall back to soup itself
            logger.warning(f"No <main> or <article> found for {url}, extracting from <body>.")
            text_elements = (soup.body or soup).find_all(['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'li', 'blockquote'])
            text = ' '.join([elem.get_text() for elem in text_elements])
# Clean up extra whitespace
text = ' '.join(text.split())
# Limit text length to avoid excessively large API calls (adjust as needed)
max_text_length = 150000 # approx 25k words, adjust based on API limits/cost
if len(text) > max_text_length:
logger.warning(f"Content for {url} is too long ({len(text)} chars), truncating to {max_text_length} chars.")
text = text[:max_text_length] + "..." # Indicate truncation
return text
except requests.exceptions.RequestException as e:
logger.error(f"Failed to fetch content from {url}: {e}")
return None
except Exception as e:
logger.error(f"Error processing content from {url}: {e}")
return None
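# Quick usage sketch (hypothetical URL; requires network access):
#   text = fetch_content_from_url("https://example.com/blog-post")
#   if text is None:
#       print("Fetch failed or no content extracted.")
#   else:
#       print(f"Extracted {len(text)} chars, preview: {text[:120]!r}")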
# ------------------------
# WordLift API Call Logic
# ------------------------
def call_wordlift_api(text: str, keywords: Optional[List[str]] = None) -> Optional[Dict[str, Any]]:
"""Calls the WordLift Content Evaluation API."""
if not WORDLIFT_API_KEY:
logger.error("WORDLIFT_API_KEY environment variable not set.")
return {"error": "API key not configured."}
if not text:
return {"error": "No content provided or fetched."}
payload = {
"text": text,
"keywords": keywords if keywords else []
}
headers = {
'Authorization': f'Key {WORDLIFT_API_KEY}',
'Content-Type': 'application/json',
'Accept': 'application/json'
}
logger.info(f"Calling WordLift API with text length {len(text)} and {len(keywords or [])} keywords.")
try:
response = requests.post(WORDLIFT_API_URL, headers=headers, json=payload, timeout=60) # Increased timeout
response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx)
return response.json()
except requests.exceptions.HTTPError as e:
logger.error(f"WordLift API HTTP error: {e.response.status_code} - {e.response.text}")
try:
error_detail = e.response.json()
        except ValueError:  # response.json() may raise json.JSONDecodeError or a simplejson variant; both subclass ValueError
error_detail = e.response.text
return {"error": f"API returned status code {e.response.status_code}", "details": error_detail}
except requests.exceptions.RequestException as e:
logger.error(f"WordLift API request error: {e}")
return {"error": f"API request failed: {e}"}
except Exception as e:
logger.error(f"Unexpected error during API call: {e}")
return {"error": f"An unexpected error occurred: {e}"}
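# Expected shape of a successful response, inferred from the parsing in
# evaluate_urls_batch below (values are illustrative; any fields beyond those
# accessed there are assumptions, not a documented API contract):
#   {
#     "quality_score": {
#       "overall": 78.5,
#       "breakdown": {
#         "content": {"purpose": 80, "accuracy": 75, "depth": 70},
#         "readability": {"score": 2.5, "grade_level": 9},
#         "seo": {"score": 65}
#       }
#     },
#     "metadata": {"word_count": 1200}
#   }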
# ------------------------
# Main Evaluation Batch Function
# ------------------------
def evaluate_urls_batch(url_data: pd.DataFrame):
"""
Evaluates a batch of URLs using the WordLift API.
Args:
url_data: A pandas DataFrame with columns ['URL', 'Target Keywords (comma-separated)'].
Returns:
A tuple containing:
- A pandas DataFrame with the summary results.
- A dictionary containing the full results (including errors) keyed by URL.
"""
    # `not df` raises ValueError on a DataFrame, so check for None/empty explicitly
    if url_data is None or url_data.empty:
return pd.DataFrame(columns=['URL', 'Status', 'Overall Score', 'Content Purpose', 'Content Accuracy', 'Content Depth', 'Readability Score (API)', 'Readability Grade Level', 'SEO Score', 'Word Count', 'Error/Details']), {}
summary_results = []
full_results = {}
for index, row in url_data.iterrows():
        # Cells from the Gradio dataframe may be None/NaN, so guard before .strip()
        url = str(row['URL']).strip() if pd.notna(row['URL']) else ""
        keywords_cell = row['Target Keywords (comma-separated)']
        keywords_str = str(keywords_cell).strip() if pd.notna(keywords_cell) else ""
keywords = [kw.strip() for kw in keywords_str.split(',') if kw.strip()]
if not url:
summary_results.append([url, "Skipped", None, None, None, None, None, None, None, None, "Empty URL"])
full_results[url if url else f"Row_{index}"] = {"status": "Skipped", "error": "Empty URL input."}
continue
logger.info(f"Processing URL: {url} with keywords: {keywords}")
# 1. Fetch Content
content = fetch_content_from_url(url)
if content is None or not content.strip():
status = "Failed"
error_msg = "Failed to fetch or extract content."
summary_results.append([url, status, None, None, None, None, None, None, None, None, error_msg])
full_results[url] = {"status": status, "error": error_msg}
logger.error(f"Processing failed for {url}: {error_msg}")
continue # Move to next URL
# 2. Call WordLift API
api_result = call_wordlift_api(content, keywords)
# 3. Process API Result
summary_row = [url]
if api_result and "error" not in api_result:
status = "Success"
qs = api_result.get('quality_score', {})
breakdown = qs.get('breakdown', {})
content_breakdown = breakdown.get('content', {})
readability_breakdown = breakdown.get('readability', {})
seo_breakdown = breakdown.get('seo', {})
metadata = api_result.get('metadata', {})
summary_row.extend([
status,
qs.get('overall', None),
content_breakdown.get('purpose', None),
content_breakdown.get('accuracy', None),
content_breakdown.get('depth', None),
readability_breakdown.get('score', None), # API's readability score (e.g. 2.5)
readability_breakdown.get('grade_level', None),
seo_breakdown.get('score', None),
metadata.get('word_count', None),
None # No error
])
full_results[url] = api_result # Store full API result
else:
status = "Failed"
error_msg = api_result.get("error", "Unknown API error.") if api_result else "API call failed."
details = api_result.get("details", "") if api_result else ""
summary_row.extend([
status,
None, None, None, None, None, None, None, None,
f"{error_msg} {details}"
])
full_results[url] = {"status": status, "error": error_msg, "details": details}
logger.error(f"API call failed for {url}: {error_msg} {details}")
summary_results.append(summary_row)
# Create pandas DataFrame for summary output
summary_df = pd.DataFrame(summary_results, columns=[
'URL', 'Status', 'Overall Score', 'Content Purpose',
'Content Accuracy', 'Content Depth', 'Readability Score (API)',
'Readability Grade Level', 'SEO Score', 'Word Count', 'Error/Details'
])
# Format numeric columns for display if they are not None
for col in ['Overall Score', 'Content Purpose', 'Content Accuracy', 'Content Depth', 'Readability Score (API)', 'Readability Grade Level', 'SEO Score', 'Word Count']:
if col in summary_df.columns:
# Convert to numeric, coercing errors, then format
summary_df[col] = pd.to_numeric(summary_df[col], errors='coerce')
if col in ['Overall Score', 'Readability Score (API)', 'SEO Score']:
summary_df[col] = summary_df[col].apply(lambda x: f'{x:.1f}' if pd.notna(x) else '-')
else:
summary_df[col] = summary_df[col].apply(lambda x: f'{int(x)}' if pd.notna(x) else '-')
return summary_df, full_results
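# Programmatic usage sketch (hypothetical URL; requires a configured API key):
#   df = pd.DataFrame(
#       [["https://example.com/article1", "keyword A, keyword B"]],
#       columns=["URL", "Target Keywords (comma-separated)"],
#   )
#   summary, full = evaluate_urls_batch(df)
#   print(summary.to_string(index=False))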
# ------------------------
# Gradio Blocks Interface Setup
# ------------------------
with gr.Blocks(css=css, theme=theme) as demo:
gr.Markdown("# WordLift Multi-URL Content Evaluator")
gr.Markdown(
"Enter up to 30 URLs in the table below. "
"Optionally, provide comma-separated target keywords for each URL. "
"The app will fetch content from each URL and evaluate it using the WordLift API."
)
with gr.Row():
with gr.Column():
url_input_df = gr.Dataframe(
headers=["URL", "Target Keywords (comma-separated)"],
datatype=["str", "str"],
                row_count=(1, "dynamic"),  # let users add rows; the 30-URL limit is advisory, not enforced
col_count=(2, "fixed"),
value=[["https://example.com/article1", "keyword A, keyword B"], ["https://example.com/article2", ""]], # Default examples
label="URLs and Keywords"
)
submit_button = gr.Button("Evaluate All URLs", elem_classes=["primary-btn"])
gr.Markdown("## Evaluation Results")
with gr.Column():
summary_output_df = gr.DataFrame(
label="Summary Results",
headers=['URL', 'Status', 'Overall Score', 'Content Purpose', 'Content Accuracy', 'Content Depth', 'Readability Score (API)', 'Readability Grade Level', 'SEO Score', 'Word Count', 'Error/Details'],
datatype=["str", "str", "str", "str", "str", "str", "str", "str", "str", "str", "str"], # Use str to handle '-' for missing values
wrap=True # Wrap text in columns
)
with gr.Accordion("Full JSON Results", open=False):
full_results_json = gr.JSON(label="Raw API Results per URL")
submit_button.click(
fn=evaluate_urls_batch,
inputs=[url_input_df],
outputs=[summary_output_df, full_results_json]
)
# Launch the app
if __name__ == "__main__":
if not WORDLIFT_API_KEY:
logger.error("\n----------------------------------------------------------")
logger.error("WORDLIFT_API_KEY environment variable is not set.")
logger.error("Please set it before running the script:")
logger.error(" export WORDLIFT_API_KEY='YOUR_API_KEY'")
logger.error("Or if using a .env file and python-dotenv:")
logger.error(" pip install python-dotenv")
logger.error(" # Add WORDLIFT_API_KEY=YOUR_API_KEY to a .env file")
logger.error(" # import dotenv; dotenv.load_dotenv()")
logger.error(" # in your script before getting the key.")
logger.error("----------------------------------------------------------\n")
# Optionally exit or raise error here if the key is strictly required to launch
# exit()
pass # Allow launching, but API calls will fail
logger.info("Launching Gradio app...")
demo.launch()