Spaces: Build error
Update deliverable2.py
deliverable2.py  CHANGED  (+113 -84)

@@ -1,84 +1,113 @@
-import requests
-from bs4 import BeautifulSoup
-import
... (old lines 4-84: blank lines, all removed)
+import requests
+from bs4 import BeautifulSoup
+from sentence_transformers import SentenceTransformer, util
+from transformers import pipeline
+
+class URLValidator:
+    """
+    A URL validation class that evaluates the credibility of a webpage
+    using multiple factors: domain trust, content relevance, fact-checking, and bias detection.
+    (Domain trust and fact-checking are currently mocked.)
+    """
+
+    def __init__(self):
+        # Load models once at construction so they are not reloaded on every call
+        self.similarity_model = SentenceTransformer('sentence-transformers/all-mpnet-base-v2')
+        # Loaded for fact-checking, but not yet wired into check_facts()
+        self.fake_news_classifier = pipeline("text-classification", model="mrm8488/bert-tiny-finetuned-fake-news-detection")
+        self.sentiment_analyzer = pipeline("text-classification", model="cardiffnlp/twitter-roberta-base-sentiment")
+
+    def fetch_page_content(self, url: str) -> str:
+        """ Fetches and extracts text content from the given URL, handling errors gracefully. """
+        try:
+            headers = {"User-Agent": "Mozilla/5.0"}  # Helps bypass some bot protections
+            response = requests.get(url, timeout=10, headers=headers)
+            response.raise_for_status()
+            soup = BeautifulSoup(response.text, "html.parser")
+
+            content = " ".join([p.text for p in soup.find_all("p")])
+            return content if content else "Error: No readable content found on the page."
+        except requests.exceptions.Timeout:
+            return "Error: Request timed out."
+        except requests.exceptions.HTTPError as e:
+            return f"Error: HTTP {e.response.status_code} - Page may not exist."
+        except requests.exceptions.RequestException as e:
+            return f"Error: Unable to fetch URL ({str(e)})."
+
+    def get_domain_trust(self, url: str, content: str) -> int:
+        """ Computes the domain trust score (0-100). Uses a mock approach for now. """
+        if "Error" in content:
+            return 0  # If the page fetch failed, trust is low
+        return (len(url) % 5 + 1) * 20  # Mock trust rating, scaled to 0-100 to match the other scores
+
+    def compute_similarity_score(self, user_query: str, content: str) -> int:
+        """ Computes semantic similarity between user query and page content. """
+        if "Error" in content:
+            return 0
+        return int(util.pytorch_cos_sim(
+            self.similarity_model.encode(user_query),
+            self.similarity_model.encode(content)
+        ).item() * 100)
+
+    def check_facts(self, content: str) -> int:
+        """ Simulated function to check fact reliability (0-100). """
+        if "Error" in content:
+            return 0
+        return (len(content) % 5 + 1) * 20  # Mock fact-check rating, scaled to 0-100
+
+    def detect_bias(self, content: str) -> int:
+        """ Uses NLP sentiment analysis to detect potential bias in content. """
+        if "Error" in content:
+            return 0
+        # This model reports labels as LABEL_0 (negative), LABEL_1 (neutral),
+        # and LABEL_2 (positive), so match on those rather than on sentiment words
+        label = self.sentiment_analyzer(content[:512])[0]["label"]
+        return 100 if label == "LABEL_2" else 50 if label == "LABEL_1" else 30
+
+    def get_star_rating(self, score: float) -> tuple:
+        """ Converts a score (0-100) into a 1-5 star rating. """
+        stars = max(1, min(5, round(score / 20)))  # Normalize the 100-point scale to 5 stars
+        return stars, "⭐" * stars
+
+    def generate_explanation(self, domain_trust, similarity_score, fact_check_score, bias_score, final_score) -> str:
+        """ Generates a human-readable explanation for the score. """
+        reasons = []
+        if domain_trust < 50:
+            reasons.append("The source has low domain authority.")
+        if similarity_score < 50:
+            reasons.append("The content is not highly relevant to your query.")
+        if fact_check_score < 50:
+            reasons.append("Limited fact-checking verification found.")
+        if bias_score < 50:
+            reasons.append("Potential bias detected in the content.")
+
+        return " ".join(reasons) if reasons else "This source is highly credible and relevant."
+
+    def rate_url_validity(self, user_query: str, url: str) -> dict:
+        """ Main function to evaluate the validity of a webpage. """
+        content = self.fetch_page_content(url)
+
+        # If an error occurs, return the error message
+        if "Error" in content:
+            return {"Validation Error": content}
+
+        domain_trust = self.get_domain_trust(url, content)
+        similarity_score = self.compute_similarity_score(user_query, content)
+        fact_check_score = self.check_facts(content)
+        bias_score = self.detect_bias(content)
+
+        final_score = (
+            (0.3 * domain_trust) +
+            (0.3 * similarity_score) +
+            (0.2 * fact_check_score) +
+            (0.2 * bias_score)
+        )
+
+        stars, icon = self.get_star_rating(final_score)
+        explanation = self.generate_explanation(domain_trust, similarity_score, fact_check_score, bias_score, final_score)
+
+        return {
+            "Domain Trust": domain_trust,
+            "Content Relevance": similarity_score,
+            "Fact-Check Score": fact_check_score,
+            "Bias Score": bias_score,
+            "Final Validity Score": final_score,
+            "Star Rating": icon,
+            "Explanation": explanation
+        }
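
For reference, a minimal usage sketch of the committed class. It is illustrative only: it assumes deliverable2.py is importable from the working directory and that the three Hugging Face models download successfully on first use; the query and URL below are hypothetical placeholders.

# Minimal usage sketch, assuming deliverable2.py is on the import path
from deliverable2 import URLValidator

# Instantiating the class loads all three models once, up front
validator = URLValidator()

# Hypothetical inputs, for illustration only
result = validator.rate_url_validity(
    user_query="effects of caffeine on sleep",
    url="https://example.com/articles/caffeine-and-sleep",
)

for key, value in result.items():
    print(f"{key}: {value}")

With the 0.3/0.3/0.2/0.2 weights, component scores of, say, 80 (domain trust), 70 (relevance), 60 (fact-check), and 50 (bias) give a final score of 0.3*80 + 0.3*70 + 0.2*60 + 0.2*50 = 67, which get_star_rating converts to 3 stars (round(67 / 20) == 3).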