removing extra analyzer_service.py

mediaunmasked/services/analyzer_service.py
DELETED
@@ -1,84 +0,0 @@
-from mediaunmasked.analyzers.headline_analyzer import HeadlineAnalyzer
-from mediaunmasked.schemas.requests import AnalyzeRequest, AnalysisResponse
-from fastapi import HTTPException
-from mediaunmasked.scrapers.article_scraper import ArticleScraper
-from mediaunmasked.analyzers.scoring import MediaScorer
-import logging
-
-logger = logging.getLogger(__name__)
-scraper = ArticleScraper()
-scorer = MediaScorer()
-
-class AnalyzerService:
-    def __init__(self):
-        self.headline_analyzer = HeadlineAnalyzer()
-
-    async def analyze_content(self, headline: str, content: str):
-        result = self.headline_analyzer.analyze(headline, content)
-        return result
-
-    async def analyze_url(self, request: AnalyzeRequest) -> AnalysisResponse:
-        """
-        Analyze an article for bias, sentiment, and credibility.
-        """
-        try:
-            # Convert URL to string explicitly
-            url_str = str(request.url)
-            logger.info(f"Analyzing article: {url_str}")
-
-            # Scrape article (now synchronous)
-            article = scraper.scrape_article(url_str)
-            if not article:
-                raise HTTPException(
-                    status_code=400,
-                    detail="Failed to scrape article content"
-                )
-
-            # Analyze content
-            analysis = scorer.calculate_media_score(
-                article["headline"],
-                article["content"]
-            )
-
-            # Construct response
-            response_dict = {
-                "headline": str(article['headline']),
-                "content": str(article['content']),
-                "sentiment": str(analysis['details']['sentiment_analysis']['sentiment']),
-                "bias": str(analysis['details']['bias_analysis']['bias']),
-                "bias_score": float(analysis['details']['bias_analysis']['bias_score']),
-                "bias_percentage": float(analysis['details']['bias_analysis']['bias_percentage']),
-                "flagged_phrases": list(analysis['details']['sentiment_analysis']['flagged_phrases']),
-                "media_score": {
-                    "media_unmasked_score": float(analysis['media_unmasked_score']),
-                    "rating": str(analysis['rating']),
-                    "details": {
-                        "headline_analysis": {
-                            "headline_vs_content_score": float(analysis['details']['headline_analysis']['headline_vs_content_score']),
-                            "contradictory_phrases": analysis['details']['headline_analysis'].get('contradictory_phrases', [])
-                        },
-                        "sentiment_analysis": {
-                            "sentiment": str(analysis['details']['sentiment_analysis']['sentiment']),
-                            "manipulation_score": float(analysis['details']['sentiment_analysis']['manipulation_score']),
-                            "flagged_phrases": list(analysis['details']['sentiment_analysis']['flagged_phrases'])
-                        },
-                        "bias_analysis": {
-                            "bias": str(analysis['details']['bias_analysis']['bias']),
-                            "bias_score": float(analysis['details']['bias_analysis']['bias_score']),
-                            "bias_percentage": float(analysis['details']['bias_analysis']['bias_percentage'])
-                        },
-                        "evidence_analysis": {
-                            "evidence_based_score": float(analysis['details']['evidence_analysis']['evidence_based_score'])
-                        }
-                    }
-                }
-            }
-
-            return AnalysisResponse.parse_obj(response_dict)
-
-        except Exception as e:
-            logger.error(f"Analysis failed inside of analyzer_service.py: {str(e)}", exc_info=True)
-            raise HTTPException(
-                status_code=500,
-                detail=f"Analysis failed inside of analyzer_service.py: {str(e)}"
-            )
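
For context, the deleted service wrapped the whole analysis flow behind one async entry point. A minimal sketch of how such a service is typically mounted on a FastAPI router follows; the router module, route path, and wiring here are assumptions for illustration, not part of this repo, and it only presumes that AnalyzeRequest/AnalysisResponse are Pydantic models, as the deleted file's imports suggest.

# Hypothetical wiring sketch, not part of this commit.
from fastapi import APIRouter

from mediaunmasked.schemas.requests import AnalyzeRequest, AnalysisResponse
from mediaunmasked.services.analyzer_service import AnalyzerService

router = APIRouter()
service = AnalyzerService()

@router.post("/analyze", response_model=AnalysisResponse)
async def analyze(request: AnalyzeRequest) -> AnalysisResponse:
    # Delegate to the service; the HTTPExceptions it raises (400 on
    # scrape failure, 500 on analysis errors) propagate to FastAPI's
    # standard error handling unchanged.
    return await service.analyze_url(request)
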
mediaunmasked/web/__init__.py
DELETED
@@ -1 +0,0 @@
-# Empty file is fine

mediaunmasked/web/app.py
DELETED
@@ -1,82 +0,0 @@
-import streamlit as st
-from ..analyzers.bias_analyzer import BiasAnalyzer
-from ..scrapers.article_scraper import ArticleScraper
-from ..utils.logging_config import setup_logging
-import plotly.graph_objects as go
-
-def create_sentiment_gauge(score: float) -> go.Figure:
-    """Create a gauge chart for sentiment visualization."""
-    fig = go.Figure(go.Indicator(
-        mode = "gauge+number",
-        value = score * 100,
-        title = {'text': "Sentiment Score"},
-        gauge = {
-            'axis': {'range': [0, 100]},
-            'bar': {'color': "darkblue"},
-            'steps': [
-                {'range': [0, 33], 'color': "lightgray"},
-                {'range': [33, 66], 'color': "gray"},
-                {'range': [66, 100], 'color': "darkgray"}
-            ],
-        }
-    ))
-    return fig
-
-def main():
-    # Set up logging
-    setup_logging()
-
-    # Initialize components
-    scraper = ArticleScraper()
-    analyzer = BiasAnalyzer()
-
-    # Set up the Streamlit interface
-    st.title("Media Bias Analyzer")
-    st.write("Analyze bias and sentiment in news articles")
-
-    # URL input
-    url = st.text_input("Enter article URL:", "https://www.snopes.com/articles/469232/musk-son-told-trump-shut-up/")
-
-    if st.button("Analyze"):
-        with st.spinner("Analyzing article..."):
-            # Scrape the article
-            article = scraper.scrape_article(url)
-
-            if article:
-                # Show article details
-                st.subheader("Article Details")
-                st.write(f"**Headline:** {article['headline']}")
-
-                with st.expander("Show Article Content"):
-                    st.write(article['content'])
-
-                # Analyze content
-                result = analyzer.analyze(article['content'])
-
-                # Display results in columns
-                col1, col2 = st.columns(2)
-
-                with col1:
-                    st.subheader("Sentiment Analysis")
-                    st.write(f"**Overall Sentiment:** {result.sentiment}")
-                    fig = create_sentiment_gauge(result.bias_score / 100)
-                    st.plotly_chart(fig)
-
-                with col2:
-                    st.subheader("Bias Analysis")
-                    st.write(f"**Detected Bias:** {result.bias}")
-                    st.write(f"**Confidence Score:** {result.bias_score:.1f}%")
-
-                # Show flagged phrases
-                if result.flagged_phrases:
-                    st.subheader("Potentially Biased Phrases")
-                    for phrase in result.flagged_phrases:
-                        st.warning(phrase)
-                else:
-                    st.info("No potentially biased phrases detected")
-
-            else:
-                st.error("Failed to fetch article. Please check the URL and try again.")
-
-if __name__ == "__main__":
-    main()
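
For reference, because the deleted app.py used relative imports (from ..analyzers ...), it could not have been launched directly with "streamlit run mediaunmasked/web/app.py": Streamlit executes the target as a top-level script, which breaks relative imports. A thin wrapper at the repo root is one common workaround; the run_app.py filename below is hypothetical, not part of this repo.

# run_app.py -- hypothetical launcher sketch, not part of this commit.
# Importing through the installed package keeps the relative imports
# inside mediaunmasked.web.app working. Launch with:
#   streamlit run run_app.py
from mediaunmasked.web.app import main

main()
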