from fastapi import APIRouter, HTTPException
from pydantic import BaseModel, HttpUrl
from typing import Dict, Any, List
import logging
import os
from supabase import AsyncClient
from dotenv import load_dotenv

from mediaunmasked.scrapers.article_scraper import ArticleScraper
from mediaunmasked.analyzers.scoring import MediaScorer
from mediaunmasked.utils.logging_config import setup_logging

# Load environment variables
load_dotenv()

# Initialize logging
setup_logging()
logger = logging.getLogger(__name__)

# Initialize router and dependencies
router = APIRouter(tags=["analysis"])
scraper = ArticleScraper()
scorer = MediaScorer()

# Get Supabase credentials
SUPABASE_URL = os.getenv("SUPABASE_URL")
SUPABASE_KEY = os.getenv("SUPABASE_KEY")

# Fail fast if credentials are missing, then initialize the async Supabase client
if not SUPABASE_URL or not SUPABASE_KEY:
    raise RuntimeError("Supabase credentials not found in environment variables")

supabase = AsyncClient(SUPABASE_URL, SUPABASE_KEY)
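
# A minimal sketch of the expected .env file (the values below are
# placeholders, not real credentials):
#
#   SUPABASE_URL=https://your-project-ref.supabase.co
#   SUPABASE_KEY=your-anon-or-service-role-key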

class ArticleRequest(BaseModel):
    url: HttpUrl

class MediaScoreDetails(BaseModel):
    headline_analysis: Dict[str, Any]
    sentiment_analysis: Dict[str, Any]
    bias_analysis: Dict[str, Any]
    evidence_analysis: Dict[str, Any]

class MediaScore(BaseModel):
    media_unmasked_score: float
    rating: str
    details: MediaScoreDetails

class AnalysisResponse(BaseModel):
    headline: str
    content: str
    sentiment: str
    bias: str
    bias_score: float
    bias_percentage: float
    media_score: MediaScore
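
# Illustrative request/response shapes for POST /analyze (placeholder values
# only; actual strings and scores come from the scraper and scorer):
#
#   Request:  {"url": "https://example.com/some-article"}
#   Response: {"headline": "...", "content": "...", "sentiment": "...",
#              "bias": "...", "bias_score": 0.0, "bias_percentage": 0.0,
#              "media_score": {"media_unmasked_score": 0.0, "rating": "...",
#                              "details": {...}}}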

@router.post("/analyze", response_model=AnalysisResponse)
async def analyze_article(request: ArticleRequest) -> AnalysisResponse:
    """
    Analyze an article for bias, sentiment, and credibility.
    
    Args:
        request: ArticleRequest containing the URL to analyze
        
    Returns:
        AnalysisResponse with complete analysis results
        
    Raises:
        HTTPException: If scraping or analysis fails
    """
    try:
        logger.info(f"Analyzing article: {request.url}")
        
        # Check if the article has already been analyzed
        existing_article = await supabase.table('article_analysis').select('*').eq('url', str(request.url)).execute()
        
        if existing_article.data:
            # Return the cached analysis instead of re-scraping and re-scoring
            logger.info("Article already analyzed. Returning cached data.")
            cached_data = existing_article.data[0]
            return AnalysisResponse.parse_obj(cached_data)
        
        # Scrape article
        article = scraper.scrape_article(str(request.url))
        if not article:
            raise HTTPException(
                status_code=400,
                detail="Failed to scrape article content"
            )
        
        # Analyze content
        analysis = scorer.calculate_media_score(
            article["headline"],
            article["content"]
        )
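
        # Shape of `analysis` as consumed below (inferred from the key
        # accesses in this function, not from MediaScorer's source):
        #   {
        #     "media_unmasked_score": float,
        #     "rating": str,
        #     "details": {
        #       "headline_analysis":  {"headline_vs_content_score": float, "flagged_phrases": [...]},
        #       "sentiment_analysis": {"sentiment": str, "manipulation_score": float, "flagged_phrases": [...]},
        #       "bias_analysis":      {"bias": str, "bias_score": float, "bias_percentage": float, "flagged_phrases": [...]},
        #       "evidence_analysis":  {"evidence_based_score": float, "flagged_phrases": [...]}
        #     }
        #   }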
        
        # Log raw values for debugging
        logger.info("Raw values:")
        logger.info(f"media_unmasked_score type: {type(analysis['media_unmasked_score'])}")
        logger.info(f"media_unmasked_score value: {analysis['media_unmasked_score']}")
        
        # Prepare response data
        response_dict = {
            "headline": str(article['headline']),
            "content": str(article['content']),
            "sentiment": str(analysis['details']['sentiment_analysis']['sentiment']),
            "bias": str(analysis['details']['bias_analysis']['bias']),
            "bias_score": float(analysis['details']['bias_analysis']['bias_score']),
            "bias_percentage": float(analysis['details']['bias_analysis']['bias_percentage']),
            "media_score": {
                "media_unmasked_score": float(analysis['media_unmasked_score']),
                "rating": str(analysis['rating']),
                "details": {
                    "headline_analysis": {
                        "headline_vs_content_score": float(analysis['details']['headline_analysis']['headline_vs_content_score']),
                        "flagged_phrases": analysis['details']['headline_analysis'].get('flagged_phrases', [])
                    },
                    "sentiment_analysis": {
                        "sentiment": str(analysis['details']['sentiment_analysis']['sentiment']),
                        "manipulation_score": float(analysis['details']['sentiment_analysis']['manipulation_score']),
                        "flagged_phrases": list(analysis['details']['sentiment_analysis']['flagged_phrases'])
                    },
                    "bias_analysis": {
                        "bias": str(analysis['details']['bias_analysis']['bias']),
                        "bias_score": float(analysis['details']['bias_analysis']['bias_score']),
                        "bias_percentage": float(analysis['details']['bias_analysis']['bias_percentage']),
                        "flagged_phrases": list(analysis['details']['bias_analysis']['flagged_phrases'])
                    },
                    "evidence_analysis": {
                        "evidence_based_score": float(analysis['details']['evidence_analysis']['evidence_based_score']),
                        "flagged_phrases": list(analysis['details']['evidence_analysis']['flagged_phrases'])
                    }
                }
            }
        }
        
        # Persist the new analysis. The upsert is keyed on the table's unique
        # constraint (assumed here to be on `url`), so re-analyzing the same
        # URL replaces the old row instead of duplicating it.
        await supabase.table('article_analysis').upsert({
            'url': str(request.url),
            'headline': response_dict['headline'],
            'content': response_dict['content'],
            'sentiment': response_dict['sentiment'],
            'bias': response_dict['bias'],
            'bias_score': response_dict['bias_score'],
            'bias_percentage': response_dict['bias_percentage'],
            'media_score': response_dict['media_score']
        }).execute()
        
        # Return the response
        return AnalysisResponse.parse_obj(response_dict)
        
    except HTTPException:
        # Let intentional HTTP errors (e.g. the 400 raised above) propagate
        # unchanged instead of being re-wrapped as a 500 below
        raise
    except Exception as e:
        logger.error(f"Analysis failed: {str(e)}", exc_info=True)
        raise HTTPException(
            status_code=500,
            detail=f"Analysis failed: {str(e)}"
        ) from e
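
# Usage sketch (assumption: the import path below is illustrative and not
# confirmed by the repository layout):
#
#   from fastapi import FastAPI
#   from mediaunmasked.api.analyze import router  # hypothetical module path
#
#   app = FastAPI()
#   app.include_router(router, prefix="/api")
#
# Then, with the server running locally:
#
#   curl -X POST http://localhost:8000/api/analyze \
#        -H "Content-Type: application/json" \
#        -d '{"url": "https://example.com/some-article"}'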