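"""SmartReview Pro: a Gradio app for customer review analysis.

Provides sentiment analysis with aspect keywords, fake-review detection,
review quality scoring, competitor comparison, and report generation.
"""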
import gradio as gr
import pandas as pd
import json
import re
from datetime import datetime
from typing import List, Dict, Tuple
from transformers import pipeline
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import sqlite3

# Initialize models (the sentiment model is downloaded on first run)
sentiment_analyzer = pipeline("sentiment-analysis", model="cardiffnlp/twitter-roberta-base-sentiment-latest")
# Aspect-based sentiment is handled by the rule-based keyword approach in
# ReviewAnalyzer.extract_aspect_keywords rather than a dedicated ABSA model
absa_analyzer = None

class ReviewAnalyzer:
    def __init__(self):
        self.db_path = "reviews.db"
        self._init_db()
        
    def _init_db(self):
        conn = sqlite3.connect(self.db_path)
        conn.execute('''
            CREATE TABLE IF NOT EXISTS usage_log (
                id INTEGER PRIMARY KEY,
                user_id TEXT,
                timestamp DATETIME,
                analysis_type TEXT,
                items_count INTEGER
            )
        ''')
        conn.close()
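
    def log_usage(self, user_id: str, analysis_type: str, items_count: int):
        """Record one analysis run in the usage_log table created above.

        A minimal sketch: none of the Gradio handlers call this yet.
        """
        conn = sqlite3.connect(self.db_path)
        conn.execute(
            "INSERT INTO usage_log (user_id, timestamp, analysis_type, items_count) VALUES (?, ?, ?, ?)",
            (user_id, datetime.now().isoformat(), analysis_type, items_count)
        )
        conn.commit()
        conn.close()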
    
    def preprocess_text(self, text: str) -> str:
        """Clean and preprocess review text"""
        text = re.sub(r'http\S+', '', text)
        text = re.sub(r'[^\w\s]', '', text)
        text = text.strip().lower()
        return text
    
    def extract_aspect_keywords(self, reviews: List[str]) -> Dict:
        """Extract aspect-based sentiment keywords using rule-based approach"""
        positive_aspects = {}
        negative_aspects = {}
        detailed_aspects = []
        
        # Define aspect keywords
        aspect_keywords = {
            'quality': ['quality', 'build', 'material', 'durable', 'cheap', 'flimsy'],
            'price': ['price', 'cost', 'expensive', 'cheap', 'value', 'money', 'affordable'],
            'delivery': ['delivery', 'shipping', 'fast', 'slow', 'quick', 'late'],
            'service': ['service', 'support', 'staff', 'helpful', 'rude', 'friendly'],
            'design': ['design', 'look', 'beautiful', 'ugly', 'style', 'appearance'],
            'usability': ['easy', 'difficult', 'simple', 'complex', 'user-friendly'],
            'performance': ['performance', 'speed', 'fast', 'slow', 'efficient']
        }
        
        for review in reviews:
            if not review.strip() or len(review) < 10:
                continue
            
            # Classify the whole review; skip it on model errors or a neutral label
            try:
                sentiment_result = sentiment_analyzer(review, truncation=True)[0]
                label = sentiment_result['label'].lower()
                confidence = float(sentiment_result['score'])
            except Exception:
                continue
            if 'pos' in label:
                review_sentiment = 'positive'
            elif 'neg' in label:
                review_sentiment = 'negative'
            else:
                continue
            
            review_lower = review.lower()
            
            # Check for aspect mentions
            for aspect, keywords in aspect_keywords.items():
                for keyword in keywords:
                    if keyword in review_lower:
                        # Determine if this specific aspect mention is positive or negative
                        aspect_sentiment = review_sentiment
                        
                        # Add to aspect counts
                        if aspect_sentiment == 'positive':
                            if aspect not in positive_aspects:
                                positive_aspects[aspect] = 0
                            positive_aspects[aspect] += 1
                        else:
                            if aspect not in negative_aspects:
                                negative_aspects[aspect] = 0
                            negative_aspects[aspect] += 1
                        
                        detailed_aspects.append({
                            'review': review[:50] + '...',
                            'aspect': aspect,
                            'sentiment': aspect_sentiment,
                            'confidence': round(confidence, 3)
                        })
                        break  # Only count each aspect once per review
        
        # Get top aspects
        top_positive = sorted(positive_aspects.items(), key=lambda x: x[1], reverse=True)[:10]
        top_negative = sorted(negative_aspects.items(), key=lambda x: x[1], reverse=True)[:10]
        
        return {
            'top_positive_aspects': top_positive,
            'top_negative_aspects': top_negative,
            'detailed_aspects': detailed_aspects,
            'summary': {
                'total_positive_aspects': len(positive_aspects),
                'total_negative_aspects': len(negative_aspects)
            }
        }
    
    def analyze_sentiment(self, reviews: List[str]) -> Dict:
        """Analyze sentiment of reviews with keyword extraction"""
        results = []
        sentiments = {'positive': 0, 'negative': 0, 'neutral': 0}
        
        for review in reviews:
            if not review.strip():
                continue
                
            clean_review = self.preprocess_text(review)
            if not clean_review:
                continue
            # truncation=True keeps very long reviews within the model's input limit
            result = sentiment_analyzer(clean_review, truncation=True)[0]
            
            label = result['label'].lower()
            score = float(result['score'])
            
            if 'pos' in label:
                sentiment = 'positive'
            elif 'neg' in label:
                sentiment = 'negative'
            else:
                sentiment = 'neutral'
            
            sentiments[sentiment] += 1
            results.append({
                'text': review[:100] + '...' if len(review) > 100 else review,
                'sentiment': sentiment,
                'confidence': round(score, 3)
            })
        
        total = len(results)
        # Guard against an empty result set to avoid division by zero
        sentiment_percentages = {k: round(v / total * 100, 1) if total else 0.0 for k, v in sentiments.items()}
        
        # Extract keywords
        keywords = self.extract_aspect_keywords(reviews)
        
        return {
            'summary': sentiment_percentages,
            'details': results,
            'total_reviews': total,
            'keywords': keywords
        }
    
    def detect_fake_reviews(self, reviews: List[str], metadata: Dict = None) -> Dict:
        """Detect potentially fake reviews with optional metadata"""
        fake_scores = []
        
        # Process metadata if provided
        metadata_flags = []
        if metadata and 'timestamps' in metadata and 'usernames' in metadata:
            metadata_flags = self._analyze_metadata(metadata['timestamps'], metadata['usernames'])
        
        for i, review in enumerate(reviews):
            if not review.strip():
                continue
                
            score = 0
            flags = []
            
            # Text-based checks
            if len(review) < 20:
                score += 0.3
                flags.append("too_short")
            
            words = review.lower().split()
            unique_ratio = len(set(words)) / len(words) if words else 0
            if unique_ratio < 0.5:
                score += 0.4
                flags.append("repetitive")
            
            punct_ratio = len(re.findall(r'[!?.]', review)) / len(review) if review else 0
            if punct_ratio > 0.1:
                score += 0.2
                flags.append("excessive_punctuation")
            
            generic_phrases = ['amazing', 'perfect', 'best ever', 'highly recommend']
            if any(phrase in review.lower() for phrase in generic_phrases):
                score += 0.1
                flags.append("generic_language")
            
            # Add metadata flags if available
            if i < len(metadata_flags):
                if metadata_flags[i]:
                    score += 0.3
                    flags.extend(metadata_flags[i])
            
            fake_scores.append({
                'text': review[:100] + '...' if len(review) > 100 else review,
                'fake_probability': min(round(score, 3), 1.0),
                'status': 'suspicious' if score > 0.5 else 'authentic',
                'flags': flags
            })
        
        suspicious_count = sum(1 for item in fake_scores if item['fake_probability'] > 0.5)
        
        return {
            'summary': {
                'total_reviews': len(fake_scores),
                'suspicious_reviews': suspicious_count,
                'authenticity_rate': round((len(fake_scores) - suspicious_count) / len(fake_scores) * 100, 1) if fake_scores else 0
            },
            'details': fake_scores,
            'metadata_analysis': metadata_flags if metadata_flags else None
        }
    
    def _analyze_metadata(self, timestamps: List[str], usernames: List[str]) -> List[List[str]]:
        """Analyze metadata for suspicious patterns"""
        # Size the flag list to the longer metadata column so indexing stays safe
        flags_per_review = [[] for _ in range(max(len(timestamps), len(usernames)))]
        
        # Time density analysis
        if len(timestamps) >= 5:
            times = []
            for i, ts in enumerate(timestamps):
                try:
                    dt = datetime.strptime(ts, "%Y-%m-%d %H:%M:%S")
                    times.append((i, dt))
                except (ValueError, TypeError):
                    continue
            
            times.sort(key=lambda x: x[1])
            
            # Check for clusters
            for i in range(len(times) - 5):
                if (times[i + 5][1] - times[i][1]).total_seconds() < 300:  # 5 mins
                    for j in range(i, i + 6):
                        flags_per_review[times[j][0]].append("time_cluster")
        
        # Username pattern analysis
        for i, username in enumerate(usernames):
            if re.match(r"user_\d{4,}", username):
                flags_per_review[i].append("suspicious_username")
            if len(username) < 4:
                flags_per_review[i].append("short_username")
        
        return flags_per_review
    
    def assess_quality(self, reviews: List[str], custom_weights: Dict = None) -> Tuple[Dict, go.Figure]:
        """Assess review quality with customizable weights and radar chart"""
        default_weights = {
            'length': 0.25,
            'detail': 0.25,
            'structure': 0.25,
            'helpfulness': 0.25
        }
        
        weights = custom_weights if custom_weights else default_weights
        # Normalize weights so the weighted score (and grade thresholds) stay on a 0-1 scale
        weight_total = sum(weights.values())
        if weight_total > 0:
            weights = {k: v / weight_total for k, v in weights.items()}
        quality_scores = []
        
        for review in reviews:
            if not review.strip():
                continue
                
            factors = {}
            
            # Length factor
            length_score = min(len(review) / 200, 1.0)
            factors['length'] = round(length_score, 2)
            
            # Detail factor
            detail_words = ['because', 'however', 'although', 'specifically', 'particularly']
            detail_score = min(sum(1 for word in detail_words if word in review.lower()) / 3, 1.0)
            factors['detail'] = round(detail_score, 2)
            
            # Structure factor
            sentences = len(re.split(r'[.!?]', review))
            structure_score = min(sentences / 5, 1.0)
            factors['structure'] = round(structure_score, 2)
            
            # Helpfulness factor
            helpful_words = ['pros', 'cons', 'recommend', 'suggest', 'tip', 'advice']
            helpful_score = min(sum(1 for word in helpful_words if word in review.lower()) / 2, 1.0)
            factors['helpfulness'] = round(helpful_score, 2)
            
            # Calculate weighted score
            total_score = sum(factors[k] * weights[k] for k in factors.keys())
            
            quality_scores.append({
                'text': review[:100] + '...' if len(review) > 100 else review,
                'quality_score': round(total_score, 3),
                'factors': factors,
                'grade': 'A' if total_score > 0.8 else 'B' if total_score > 0.6 else 'C' if total_score > 0.4 else 'D'
            })
        
        avg_quality = sum(item['quality_score'] for item in quality_scores) / len(quality_scores) if quality_scores else 0
        
        # Create radar chart for average factors
        avg_factors = {}
        for factor in ['length', 'detail', 'structure', 'helpfulness']:
            avg_factors[factor] = float(sum(item['factors'][factor] for item in quality_scores) / len(quality_scores) if quality_scores else 0)
        
        fig = go.Figure()
        fig.add_trace(go.Scatterpolar(
            r=list(avg_factors.values()),
            theta=list(avg_factors.keys()),
            fill='toself',
            name='Quality Factors'
        ))
        
        fig.update_layout(
            polar=dict(
                radialaxis=dict(
                    visible=True,
                    range=[0, 1]
                )),
            showlegend=True,
            title="Average Quality Factors"
        )
        
        return {
            'summary': {
                'average_quality': round(avg_quality, 3),
                'total_reviews': len(quality_scores),
                'high_quality_count': sum(1 for item in quality_scores if item['quality_score'] > 0.7),
                'weights_used': weights
            },
            'details': quality_scores,
            'factor_averages': avg_factors
        }, fig
    
    def compare_competitors(self, product_a_reviews: List[str], product_b_reviews: List[str]) -> Tuple[Dict, go.Figure]:
        """Compare sentiment between two products"""
        analysis_a = self.analyze_sentiment(product_a_reviews)
        analysis_b = self.analyze_sentiment(product_b_reviews)
        
        fig = make_subplots(
            rows=1, cols=2,
            specs=[[{'type': 'pie'}, {'type': 'pie'}]],
            subplot_titles=['Product A', 'Product B']
        )
        
        fig.add_trace(go.Pie(
            labels=list(analysis_a['summary'].keys()),
            values=list(analysis_a['summary'].values()),
            name="Product A"
        ), row=1, col=1)
        
        fig.add_trace(go.Pie(
            labels=list(analysis_b['summary'].keys()),
            values=list(analysis_b['summary'].values()),
            name="Product B"
        ), row=1, col=2)
        
        fig.update_layout(title_text="Sentiment Comparison")
        
        comparison = {
            'product_a': analysis_a,
            'product_b': analysis_b,
            'winner': 'Product A' if analysis_a['summary']['positive'] > analysis_b['summary']['positive'] else 'Product B'
        }
        
        return comparison, fig
    
    def generate_report(self, analysis_data: Dict, report_type: str = "basic") -> str:
        """Generate analysis report with export capability"""
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        
        if report_type == "sentiment":
            keywords = analysis_data.get('keywords', {})
            top_pos = keywords.get('top_positive_aspects', [])[:5]
            top_neg = keywords.get('top_negative_aspects', [])[:5]
            
            return f"""# Sentiment Analysis Report
Generated: {timestamp}

## Summary
- Total Reviews: {analysis_data.get('total_reviews', 0)}
- Positive: {analysis_data.get('summary', {}).get('positive', 0)}%
- Negative: {analysis_data.get('summary', {}).get('negative', 0)}%
- Neutral: {analysis_data.get('summary', {}).get('neutral', 0)}%

## Top Positive Aspects
{chr(10).join([f"- {aspect[0]} (mentioned {aspect[1]} times)" for aspect in top_pos])}

## Top Negative Aspects  
{chr(10).join([f"- {aspect[0]} (mentioned {aspect[1]} times)" for aspect in top_neg])}

## Key Insights
- Overall sentiment: {'Positive' if analysis_data.get('summary', {}).get('positive', 0) > 50 else 'Mixed'}
- Main complaints: {', '.join([aspect[0] for aspect in top_neg[:3]])}
- Key strengths: {', '.join([aspect[0] for aspect in top_pos[:3]])}

## Recommendations
- Address negative aspects: {', '.join([aspect[0] for aspect in top_neg[:2]])}
- Leverage positive aspects in marketing
- Monitor sentiment trends over time
"""
        
        elif report_type == "fake":
            return f"""# Fake Review Detection Report
Generated: {timestamp}

## Summary
- Total Reviews: {analysis_data.get('summary', {}).get('total_reviews', 0)}
- Suspicious Reviews: {analysis_data.get('summary', {}).get('suspicious_reviews', 0)}
- Authenticity Rate: {analysis_data.get('summary', {}).get('authenticity_rate', 0)}%

## Risk Assessment
- Overall Risk: {'High' if analysis_data.get('summary', {}).get('authenticity_rate', 0) < 70 else 'Low'}
- Action Required: {'Yes' if analysis_data.get('summary', {}).get('suspicious_reviews', 0) > 0 else 'No'}

## Common Fraud Indicators
- Short reviews with generic language
- Repetitive content patterns
- Suspicious timing clusters
- Unusual username patterns
"""
        
        return "Report generated successfully"

# Global analyzer instance
analyzer = ReviewAnalyzer()

def process_reviews_input(text: str) -> List[str]:
    """Process review input text into list"""
    if not text.strip():
        return []
    
    reviews = []
    for line in text.split('\n'):
        line = line.strip()
        if line and len(line) > 10:
            reviews.append(line)
    
    return reviews

def process_csv_upload(file) -> Tuple[List[str], Dict]:
    """Process uploaded CSV file"""
    if file is None:
        return [], {}
    
    try:
        # gr.File may hand back a path string or a tempfile-like object depending on the Gradio version
        file_path = file if isinstance(file, str) else file.name
        df = pd.read_csv(file_path)
        
        # Look for common column names
        review_col = None
        time_col = None
        user_col = None
        
        for col in df.columns:
            col_lower = col.lower()
            if 'review' in col_lower or 'comment' in col_lower or 'text' in col_lower:
                review_col = col
            elif 'time' in col_lower or 'date' in col_lower:
                time_col = col
            elif 'user' in col_lower or 'name' in col_lower:
                user_col = col
        
        if review_col is None:
            return [], {"error": "No review column found. Expected columns: 'review', 'comment', or 'text'"}
        
        # Drop rows with no review first so optional metadata stays index-aligned with the reviews
        df = df.dropna(subset=[review_col])
        reviews = df[review_col].astype(str).tolist()
        
        metadata = {}
        if time_col:
            metadata['timestamps'] = df[time_col].fillna('').astype(str).tolist()
        if user_col:
            metadata['usernames'] = df[user_col].fillna('').astype(str).tolist()
        
        return reviews, metadata
        
    except Exception as e:
        return [], {"error": f"Failed to process CSV: {str(e)}"}

def sentiment_analysis_interface(reviews_text: str, csv_file):
    """Interface for sentiment analysis"""
    reviews = []
    
    if csv_file is not None:
        reviews, metadata = process_csv_upload(csv_file)
        if 'error' in metadata:
            return metadata['error'], None
    else:
        reviews = process_reviews_input(reviews_text)
    
    if not reviews:
        return "Please enter reviews or upload a CSV file.", None
    
    try:
        result = analyzer.analyze_sentiment(reviews)
        
        fig = go.Figure(data=[
            go.Bar(x=list(result['summary'].keys()), 
                   y=list(result['summary'].values()),
                   marker_color=['green', 'red', 'gray'])
        ])
        fig.update_layout(title="Sentiment Distribution", yaxis_title="Percentage")
        
        return json.dumps(result, indent=2), fig
    except Exception as e:
        return f"Error: {str(e)}", None

def fake_detection_interface(reviews_text: str, csv_file):
    """Interface for fake review detection"""
    reviews = []
    metadata = {}
    
    if csv_file is not None:
        reviews, metadata = process_csv_upload(csv_file)
        if 'error' in metadata:
            return metadata['error']
    else:
        reviews = process_reviews_input(reviews_text)
    
    if not reviews:
        return "Please enter reviews or upload a CSV file."
    
    try:
        result = analyzer.detect_fake_reviews(reviews, metadata if metadata else None)
        return json.dumps(result, indent=2)
    except Exception as e:
        return f"Error: {str(e)}"

def quality_assessment_interface(reviews_text: str, csv_file, length_weight: float, detail_weight: float, structure_weight: float, help_weight: float):
    """Interface for quality assessment with custom weights"""
    reviews = []
    
    if csv_file is not None:
        reviews, metadata = process_csv_upload(csv_file)
        if 'error' in metadata:
            return metadata['error'], None
    else:
        reviews = process_reviews_input(reviews_text)
    
    if not reviews:
        return "Please enter reviews or upload a CSV file.", None
    
    try:
        custom_weights = {
            'length': length_weight,
            'detail': detail_weight,
            'structure': structure_weight,
            'helpfulness': help_weight
        }
        
        result, radar_fig = analyzer.assess_quality(reviews, custom_weights)
        return json.dumps(result, indent=2), radar_fig
    except Exception as e:
        return f"Error: {str(e)}", None

def competitor_comparison_interface(product_a_text: str, product_b_text: str):
    """Interface for competitor comparison"""
    if not product_a_text.strip() or not product_b_text.strip():
        return "Please enter reviews for both products.", None
    
    reviews_a = process_reviews_input(product_a_text)
    reviews_b = process_reviews_input(product_b_text)
    
    if not reviews_a or not reviews_b:
        return "Please provide valid reviews for both products.", None
    
    try:
        result, fig = analyzer.compare_competitors(reviews_a, reviews_b)
        return json.dumps(result, indent=2), fig
    except Exception as e:
        return f"Error: {str(e)}", None

def generate_report_interface(analysis_result: str, report_type: str):
    """Interface for report generation"""
    if not analysis_result.strip():
        return "No analysis data available. Please run an analysis first."
    
    try:
        data = json.loads(analysis_result)
        report = analyzer.generate_report(data, report_type.lower())
        return report
    except Exception as e:
        return f"Error generating report: {str(e)}"

# Create Gradio interface
with gr.Blocks(title="SmartReview Pro", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# πŸ›’ SmartReview Pro")
    gr.Markdown("Advanced review analysis platform with AI-powered insights")
    
    with gr.Tab("πŸ“Š Sentiment Analysis"):
        gr.Markdown("### Analyze customer sentiment and extract key aspects")
        with gr.Row():
            with gr.Column():
                sentiment_input = gr.Textbox(
                    lines=8,
                    placeholder="Enter reviews (one per line) or upload CSV...",
                    label="Reviews"
                )
                sentiment_csv = gr.File(
                    label="Upload CSV (columns: review/comment/text, optional: timestamp, username)",
                    file_types=[".csv"]
                )
                sentiment_btn = gr.Button("Analyze Sentiment", variant="primary")
            with gr.Column():
                sentiment_output = gr.Textbox(label="Analysis Results", lines=15)
                sentiment_chart = gr.Plot(label="Sentiment Distribution")
        
        sentiment_btn.click(
            sentiment_analysis_interface,
            inputs=[sentiment_input, sentiment_csv],
            outputs=[sentiment_output, sentiment_chart]
        )
    
    with gr.Tab("πŸ” Fake Review Detection"):
        gr.Markdown("### Detect suspicious reviews using text analysis and metadata")
        with gr.Row():
            with gr.Column():
                fake_input = gr.Textbox(
                    lines=8,
                    placeholder="Enter reviews to analyze...",
                    label="Reviews"
                )
                fake_csv = gr.File(
                    label="Upload CSV (supports timestamp & username analysis)",
                    file_types=[".csv"]
                )
                fake_btn = gr.Button("Detect Fake Reviews", variant="primary")
            with gr.Column():
                fake_output = gr.Textbox(label="Detection Results", lines=15)
        
        fake_btn.click(
            fake_detection_interface,
            inputs=[fake_input, fake_csv],
            outputs=[fake_output]
        )
    
    with gr.Tab("⭐ Quality Assessment"):
        gr.Markdown("### Assess review quality with customizable weights")
        with gr.Row():
            with gr.Column():
                quality_input = gr.Textbox(
                    lines=8,
                    placeholder="Enter reviews to assess...",
                    label="Reviews"
                )
                quality_csv = gr.File(
                    label="Upload CSV",
                    file_types=[".csv"]
                )
                
                gr.Markdown("**Customize Quality Weights:**")
                with gr.Row():
                    length_weight = gr.Slider(0, 1, 0.25, label="Length Weight")
                    detail_weight = gr.Slider(0, 1, 0.25, label="Detail Weight")
                with gr.Row():
                    structure_weight = gr.Slider(0, 1, 0.25, label="Structure Weight")
                    help_weight = gr.Slider(0, 1, 0.25, label="Helpfulness Weight")
                
                quality_btn = gr.Button("Assess Quality", variant="primary")
            with gr.Column():
                quality_output = gr.Textbox(label="Quality Assessment", lines=12)
                quality_radar = gr.Plot(label="Quality Factors Radar Chart")
        
        quality_btn.click(
            quality_assessment_interface,
            inputs=[quality_input, quality_csv, length_weight, detail_weight, structure_weight, help_weight],
            outputs=[quality_output, quality_radar]
        )
    
    with gr.Tab("πŸ†š Competitor Comparison"):
        gr.Markdown("### Compare sentiment between competing products")
        with gr.Row():
            with gr.Column():
                comp_product_a = gr.Textbox(
                    lines=8,
                    placeholder="Product A reviews...",
                    label="Product A Reviews"
                )
                comp_product_b = gr.Textbox(
                    lines=8,
                    placeholder="Product B reviews...",
                    label="Product B Reviews"
                )
                comp_btn = gr.Button("Compare Products", variant="primary")
            with gr.Column():
                comp_output = gr.Textbox(label="Comparison Results", lines=15)
                comp_chart = gr.Plot(label="Comparison Chart")
        
        comp_btn.click(
            competitor_comparison_interface,
            inputs=[comp_product_a, comp_product_b],
            outputs=[comp_output, comp_chart]
        )
    
    with gr.Tab("πŸ“‹ Report Generation"):
        gr.Markdown("### Generate professional analysis reports")
        with gr.Row():
            with gr.Column():
                report_data = gr.Textbox(
                    lines=10,
                    placeholder="Paste analysis results here...",
                    label="Analysis Data (JSON)"
                )
                report_type = gr.Dropdown(
                    choices=["sentiment", "fake", "quality"],
                    value="sentiment",
                    label="Report Type"
                )
                report_btn = gr.Button("Generate Report", variant="primary")
            with gr.Column():
                report_output = gr.Textbox(label="Generated Report", lines=15)
        
        report_btn.click(
            generate_report_interface,
            inputs=[report_data, report_type],
            outputs=[report_output]
        )

if __name__ == "__main__":
    demo.launch()