File size: 3,695 Bytes
a47da73
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
import streamlit as st
from transformers import pipeline
import matplotlib.pyplot as plt
import json
import langdetect
from keybert import KeyBERT

# Build all NLP pipelines once per server process; st.cache_resource
# keeps the heavy model downloads/initialisation from repeating on rerun.
@st.cache_resource
def load_models():
    """Create and cache every model used by the app.

    Returns:
        dict: task name -> ready-to-call model, looked up elsewhere as
        ``models["<task>"]``.
    """
    emotion_clf = pipeline(
        "text-classification",
        model="j-hartmann/emotion-english-distilroberta-base",
        return_all_scores=True,
    )
    toxicity_clf = pipeline(
        "text-classification",
        model="unitary/unbiased-toxic-roberta",
    )
    return {
        "emotion": emotion_clf,
        "sentiment": pipeline("sentiment-analysis"),
        "summarization": pipeline("summarization"),
        "ner": pipeline("ner", grouped_entities=True),
        "toxicity": toxicity_clf,
        "keyword_extraction": KeyBERT(),
    }

models = load_models()

# Function: Emotion Detection
def analyze_emotions(text):
    """Score *text* against every emotion label.

    Returns:
        dict: emotion label -> score rounded to 2 decimal places.
    """
    # return_all_scores=True makes the pipeline yield a list per input;
    # we analyse a single string, so take element 0.
    scores = models["emotion"](text)[0]
    return {entry['label']: round(entry['score'], 2) for entry in scores}

# Function: Sentiment Analysis
def analyze_sentiment(text):
    """Run sentiment analysis on *text*.

    Returns:
        dict: single {label: rounded score} pair for the top prediction.
    """
    top = models["sentiment"](text)[0]
    label, score = top['label'], top['score']
    return {label: round(score, 2)}

# Function: Text Summarization
def summarize_text(text):
    """Summarize *text* with the cached summarization pipeline.

    The input is truncated to its first 1024 characters because the
    underlying model accepts only a limited input window.

    Returns:
        str: the generated summary text.
    """
    # BUG FIX: original used text["1024"], which indexes a string with a
    # str key and raises TypeError. The intent (per the old comment
    # "Limit input to 1024") is a slice.
    summary = models["summarization"](text[:1024])[0]['summary_text']
    return summary

# Function: Keyword Extraction
def extract_keywords(text):
    """Extract salient 1–2 word keyphrases from *text* via KeyBERT.

    Returns:
        list[tuple[str, float]]: (keyphrase, relevance score) pairs.
    """
    # BUG FIX: original wrote keyphrase_ngram_range(1, 2) — a call to an
    # undefined name (NameError). KeyBERT.extract_keywords takes it as a
    # keyword argument holding the (min_n, max_n) tuple.
    return models["keyword_extraction"].extract_keywords(
        text, keyphrase_ngram_range=(1, 2), stop_words='english'
    )

# Function: Named Entity Recognition (NER)
def analyze_ner(text):
    """Map each recognised entity word to its entity group.

    Returns:
        dict: entity text -> entity group label. A word recognised more
        than once keeps only the last group seen, as in a dict build.
    """
    mapping = {}
    for ent in models["ner"](text):
        mapping[ent["word"]] = ent["entity_group"]
    return mapping

# Function: Language Detection
def detect_language(text):
    """Return the language code langdetect infers for *text*.

    Returns:
        str: an ISO-639-1 code (e.g. "en"), or the fallback message
        when detection fails.
    """
    try:
        return langdetect.detect(text)
    # FIX: narrowed from a bare `except:` (which also swallowed
    # KeyboardInterrupt/SystemExit) to the exception langdetect raises
    # for undetectable input such as empty or punctuation-only strings.
    except langdetect.LangDetectException:
        return "Error detecting language"

# Function: Toxicity Detection
def detect_toxicity(text):
    """Classify *text* for toxicity.

    Returns:
        dict: single {label: rounded score} pair for the top prediction.
    """
    top = models["toxicity"](text)[0]
    return {top['label']: round(top['score'], 2)}


# Streamlit UI
st.title("๐Ÿš€ AI-Powered Text Intelligence App")
st.markdown("Analyze text with multiple NLP features: Emotion Detection, Sentiment Analysis, Summarization, NER, Keywords, Language Detection, and more!")

# User Input
text_input = st.text_area("Enter text to analyze:", "")

if st.button("Analyze Text"):
    if text_input.strip():
        st.subheader("๐Ÿ”น Emotion Detection")
        emotions = analyze_emotions(text_input)
        st.json(emotions)

        st.subheader("๐Ÿ”น Sentiment Analysis")
        sentiment = analyze_sentiment(text_input)
        st.json(sentiment)

        st.subheader("๐Ÿ”น Text Summarization")
        summary = summarize_text(text_input)
        st.write(summary)

        st.subheader("๐Ÿ”น Keyword Extraction")
        keywords = extract_keywords(text_input)
        st.json(keywords)

        st.subheader("๐Ÿ”น Named Entity Recognition (NER)")
        ner_data = analyze_ner(text_input)
        st.json(ner_data)

        st.subheader("๐Ÿ”น Language Detection")
        lang = detect_language(text_input)
        st.write(f"Detected Language: `{lang}`")

        st.subheader("๐Ÿ”น Toxicity Detection")
        toxicity = detect_toxicity(text_input)
        st.json(toxicity)


        # JSON Download
        result_data = {
            "emotion": emotions,
            "sentiment": sentiment,
            "summary": summary,
            "keywords": keywords,
            "ner": ner_data,
            "language": lang,
            "toxicity": toxicity
        }

        json_result = json.dumps(result_data, indent=2)
        st.download_button("Download Analysis Report", data=json_result, file_name="text_analysis.json", mime="application/json")
    else:
        st.warning("โš ๏ธ Please enter some text to analyze")