import re

import nltk
import pandas as pd
from flask import Flask, request, render_template
from nltk.corpus import stopwords
from nltk.sentiment.vader import SentimentIntensityAnalyzer

# Download the required NLTK resources
nltk.download("vader_lexicon")
nltk.download("stopwords")

# Initialize the SentimentIntensityAnalyzer
sia = SentimentIntensityAnalyzer()

# Build the stopword set once instead of rebuilding it for every word
STOPWORDS = set(stopwords.words("english"))


# Clean the text: keep letters only, lowercase, and drop English stopwords
def clean_text(text):
    text = re.sub("[^a-zA-Z]", " ", text)
    words = text.lower().split()
    words = [word for word in words if word not in STOPWORDS]
    return " ".join(words)


# Calculate the compound sentiment score with VADER
def sentiment_score(text):
    return sia.polarity_scores(text)["compound"]


# Read the data into a pandas DataFrame
df = pd.read_csv("content.csv")

# Clean the text and calculate the sentiment score
df["Clean_Text"] = df["Content"].apply(clean_text)
df["Sentiment_Score"] = df["Clean_Text"].apply(sentiment_score)

# Classify the content quality based on the sentiment score
df["Content_Quality"] = df["Sentiment_Score"].apply(lambda x: "Good" if x >= 0.5 else "Bad")

# Print the final result
print(df)

app = Flask(__name__)


@app.route("/", methods=["GET", "POST"])
def index():
    if request.method == "POST":
        content = request.form["content"]
        # Use distinct local names so the clean_text and sentiment_score
        # functions are not shadowed (shadowing them raises UnboundLocalError)
        cleaned = clean_text(content)
        score = sentiment_score(cleaned)
        content_quality = "Good" if score >= 0.5 else "Bad"
        return render_template("index.html", content_quality=content_quality)
    return render_template("index.html")


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=7860, debug=True)
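
# ---------------------------------------------------------------------------
# Optional setup sketch (an assumption, not part of the original script): the
# code above expects a content.csv file with a "Content" column in the working
# directory, and a templates/index.html whose form posts a field named
# "content" and displays the content_quality variable. The snippet below,
# run once as a separate script, creates an illustrative content.csv; the
# example sentences are placeholders.
#
#   import pandas as pd
#
#   sample = pd.DataFrame({
#       "Content": [
#           "This product is absolutely wonderful and I love it!",
#           "Terrible experience, the service was awful and slow.",
#       ],
#   })
#   sample.to_csv("content.csv", index=False)
# ---------------------------------------------------------------------------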