import sys
import gradio as gr
from transformers import pipeline
import spacy
import subprocess
import nltk
from nltk.corpus import wordnet
from spellchecker import SpellChecker
from fastapi import FastAPI
from pydantic import BaseModel
import uvicorn
# Initialize FastAPI
app = FastAPI()
# Initialize the English text classification pipeline for AI detection
pipeline_en = pipeline(task="text-classification", model="Hello-SimpleAI/chatgpt-detector-roberta")
# Initialize the spell checker
spell = SpellChecker()
# Ensure necessary NLTK data is downloaded
nltk.download('wordnet')
nltk.download('omw-1.4')
# Ensure the SpaCy model is installed
try:
    nlp = spacy.load("en_core_web_sm")
except OSError:
    # Model not installed yet: download it with the current interpreter, then retry
    subprocess.run([sys.executable, "-m", "spacy", "download", "en_core_web_sm"], check=True)
    nlp = spacy.load("en_core_web_sm")
# Define the input model for FastAPI (for validation)
class TextInput(BaseModel):
    text: str
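# Both routes below accept a JSON body matching this model, e.g. {"text": "Some input text"}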
# Function to predict the label and score for English text (AI Detection)
def predict_en(text):
    res = pipeline_en(text)[0]
    return res['label'], res['score']
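# Illustrative call (label names come from the detector model's config and
# exact scores will vary): predict_en("Hello there") might return ('Human', 0.98)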
# Function to rephrase text and replace words with their synonyms while maintaining form
def rephrase_with_synonyms(text):
    doc = nlp(text)
    rephrased_text = []
    for token in doc:
        pos_tag = None
        if token.pos_ == "NOUN":
            pos_tag = wordnet.NOUN
        elif token.pos_ == "VERB":
            pos_tag = wordnet.VERB
        elif token.pos_ == "ADJ":
            pos_tag = wordnet.ADJ
        elif token.pos_ == "ADV":
            pos_tag = wordnet.ADV
        if pos_tag:
            synonyms = get_synonyms_nltk(token.text, pos_tag)
            if synonyms:
                rephrased_text.append(synonyms[0])
            else:
                rephrased_text.append(token.text)
        else:
            rephrased_text.append(token.text)
    return ' '.join(rephrased_text)
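# Illustrative call (output depends on the installed WordNet data; the first
# lemma of the first synset is often the input word itself, so many tokens
# pass through unchanged):
#   rephrase_with_synonyms("The quick dog runs fast")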
# Function to paraphrase and correct text
def paraphrase_and_correct(text):
    # [Place your processing logic here, such as removing redundant words, correcting grammar, etc.]
    return rephrase_with_synonyms(text)
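# One way the placeholder above could be filled in, using the SpellChecker
# initialized earlier. This is a sketch of a hypothetical helper, not part of
# the original pipeline:
def correct_spelling(text):
    corrected = []
    for word in text.split():
        # correction() returns the most likely fix, or None if no better candidate exists
        corrected.append(spell.correction(word) or word)
    return ' '.join(corrected)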
# Define FastAPI route for AI detection
@app.post("/ai-detect")
async def ai_detect(input: TextInput):
    label, score = predict_en(input.text)
    return {"label": label, "score": score}
# Define FastAPI route for paraphrasing and grammar correction
@app.post("/paraphrase")
async def paraphrase(input: TextInput):
    corrected_text = paraphrase_and_correct(input.text)
    return {"corrected_text": corrected_text}
# Function to get synonyms using NLTK WordNet
def get_synonyms_nltk(word, pos):
    synsets = wordnet.synsets(word, pos=pos)
    if synsets:
        lemmas = synsets[0].lemmas()
        return [lemma.name() for lemma in lemmas]
    return []
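# Illustrative call: get_synonyms_nltk("quick", wordnet.ADJ) returns the lemma
# names of the first matching synset (e.g. something like ['quick', 'speedy']);
# exact results depend on the WordNet version.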
# Set up Gradio UI
def gradio_ui():
    with gr.Blocks() as demo:
        with gr.Tab("AI Detection"):
            t1 = gr.Textbox(lines=5, label='Text for AI Detection')
            button1 = gr.Button("🤖 Predict AI Detection")
            label1 = gr.Textbox(lines=1, label='Predicted Label')
            score1 = gr.Textbox(lines=1, label='Prediction Score')
            # Connect the prediction function to the Gradio UI
            button1.click(fn=predict_en, inputs=t1, outputs=[label1, score1])
        with gr.Tab("Paraphrasing & Grammar Correction"):
            t2 = gr.Textbox(lines=5, label='Text for Paraphrasing and Grammar Correction')
            button2 = gr.Button("🔄 Paraphrase and Correct")
            result2 = gr.Textbox(lines=10, label='Corrected Text', placeholder="Corrected and paraphrased text will appear here")
            # Connect the paraphrasing and correction function to the Gradio UI
            button2.click(fn=paraphrase_and_correct, inputs=t2, outputs=result2)

    # Start Gradio on port 7860 and share the app publicly
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
# Run both FastAPI and Gradio concurrently
if __name__ == "__main__":
    import multiprocessing

    # Run the FastAPI server in a separate process
    fastapi_process = multiprocessing.Process(
        target=uvicorn.run, args=(app,), kwargs={"host": "0.0.0.0", "port": 8000}
    )
    fastapi_process.start()

    # Run the Gradio interface in the main process (blocks until the UI is closed)
    gradio_ui()

    # When the Gradio UI exits, shut down the FastAPI process as well
    fastapi_process.terminate()
    fastapi_process.join()