File size: 4,381 Bytes
4fe52ab
 
b7577da
7feda08
90fff6b
7fc55d1
 
9ea0d50
29e4a04
 
 
 
33af001
 
236bb4b
90fff6b
 
 
9ea0d50
 
 
 
7fc55d1
51568dc
6ba2176
9ea0d50
6ba2176
 
 
fbc26ed
6ba2176
7feda08
33af001
 
29e4a04
 
59b2b8e
4fe52ab
59b2b8e
4fe52ab
e0913e2
 
4fe52ab
e0913e2
 
 
 
 
 
4fe52ab
e0913e2
4fe52ab
e0913e2
4fe52ab
e0913e2
4fe52ab
e0913e2
 
33af001
e0913e2
33af001
e0913e2
 
 
 
 
 
 
 
33af001
4fe52ab
33af001
 
 
 
 
 
 
29e4a04
 
33af001
 
 
 
29e4a04
 
33af001
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e0913e2
33af001
 
 
e0913e2
33af001
 
29e4a04
33af001
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
import os
import subprocess
import sys

import gradio as gr
import nltk
import spacy
import uvicorn
from fastapi import FastAPI
from nltk.corpus import wordnet
from pydantic import BaseModel
from spellchecker import SpellChecker
from transformers import pipeline

# Initialize FastAPI
app = FastAPI()

# Initialize the English text classification pipeline for AI detection.
# NOTE(review): downloads the model from the Hugging Face hub on first run —
# requires network access and may take a while.
pipeline_en = pipeline(task="text-classification", model="Hello-SimpleAI/chatgpt-detector-roberta")

# Initialize the spell checker
# NOTE(review): `spell` is not referenced anywhere in this file — presumably
# reserved for a future grammar-correction pass; confirm before removing.
spell = SpellChecker()

# Ensure necessary NLTK data is downloaded (WordNet + its multilingual data,
# needed by get_synonyms_nltk). nltk.download is a no-op if already present.
nltk.download('wordnet')
nltk.download('omw-1.4')

# Load the SpaCy English model, downloading it on first use if it is missing.
try:
    nlp = spacy.load("en_core_web_sm")
except OSError:
    # Use the current interpreter (sys.executable) rather than whatever
    # "python" happens to resolve to on PATH — inside a venv or on systems
    # where only "python3" exists, the bare name is the wrong (or no)
    # interpreter. check=True makes a failed download raise here instead of
    # failing confusingly on the retry below.
    subprocess.run(
        [sys.executable, "-m", "spacy", "download", "en_core_web_sm"],
        check=True,
    )
    nlp = spacy.load("en_core_web_sm")

# Define the input model for FastAPI (for validation)
class TextInput(BaseModel):
    """Request body shared by the /ai-detect and /paraphrase endpoints."""
    # Raw input string to classify or paraphrase.
    text: str

# Run the AI-detection classifier on English text and unpack its top result.
def predict_en(text):
    """Classify *text* with the AI-detection pipeline.

    Returns a (label, score) tuple taken from the pipeline's first prediction.
    """
    result = pipeline_en(text)[0]
    return result['label'], result['score']

# Map SpaCy coarse POS tags to the corresponding WordNet POS constants.
_POS_MAP = {
    "NOUN": wordnet.NOUN,
    "VERB": wordnet.VERB,
    "ADJ": wordnet.ADJ,
    "ADV": wordnet.ADV,
}

# Function to rephrase text by swapping content words for WordNet synonyms
def rephrase_with_synonyms(text):
    """Replace nouns/verbs/adjectives/adverbs in *text* with a WordNet synonym.

    Tokens whose POS is not in _POS_MAP, or for which WordNet has no
    synonyms, are kept unchanged. Each token's original trailing whitespace
    is preserved, so punctuation stays attached to the preceding word
    (the previous ' '.join produced e.g. "Hello , world !").
    """
    doc = nlp(text)
    pieces = []

    for token in doc:
        pos_tag = _POS_MAP.get(token.pos_)
        replacement = token.text

        if pos_tag:
            synonyms = get_synonyms_nltk(token.text, pos_tag)
            if synonyms:
                # WordNet joins multi-word lemmas with underscores
                # (e.g. "look_into"); convert them back to spaces so they
                # don't leak into the output text.
                replacement = synonyms[0].replace('_', ' ')

        # token.whitespace_ is the exact whitespace that followed the token
        # in the input, so the rebuilt string keeps the original layout.
        pieces.append(replacement + token.whitespace_)

    return ''.join(pieces)

# Entry point of the text-cleanup pipeline; currently synonym rephrasing only.
def paraphrase_and_correct(text):
    """Paraphrase *text*.

    Further passes (redundancy removal, grammar correction, ...) can be
    chained here without changing the endpoint that calls this function.
    """
    rephrased = rephrase_with_synonyms(text)
    return rephrased

# FastAPI route: classify the posted text as AI- or human-written.
@app.post("/ai-detect")
async def ai_detect(input: TextInput):
    """Return the AI-detection label and confidence score for the posted text."""
    prediction = predict_en(input.text)
    return {"label": prediction[0], "score": prediction[1]}

# FastAPI route: paraphrase the posted text and apply corrections.
@app.post("/paraphrase")
async def paraphrase(input: TextInput):
    """Return the paraphrased/corrected version of the posted text."""
    return {"corrected_text": paraphrase_and_correct(input.text)}

# Function to get synonyms using NLTK WordNet
def get_synonyms_nltk(word, pos):
    """Return synonym strings for *word* with part of speech *pos*.

    Only the first (most common) synset is consulted. WordNet stores
    multi-word lemmas with underscores (e.g. "kick_the_bucket"), which
    would otherwise leak verbatim into rephrased output, so underscores
    are converted back to spaces. Returns an empty list when WordNet has
    no synset for the word.
    """
    synsets = wordnet.synsets(word, pos=pos)
    if not synsets:
        return []
    return [lemma.name().replace('_', ' ') for lemma in synsets[0].lemmas()]

# Build and launch the two-tab Gradio front end.
def gradio_ui():
    """Launch the Gradio UI (blocks until the server is shut down)."""
    with gr.Blocks() as demo:
        with gr.Tab("AI Detection"):
            detect_input = gr.Textbox(lines=5, label='Text for AI Detection')
            detect_button = gr.Button("🤖 Predict AI Detection")
            detect_label = gr.Textbox(lines=1, label='Predicted Label')
            detect_score = gr.Textbox(lines=1, label='Prediction Score')

            # Wire the detection button to the classifier.
            detect_button.click(
                fn=predict_en,
                inputs=detect_input,
                outputs=[detect_label, detect_score],
            )

        with gr.Tab("Paraphrasing & Grammar Correction"):
            para_input = gr.Textbox(lines=5, label='Text for Paraphrasing and Grammar Correction')
            para_button = gr.Button("🔄 Paraphrase and Correct")
            para_output = gr.Textbox(lines=10, label='Corrected Text', placeholder="Corrected and paraphrased text will appear here")

            # Wire the paraphrase button to the correction pipeline.
            para_button.click(fn=paraphrase_and_correct, inputs=para_input, outputs=para_output)

    # Serve on all interfaces, port 7860, with a public share link.
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)

# Run FastAPI (port 8000) and Gradio (port 7860) concurrently.
if __name__ == "__main__":
    import multiprocessing

    # Serve the FastAPI app from a child process so Gradio can own this one.
    fastapi_process = multiprocessing.Process(
        target=uvicorn.run,
        args=(app,),
        kwargs={"host": "0.0.0.0", "port": 8000},
    )
    fastapi_process.start()

    try:
        # Blocks until the Gradio server is shut down (e.g. Ctrl+C).
        gradio_ui()
    finally:
        # uvicorn.run never returns on its own, so a bare join() would hang
        # this process forever after the UI closes; terminate the server
        # child first, then reap it. The finally also cleans up if
        # gradio_ui() raises.
        fastapi_process.terminate()
        fastapi_process.join()