Update app.py
app.py
CHANGED
@@ -3,9 +3,56 @@ import torch
 from transformers import AutoModelForSequenceClassification, AutoTokenizer
 import re
 
-#
+# Load AI detection model
+MODEL_NAME = "roberta-base-openai-detector"
+tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)
 
-#
+# AI content detection function
+def detect_ai_content(text):
+    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
+    with torch.no_grad():
+        outputs = model(**inputs)
+    scores = torch.nn.functional.softmax(outputs.logits, dim=1)
+    return scores[0][1].item()  # AI probability
+
+# Writing style analysis
+def stylometry_analysis(text):
+    words = text.split()
+    avg_word_length = sum(len(word) for word in words) / len(words) if words else 0
+    complex_words_ratio = len([word for word in words if len(word) > 6]) / len(words) if words else 0
+    passive_voice_count = len(re.findall(r'\b(is|was|were|has been|have been|had been)\b \w+ed', text))
+    return avg_word_length, complex_words_ratio, passive_voice_count
+
+# Semantic similarity analysis
+def semantic_analysis(text):
+    keywords = ["AI", "generated", "neural network", "LLM", "GPT", "transformer"]
+    ai_patterns = sum([text.lower().count(keyword.lower()) for keyword in keywords])
+    return ai_patterns / len(text.split()) if text.split() else 0
+
+# Final AI Detection Logic
+def analyze_text(text):
+    ai_probability = detect_ai_content(text)
+    avg_word_length, complex_words_ratio, passive_voice_count = stylometry_analysis(text)
+    semantic_score = semantic_analysis(text)
+
+    is_ai_generated = (
+        ai_probability > 0.5 or
+        complex_words_ratio > 0.4 or
+        semantic_score > 0.2
+    )
+
+    result = "🟢 Human-Written" if not is_ai_generated else "🔴 AI-Generated"
+
+    return {
+        "Final Verdict": result,
+        "AI Probability": round(ai_probability, 2),
+        "Complex Words Ratio": round(complex_words_ratio, 2),
+        "Passive Voice Count": passive_voice_count,
+        "Semantic Score": round(semantic_score, 2)
+    }
+
+# Gradio UI
 def create_interface():
     with gr.Blocks(title="AI Content Detector") as demo:
         # UI part
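The hunk ends inside create_interface(), so the commit does not show how analyze_text is wired into the Gradio UI. Below is a minimal sketch of that wiring, assuming a single Textbox input, a Button trigger, and a JSON output; the component names and layout are illustrative, not part of this commit.

import gradio as gr

# Hypothetical continuation of create_interface(); the real UI code is not shown
# in this commit, so the components and layout below are assumptions.
def create_interface():
    with gr.Blocks(title="AI Content Detector") as demo:
        gr.Markdown("## AI Content Detector")
        text_input = gr.Textbox(lines=8, label="Text to analyze")
        analyze_button = gr.Button("Analyze")
        result_output = gr.JSON(label="Analysis result")

        # analyze_text returns a plain dict, which gr.JSON renders directly
        analyze_button.click(fn=analyze_text, inputs=text_input, outputs=result_output)
    return demo

demo = create_interface()
demo.launch()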