import gradio as gr
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer
import re

# Load AI detection model
MODEL_NAME = "roberta-base-openai-detector"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)

# AI content detection function
def detect_ai_content(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
    scores = torch.nn.functional.softmax(outputs.logits, dim=1)
    # The detector's classes are "Fake" (machine-generated) and "Real"; look up
    # the "Fake" index from the model config instead of hard-coding it.
    label2id = {label.lower(): idx for idx, label in model.config.id2label.items()}
    return scores[0][label2id.get("fake", 1)].item()  # AI probability

# Writing style analysis
def stylometry_analysis(text):
    words = text.split()
    avg_word_length = sum(len(word) for word in words) / len(words) if words else 0
    complex_words_ratio = len([word for word in words if len(word) > 6]) / len(words) if words else 0
    passive_voice_count = len(re.findall(r'\b(is|was|were|has been|have been|had been)\b \w+ed', text))
    return avg_word_length, complex_words_ratio, passive_voice_count

# Keyword-frequency analysis (a rough proxy for AI-related "semantic" signals)
def semantic_analysis(text):
    keywords = ["AI", "generated", "neural network", "LLM", "GPT", "transformer"]
    # Lowercase both sides so uppercase keywords like "AI" and "GPT" are still counted
    ai_patterns = sum(text.lower().count(keyword.lower()) for keyword in keywords)
    return ai_patterns / len(text.split()) if text.split() else 0

# Final AI detection logic: combine all signals into a single verdict
def analyze_text(text):
    ai_probability = detect_ai_content(text)
    avg_word_length, complex_words_ratio, passive_voice_count = stylometry_analysis(text)
    semantic_score = semantic_analysis(text)
    is_ai_generated = (
        ai_probability > 0.5 or
        complex_words_ratio > 0.4 or
        semantic_score > 0.2
    )
    result = "🔴 AI-Generated" if is_ai_generated else "🟢 Human-Written"
    # Return one value per output component, in the order they are wired up
    # in the Gradio interface below.
    return (
        result,
        round(ai_probability, 2),
        round(complex_words_ratio, 2),
        passive_voice_count,
        round(semantic_score, 2),
    )

# Gradio UI
def create_interface():
    with gr.Blocks(title="AI Content Detector") as demo:
        # UI section
        gr.Markdown("# 🚀 Self-Learning AI Content Detector")
        with gr.Row():
            input_text = gr.Textbox(label="Enter Text", lines=5)
            analyze_btn = gr.Button("Analyze")
        with gr.Row():
            final_verdict = gr.Textbox(label="Final Verdict")
            ai_prob = gr.Number(label="AI Probability")
            complex_ratio = gr.Number(label="Complex Words Ratio")
            passive_voice = gr.Number(label="Passive Voice Count")
            semantic_score = gr.Number(label="Semantic Score")

        # API section
        gr.Markdown("## API Documentation")
        gr.HTML("""
        <div>
            <h3>API Usage:</h3>
            <p>POST to /api/analyze with a JSON payload (Gradio wraps inputs in a "data" list):</p>
            <pre>
            {
                "data": ["Your text here"]
            }
            </pre>
        </div>
        """)

        # Event handling: wire the button to the analysis function
        analyze_btn.click(
            fn=analyze_text,
            inputs=input_text,
            outputs=[final_verdict, ai_prob, complex_ratio, passive_voice, semantic_score],
            api_name="analyze"  # name of the API endpoint
        )
    return demo

# Main application entry point
if __name__ == "__main__":
    demo = create_interface()
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
        show_api=True  # show the auto-generated API docs; the API itself is on by default
    )
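
# --- Example client call (a minimal sketch, not part of the app itself) ---
# Assumes the app is running locally at http://localhost:7860 and that the
# `gradio_client` package is installed; the sample text is made up.
#
# from gradio_client import Client
#
# client = Client("http://localhost:7860")
# verdict, ai_prob, complex_ratio, passive_count, semantic = client.predict(
#     "The transformer architecture underlies most modern language models.",
#     api_name="/analyze",
# )
# print(verdict, ai_prob)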