SabkeSawaal68 committed on
Commit 2bdae08 · verified · 1 Parent(s): 2abe106

Update app.py

Files changed (1)
  1. app.py +52 -103
app.py CHANGED
@@ -3,106 +3,55 @@ import torch
  from transformers import AutoModelForSequenceClassification, AutoTokenizer
  import re

- # Load AI detection model
- MODEL_NAME = "roberta-base-openai-detector"
- tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
- model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)
-
- # AI content detection function
- def detect_ai_content(text):
-     inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
-     with torch.no_grad():
-         outputs = model(**inputs)
-     scores = torch.nn.functional.softmax(outputs.logits, dim=1)
-     return scores[0][1].item()  # AI probability
-
- # Writing style analysis
- def stylometry_analysis(text):
-     words = text.split()
-     avg_word_length = sum(len(word) for word in words) / len(words) if words else 0
-     complex_words_ratio = len([word for word in words if len(word) > 6]) / len(words) if words else 0
-     passive_voice_count = len(re.findall(r'\b(is|was|were|has been|have been|had been)\b \w+ed', text))
-     return avg_word_length, complex_words_ratio, passive_voice_count
-
- # Semantic similarity analysis
- def semantic_analysis(text):
-     keywords = ["AI", "generated", "neural network", "LLM", "GPT", "transformer"]
-     ai_patterns = sum([text.lower().count(keyword) for keyword in keywords])
-     return ai_patterns / len(text.split()) if text.split() else 0
-
- # Final AI Detection Logic
- def analyze_text(text):
-     ai_probability = detect_ai_content(text)
-     avg_word_length, complex_words_ratio, passive_voice_count = stylometry_analysis(text)
-     semantic_score = semantic_analysis(text)
-
-     is_ai_generated = (
-         ai_probability > 0.5 or
-         complex_words_ratio > 0.4 or
-         semantic_score > 0.2
-     )
-
-     result = "🟢 Human-Written" if not is_ai_generated else "🔴 AI-Generated"
-
-     return {
-         "Final Verdict": result,
-         "AI Probability": round(ai_probability, 2),
-         "Complex Words Ratio": round(complex_words_ratio, 2),
-         "Passive Voice Count": passive_voice_count,
-         "Semantic Score": round(semantic_score, 2)
-     }
-
- # Gradio UI
- def gradio_interface(text):
-     result = analyze_text(text)
-     return (
-         result["Final Verdict"],
-         f"{result['AI Probability']:.2f}",
-         f"{result['Complex Words Ratio']:.2f}",
-         str(result["Passive Voice Count"]),
-         f"{result['Semantic Score']:.2f}"
-     )
-
- # Create API Interface
- api_interface = gr.Interface(
-     fn=gradio_interface,
-     inputs="text",
-     outputs=["text", "text", "text", "text", "text"]
- )
-
- # Launch API
- api_interface.launch(server_name="0.0.0.0", server_port=7860)
-
- # Run UI
- demo = gr.Blocks()
- with demo:
-     gr.Markdown("# 🚀 Self-Learning AI Content Detector")
-     gr.Markdown("Detect AI-generated text and analyze writing patterns.")
-
-     with gr.Row():
-         text_input = gr.Textbox(label="Enter text to analyze:", lines=5)
-         analyze_button = gr.Button("🔍 Analyze")
-
-     with gr.Row():
-         final_verdict = gr.Textbox(label="Final Verdict", interactive=False)
-         ai_prob = gr.Textbox(label="AI Probability", interactive=False)
-         complex_ratio = gr.Textbox(label="Complex Words Ratio", interactive=False)
-         passive_voice = gr.Textbox(label="Passive Voice Count", interactive=False)
-         semantic_score = gr.Textbox(label="Semantic Score", interactive=False)
-
-     analyze_button.click(gradio_interface, inputs=[text_input], outputs=[final_verdict, ai_prob, complex_ratio, passive_voice, semantic_score])
-
- demo.launch()
-
- import gradio as gr
-
- def analyze_text(text):
-     return {"message": "API is working!", "input_text": text}
-
- api_interface = gr.Interface(
-     fn=analyze_text,
-     inputs="text",
-     outputs="json"
- )
-
- api_interface.launch(server_name="0.0.0.0", server_port=7860)
+ # ... (your existing detection code stays here) ...
+
+ # Wire everything into a single Gradio interface
+ def create_interface():
+     with gr.Blocks(title="AI Content Detector") as demo:
+         # UI section
+         gr.Markdown("# 🚀 Self-Learning AI Content Detector")
+
+         with gr.Row():
+             input_text = gr.Textbox(label="Enter Text", lines=5)
+             analyze_btn = gr.Button("Analyze")
+
+         with gr.Row():
+             final_verdict = gr.Textbox(label="Final Verdict")
+             ai_prob = gr.Number(label="AI Probability")
+             complex_ratio = gr.Number(label="Complex Words Ratio")
+             passive_voice = gr.Number(label="Passive Voice Count")
+             semantic_score = gr.Number(label="Semantic Score")
+
+         # API section
+         gr.Markdown("## API Documentation")
+         gr.HTML("""
+             <div>
+                 <h3>API Usage:</h3>
+                 <p>POST to /api/analyze with JSON payload:</p>
+                 <pre>
+                 {
+                     "text": "Your text here"
+                 }
+                 </pre>
+             </div>
+         """)
+
+         # Event handling
+         analyze_btn.click(
+             fn=analyze_text,
+             inputs=input_text,
+             outputs=[final_verdict, ai_prob, complex_ratio, passive_voice, semantic_score],
+             api_name="analyze"  # API endpoint name
+         )
+
+     return demo
+
+ # Main application
+ if __name__ == "__main__":
+     demo = create_interface()
+     demo.launch(
+         server_name="0.0.0.0",
+         server_port=7860,
+         share=False,
+         show_api=True  # show the API docs link; endpoints are exposed by default
+     )