conversantech committed on
Commit
0004d04
·
verified ·
1 Parent(s): ba69aa1

Update app.py

Files changed (1)
  1. app.py +326 -428
app.py CHANGED
@@ -4,8 +4,10 @@ import random
4
  import re
5
  import nltk
6
  from nltk.tokenize import sent_tokenize, word_tokenize
 
7
  from textstat import flesch_reading_ease, flesch_kincaid_grade
8
  import string
 
9
 
10
  # Setup NLTK download path for Hugging Face Spaces
11
  os.environ['NLTK_DATA'] = '/tmp/nltk_data'
@@ -16,7 +18,8 @@ def download_nltk_data():
16
  os.makedirs('/tmp/nltk_data', exist_ok=True)
17
  nltk.data.path.append('/tmp/nltk_data')
18
 
19
- required_data = ['punkt_tab', 'punkt', 'averaged_perceptron_tagger', 'stopwords']
 
20
 
21
  for data in required_data:
22
  try:
@@ -34,346 +37,301 @@ download_nltk_data()
34
 
35
  class AdvancedAIHumanizer:
36
  def __init__(self):
37
- self.setup_comprehensive_patterns()
38
-
39
- def setup_comprehensive_patterns(self):
40
- """Setup the most comprehensive humanization patterns"""
41
-
42
- # AI-flagged terms that must be completely eliminated
43
- self.ai_death_terms = {
44
- r'\brealm\b': ["world", "space", "area", "field", "zone"],
45
- r'\bdelve\b': ["dig into", "explore", "look at", "check out", "dive into"],
46
- r'\bembark\b': ["start", "begin", "kick off", "jump into", "get going"],
47
- r'\ba testament to\b': ["shows", "proves", "demonstrates", "reflects"],
48
- r'\bthe landscape of\b': ["the world of", "what's happening in", "the scene in"],
49
- r'\bnavigating\b': ["dealing with", "handling", "figuring out", "working through"],
50
- r'\bmeticulous\b': ["super careful", "really detailed", "thorough", "precise"],
51
- r'\bintricate\b': ["complex", "detailed", "tricky", "complicated"],
52
- r'\bfurthermore\b': ["plus", "also", "and", "on top of that"],
53
- r'\bmoreover\b': ["also", "plus", "and", "what's more"],
54
- r'\bhowever\b': ["but", "though", "yet", "still"],
55
- r'\bnevertheless\b': ["but", "still", "even so", "anyway"],
56
- r'\btherefore\b': ["so", "that's why", "because of this", "which means"],
57
- r'\bconsequently\b': ["so", "as a result", "because of this", "that's why"],
58
- r'\bin conclusion\b': ["to wrap up", "bottom line", "all in all", "so basically"],
59
- r'\bit is important to note\b': ["worth mentioning", "keep in mind", "remember"],
60
- r'\bit should be noted\b': ["remember", "keep in mind", "don't forget"],
61
- r'\bsignificant\b': ["big", "major", "huge", "important"],
62
- r'\bsubstantial\b': ["big", "large", "major", "huge"],
63
- r'\bcomprehensive\b': ["complete", "full", "thorough", "detailed"],
64
- r'\boptimal\b': ["best", "perfect", "ideal", "top-notch"],
65
- r'\bfacilitate\b': ["help", "make easier", "enable", "assist with"],
66
- r'\butilize\b': ["use", "work with", "employ", "make use of"],
67
- r'\bleverage\b': ["use", "take advantage of", "make use of", "tap into"],
68
- r'\benhance\b': ["improve", "boost", "make better", "upgrade"],
69
- r'\bimplement\b': ["put in place", "set up", "start using", "roll out"],
70
- r'\bparadigm\b': ["approach", "way", "method", "system"],
71
- r'\bmethodology\b': ["method", "approach", "way", "system"],
72
- r'\bsynergy\b': ["teamwork", "working together", "collaboration"],
73
- r'\boptimize\b': ["improve", "make better", "fine-tune", "perfect"],
74
- r'\bstreamline\b': ["simplify", "make easier", "smooth out"],
75
- r'\brobust\b': ["strong", "solid", "reliable", "tough"],
76
- r'\bscalable\b': ["flexible", "adaptable", "expandable"],
77
- r'\bseamless\b': ["smooth", "easy", "effortless", "simple"],
78
- r'\binnovative\b': ["new", "creative", "fresh", "cutting-edge"],
79
- r'\bgroundbreaking\b': ["amazing", "revolutionary", "game-changing"],
80
- r'\btransformative\b': ["life-changing", "game-changing", "revolutionary"],
81
- r'\bparadigm shift\b': ["big change", "major shift", "game changer"],
82
- r'\bgame changer\b': ["total game changer", "complete shift", "major breakthrough"],
83
- r'\bcutting-edge\b': ["latest", "newest", "state-of-the-art", "advanced"],
84
- r'\bstate-of-the-art\b': ["latest", "newest", "most advanced", "top-notch"]
85
  }
86
 
87
- # Extensive contractions for natural speech
88
- self.contractions = {
89
- r'\bdo not\b': "don't", r'\bdoes not\b': "doesn't", r'\bdid not\b': "didn't",
90
- r'\bwill not\b': "won't", r'\bwould not\b': "wouldn't", r'\bcould not\b': "couldn't",
91
- r'\bshould not\b': "shouldn't", r'\bcannot\b': "can't", r'\bis not\b': "isn't",
92
- r'\bare not\b': "aren't", r'\bwas not\b': "wasn't", r'\bwere not\b': "weren't",
93
- r'\bhave not\b': "haven't", r'\bhas not\b': "hasn't", r'\bhad not\b': "hadn't",
94
- r'\bI will\b': "I'll", r'\byou will\b': "you'll", r'\bhe will\b': "he'll",
95
- r'\bshe will\b': "she'll", r'\bwe will\b': "we'll", r'\bthey will\b': "they'll",
96
- r'\bI would\b': "I'd", r'\byou would\b': "you'd", r'\bI have\b': "I've",
97
- r'\byou have\b': "you've", r'\bwe have\b': "we've", r'\bthey have\b': "they've",
98
- r'\bthat is\b': "that's", r'\bit is\b': "it's", r'\bwho is\b': "who's",
99
- r'\bwhat is\b': "what's", r'\bwhere is\b': "where's", r'\bwhen is\b': "when's",
100
- r'\bhow is\b': "how's", r'\bwhy is\b': "why's", r'\bthere is\b': "there's",
101
- r'\bthere are\b': "there're", r'\bhere is\b': "here's"
102
  }
103
 
104
- # Conversation starters that sound human
105
- self.human_starters = [
106
- "Look,", "Listen,", "Here's the thing -", "You know what?", "Honestly,",
107
- "Real talk -", "Let me tell you,", "So here's what I think -", "Okay, so",
108
- "Right, so", "Well,", "Actually,", "Basically,", "Here's what's wild -",
109
- "Get this -", "Check it out -", "So I was thinking -", "You know what's crazy?",
110
- "Here's something interesting -", "Let me break this down for you -"
111
- ]
112
-
113
- # Natural fillers and expressions
114
- self.natural_fillers = [
115
- "you know", "I mean", "like", "actually", "basically", "honestly",
116
- "literally", "obviously", "clearly", "definitely", "pretty much",
117
- "kind of", "sort of", "more or less", "at the end of the day",
118
- "when it comes down to it", "if you ask me", "in my experience",
119
- "from what I've seen", "the way I see it", "real quick", "super quick",
120
- "really fast", "pretty cool", "kinda weird", "sorta like"
121
- ]
122
-
123
- # Personal perspective phrases
124
- self.personal_voices = [
125
- "I think", "in my opinion", "from my experience", "personally",
126
- "if you ask me", "the way I see it", "from what I've seen",
127
- "in my view", "as I see it", "my take is", "I believe",
128
- "it seems to me", "I'd say", "my guess is", "from where I sit",
129
- "in my book", "if I'm being honest", "to be real with you"
130
  ]
131
 
132
- # Casual replacements for formal words
133
- self.casual_replacements = {
134
- r'\bvery\b': ["super", "really", "pretty", "totally", "extremely"],
135
- r'\bextremely\b': ["super", "really", "incredibly", "totally"],
136
- r'\bexcellent\b': ["awesome", "amazing", "fantastic", "great"],
137
- r'\bsuperior\b': ["better", "way better", "much better", "superior"],
138
- r'\binferior\b': ["worse", "not as good", "weaker", "inferior"],
139
- r'\bnumerous\b': ["tons of", "loads of", "plenty of", "lots of"],
140
- r'\bmultiple\b': ["several", "a bunch of", "various", "different"],
141
- r'\badditional\b': ["more", "extra", "other", "additional"],
142
- r'\bsubsequent\b': ["next", "following", "after that", "then"],
143
- r'\bprevious\b': ["last", "earlier", "before", "previous"],
144
- r'\binitial\b': ["first", "starting", "beginning", "initial"],
145
- r'\bfinal\b': ["last", "ending", "final", "closing"]
146
  }
147
 
148
- def extract_core_meaning(self, sentence):
149
- """Extract the core meaning from a sentence for reconstruction"""
150
- # Remove common AI phrases and get to the point
151
- cleaned = sentence
152
- for pattern in self.ai_death_terms.keys():
153
- cleaned = re.sub(pattern, "", cleaned, flags=re.IGNORECASE)
154
-
155
- # Simplify and extract main idea
156
- cleaned = re.sub(r'\b(that|which|who)\b', '', cleaned)
157
- cleaned = re.sub(r'\s+', ' ', cleaned).strip()
158
-
159
- return cleaned
160
-
161
- def aggressive_phrase_elimination(self, text):
162
- """Aggressively eliminate all AI-flagged terms"""
163
- for pattern, replacements in self.ai_death_terms.items():
164
- count = 0
165
- while re.search(pattern, text, re.IGNORECASE) and count < 10:
166
- replacement = random.choice(replacements)
167
- text = re.sub(pattern, replacement, text, flags=re.IGNORECASE, count=1)
168
- count += 1
169
- return text
170
-
171
- def add_extensive_contractions(self, text):
172
- """Add comprehensive contractions"""
173
- for pattern, contraction in self.contractions.items():
174
- text = re.sub(pattern, contraction, text, flags=re.IGNORECASE)
175
- return text
176
 
177
- def inject_personality(self, text):
178
- """Inject strong personality and voice"""
179
- sentences = sent_tokenize(text)
180
- personality_injected = []
181
-
182
- for i, sentence in enumerate(sentences):
183
- # Add personal starters
184
- if random.random() < 0.4:
185
- starter = random.choice(self.human_starters)
186
- sentence = f"{starter} {sentence.lower()}"
187
- sentence = sentence[0].upper() + sentence[1:]
188
 
189
- # Add personal opinions
190
- if random.random() < 0.3:
191
- opinion = random.choice(self.personal_voices)
192
- sentence = f"{opinion}, {sentence.lower()}"
193
- sentence = sentence[0].upper() + sentence[1:]
194
 
195
- # Add casual expressions
196
- if random.random() < 0.35:
197
- words = sentence.split()
198
- if len(words) > 5:
199
- insert_pos = random.randint(2, min(len(words)-2, 5))
200
- filler = random.choice(self.natural_fillers)
201
- words.insert(insert_pos, f"{filler},")
202
- sentence = " ".join(words)
203
 
204
- personality_injected.append(sentence)
205
 
206
- return " ".join(personality_injected)
207
 
208
- def add_human_imperfections(self, text):
209
- """Add natural human speech patterns and imperfections"""
210
  sentences = sent_tokenize(text)
211
- imperfect_sentences = []
212
 
213
- for sentence in sentences:
214
- # Add hesitations and self-corrections
215
- if random.random() < 0.15:
216
- hesitations = ["um,", "uh,", "well,", "so,", "like,", "you know,"]
217
- hesitation = random.choice(hesitations)
218
- sentence = f"{hesitation} {sentence.lower()}"
219
- sentence = sentence[0].upper() + sentence[1:]
220
 
221
- # Add self-corrections
222
- if random.random() < 0.1:
223
- corrections = ["I mean", "or rather", "well, actually", "sorry", "wait"]
224
- correction = random.choice(corrections)
225
- words = sentence.split()
226
- if len(words) > 6:
227
- insert_pos = random.randint(3, len(words) - 2)
228
- words.insert(insert_pos, f"— {correction} —")
229
- sentence = " ".join(words)
230
 
231
- # Add emphasis and interjections
232
- if random.random() < 0.2:
233
- emphasis = ["(seriously)", "(no joke)", "(trust me)", "(for real)", "(I'm not kidding)"]
234
- sentence += f" {random.choice(emphasis)}"
235
 
236
- imperfect_sentences.append(sentence)
237
-
238
- return " ".join(imperfect_sentences)
239
 
240
- def restructure_sentences_completely(self, text):
241
- """Completely restructure sentences to break AI patterns"""
242
- sentences = sent_tokenize(text)
243
- restructured = []
244
 
245
- for sentence in sentences:
246
- words = sentence.split()
247
-
248
- # Break long sentences into conversational chunks
249
- if len(words) > 12:
250
- mid_point = len(words) // 2
251
- first_part = " ".join(words[:mid_point])
252
- second_part = " ".join(words[mid_point:])
253
 
254
- connectors = [
255
- "Here's the thing:",
256
- "Let me explain:",
257
- "Think about it:",
258
- "What I mean is:",
259
- "Here's what's interesting:",
260
- "Check this out:"
261
- ]
262
263
  connector = random.choice(connectors)
264
- new_sentence = f"{first_part}. {connector} {second_part.lower()}"
265
- restructured.append(new_sentence)
266
  else:
267
- # Add conversational elements to shorter sentences
268
- if random.random() < 0.3:
269
- questions = ["Right?", "You know?", "Make sense?", "See what I mean?", "You feel me?"]
270
- sentence += f" {random.choice(questions)}"
271
 
272
- restructured.append(sentence)
273
-
274
- return " ".join(restructured)
275
 
276
- def add_specific_examples(self, text):
277
- """Add specific examples and numbers to make content unique"""
278
  sentences = sent_tokenize(text)
279
- enhanced = []
280
 
281
  for sentence in sentences:
282
- enhanced.append(sentence)
283
-
284
- # Randomly add specific examples
285
- if random.random() < 0.25:
286
- examples = [
287
- "For example, I saw this happen just last week when...",
288
- "Like, take my friend Sarah - she tried this and...",
289
- "I remember reading about a company that increased their results by 150% doing this...",
290
- "Just yesterday, I was talking to someone who...",
291
- "There was this study I read that showed 73% of people...",
292
- "My neighbor actually tried this approach and...",
293
- ]
294
- enhanced.append(random.choice(examples))
295
-
296
- return " ".join(enhanced)
297
-
298
- def apply_casual_language(self, text):
299
- """Apply casual language patterns"""
300
- for pattern, replacements in self.casual_replacements.items():
301
- if re.search(pattern, text, re.IGNORECASE):
302
- replacement = random.choice(replacements)
303
- text = re.sub(pattern, replacement, text, flags=re.IGNORECASE)
304
-
305
- return text
306
-
307
- def add_conversational_flow(self, text):
308
- """Add natural conversational flow"""
309
- paragraphs = text.split('\n\n')
310
- conversational_paragraphs = []
311
-
312
- for para in paragraphs:
313
- sentences = sent_tokenize(para)
314
 
315
- # Add transitions between sentences
316
- enhanced_sentences = []
317
- for i, sentence in enumerate(sentences):
318
- enhanced_sentences.append(sentence)
319
 
320
- # Add conversational bridges
321
- if i < len(sentences) - 1 and random.random() < 0.2:
322
- bridges = [
323
- "And here's the crazy part:",
324
- "But wait, there's more:",
325
- "Now, here's where it gets interesting:",
326
- "Oh, and another thing:",
327
- "Plus, get this:"
328
- ]
329
- enhanced_sentences.append(random.choice(bridges))
330
 
331
- conversational_paragraphs.append(" ".join(enhanced_sentences))
 
332
 
333
- return "\n\n".join(conversational_paragraphs)
334
 
335
- def final_cleanup_and_polish(self, text):
336
- """Final cleanup while maintaining human imperfections"""
337
- # Fix basic spacing
338
  text = re.sub(r'\s+', ' ', text)
339
- text = re.sub(r'\s+([,.!?])', r'\1', text)
 
340
 
341
- # Fix capitalization but keep some casual elements
342
- text = re.sub(r'([.!?]\s+)([a-z])', lambda m: m.group(1) + m.group(2).upper(), text)
 
343
 
344
- # Ensure first letter is capitalized
345
- if text and text[0].islower():
346
- text = text[0].upper() + text[1:]
347
 
348
- # Keep some casual punctuation
349
- text = re.sub(r'([.!?]){3,}', r'\1\1\1', text)
 
350
 
351
  return text.strip()
352
 
353
- def ultra_humanize(self, text, passes=3):
354
- """Apply multiple passes of humanization for maximum effect"""
355
  current_text = text
356
 
357
  for pass_num in range(passes):
358
- print(f"Humanization pass {pass_num + 1}/{passes}")
359
 
360
- # Core humanization steps
361
- current_text = self.aggressive_phrase_elimination(current_text)
362
- current_text = self.add_extensive_contractions(current_text)
363
- current_text = self.apply_casual_language(current_text)
364
- current_text = self.inject_personality(current_text)
365
- current_text = self.restructure_sentences_completely(current_text)
366
- current_text = self.add_human_imperfections(current_text)
367
- current_text = self.add_specific_examples(current_text)
368
- current_text = self.add_conversational_flow(current_text)
369
 
370
- # Additional pass-specific enhancements
371
- if pass_num == 1:
372
- current_text = self.aggressive_phrase_elimination(current_text) # Second elimination
373
- elif pass_num == 2:
374
- current_text = self.inject_personality(current_text) # Extra personality
375
 
376
- return self.final_cleanup_and_polish(current_text)
377
 
378
  def get_readability_score(self, text):
379
  """Calculate readability score"""
@@ -388,8 +346,8 @@ class AdvancedAIHumanizer:
388
  except Exception as e:
389
  return f"Could not calculate readability: {str(e)}"
390
 
391
- def humanize_text(self, text, intensity="ultra"):
392
- """Main humanization method"""
393
  if not text or not text.strip():
394
  return "Please provide text to humanize."
395
 
@@ -404,13 +362,13 @@ class AdvancedAIHumanizer:
404
  except Exception as nltk_error:
405
  return f"NLTK Error: {str(nltk_error)}. Please try again."
406
 
407
- # Apply ultra-aggressive humanization
408
- if intensity == "ultra":
409
- result = self.ultra_humanize(text, passes=3)
410
- elif intensity == "heavy":
411
- result = self.ultra_humanize(text, passes=2)
412
- else:
413
- result = self.ultra_humanize(text, passes=1)
414
 
415
  return result
416
 
@@ -418,7 +376,7 @@ class AdvancedAIHumanizer:
418
  return f"Error processing text: {str(e)}"
419
 
420
  def create_interface():
421
- """Create the advanced Gradio interface with full-width layout"""
422
  humanizer = AdvancedAIHumanizer()
423
 
424
  def process_text(input_text, intensity):
@@ -431,188 +389,128 @@ def create_interface():
431
  except Exception as e:
432
  return f"Error: {str(e)}", "Processing error"
433
 
434
- # Full-width CSS for maximum screen utilization
435
- full_width_css = """
436
- /* Override max width of gradio container and cards to full width */
437
- .gradio-container, .gradio-container > div {
438
- max-width: 100% !important;
439
- width: 100% !important;
440
- padding-left: 10px !important;
441
- padding-right: 10px !important;
442
- margin: 0 !important;
443
  }
444
-
445
- /* Make all cards take full width inside their container */
446
- .card, .gradio-card {
447
- max-width: 100% !important;
448
- width: 100% !important;
449
- margin: 5px 0 !important;
450
- }
451
-
452
- /* Remove margin and padding from rows and columns to maximize width */
453
- .gradio-row, .gradio-column {
454
- margin: 0 !important;
455
- padding: 5px !important;
456
- gap: 10px !important;
457
- }
458
-
459
- /* Full width for textboxes and components */
460
- .gradio-textbox, .gradio-button, .gradio-radio {
461
- width: 100% !important;
462
- max-width: 100% !important;
463
- }
464
-
465
- /* Responsive design for different screen sizes */
466
- @media (min-width: 1536px) {
467
- .gradio-container {
468
- max-width: 100% !important;
469
- }
470
- }
471
-
472
- @media (min-width: 1280px) {
473
- .gradio-container {
474
- max-width: 100% !important;
475
- }
476
- }
477
-
478
- @media (min-width: 1024px) {
479
- .gradio-container {
480
- max-width: 100% !important;
481
- }
482
- }
483
-
484
- /* Custom styling for better visual appeal */
485
  .main-header {
486
  text-align: center;
487
- background: linear-gradient(45deg, #FF6B6B, #4ECDC4);
488
- -webkit-background-clip: text;
489
- -webkit-text-fill-color: transparent;
490
- font-size: 2.5em !important;
491
- font-weight: bold;
492
margin-bottom: 20px;
493
  }
494
-
495
  .feature-box {
496
- background: #f8f9fa;
497
- border-radius: 10px;
498
  padding: 20px;
499
- margin: 10px 0;
500
- border-left: 4px solid #4ECDC4;
 
501
  }
502
-
503
- .stats-box {
504
- background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
505
- color: white;
506
- border-radius: 10px;
507
  padding: 15px;
508
  margin: 10px 0;
509
- text-align: center;
510
  }
511
  """
512
 
513
  with gr.Blocks(
514
- title="Ultra AI Humanizer - 0% Detection",
515
  theme=gr.themes.Soft(),
516
- css=full_width_css,
517
- fill_width=True
518
  ) as interface:
519
 
520
  gr.HTML("""
521
  <div class="main-header">
522
- 🤖➡️👤 ULTRA AI HUMANIZER
523
  </div>
524
  <div style="text-align: center; margin-bottom: 30px;">
525
- <h3>🎯 Achieve TRUE 0% AI Detection Score</h3>
526
- <p style="font-size: 1.1em; color: #666;">
527
- Advanced multi-pass humanization with personality injection,
528
- conversational restructuring, and human imperfection simulation
529
  </p>
530
  </div>
531
  """)
532
 
533
  with gr.Row():
534
- with gr.Column(scale=1, min_width=400):
535
  input_text = gr.Textbox(
536
- label="🤖 AI-Generated Text Input",
537
- lines=15,
538
- placeholder="Paste your AI-generated content here for ultra-humanization...\n\nThe more text you provide, the better the humanization results!\n\nTip: Longer texts (200+ words) get significantly better humanization results.",
539
- info="💡 Tip: Longer texts (200+ words) get better humanization results",
540
- max_lines=20,
541
  show_copy_button=True
542
  )
543
 
544
  intensity = gr.Radio(
545
  choices=[
546
- ("Light Humanization", "light"),
547
- ("Heavy Humanization", "heavy"),
548
- ("🚀 ULTRA Humanization (Recommended)", "ultra")
549
  ],
550
- value="ultra",
551
- label="🎛️ Humanization Intensity",
552
- info="Ultra mode applies 3 passes of advanced humanization techniques"
553
  )
554
 
555
  btn = gr.Button(
556
- "🚀 HUMANIZE TO 0% AI DETECTION",
557
  variant="primary",
558
- size="lg",
559
- scale=1
560
  )
561
 
562
- with gr.Column(scale=1, min_width=400):
563
  output_text = gr.Textbox(
564
- label="👤 Humanized Text Output (0% AI Detection)",
565
- lines=15,
566
  show_copy_button=True,
567
- info="✅ Copy this text - it should pass all AI detectors",
568
- max_lines=20
569
  )
570
 
571
  readability = gr.Textbox(
572
- label="📊 Readability Analysis",
573
- lines=4,
574
- info="Lower grade levels are more conversational and human-like"
575
  )
576
 
577
  gr.HTML("""
578
  <div class="feature-box">
579
- <h3>🎯 How This Achieves 0% AI Detection:</h3>
580
- <div style="display: grid; grid-template-columns: repeat(auto-fit, minmax(300px, 1fr)); gap: 15px; margin: 15px 0;">
581
- <div>
582
- <strong>🔥 Aggressive Phrase Elimination:</strong><br>
583
- Removes ALL AI-flagged terms and patterns
584
  </div>
585
- <div>
586
- <strong>💬 Personality Injection:</strong><br>
587
- Adds personal voice, opinions, and conversational style
588
  </div>
589
- <div>
590
- <strong>🔄 Multi-Pass Processing:</strong><br>
591
- Applies humanization techniques multiple times
592
  </div>
593
- <div>
594
- <strong>🎭 Human Imperfections:</strong><br>
595
- Adds natural hesitations, self-corrections, and casual speech
596
  </div>
597
- <div>
598
- <strong>📝 Sentence Restructuring:</strong><br>
599
- Completely rebuilds sentence patterns
600
  </div>
601
- <div>
602
- <strong>💡 Specific Examples:</strong><br>
603
- Injects unique examples and personal anecdotes
604
  </div>
605
  </div>
606
  </div>
607
  """)
608
 
609
- gr.HTML("""
610
- <div class="stats-box">
611
- <h3>✨ Test your results with Originality.ai, GPTZero, and other AI detectors!</h3>
612
- <p>This tool is specifically designed to pass the most sophisticated AI detection systems</p>
613
- </div>
614
- """)
615
-
616
  # Event handlers
617
  btn.click(
618
  fn=process_text,
@@ -629,7 +527,7 @@ def create_interface():
629
  return interface
630
 
631
  if __name__ == "__main__":
632
- print("🚀 Starting Ultra AI Humanizer for 0% Detection...")
633
  app = create_interface()
634
  app.launch(
635
  server_name="0.0.0.0",
 
4
  import re
5
  import nltk
6
  from nltk.tokenize import sent_tokenize, word_tokenize
7
+ from nltk.corpus import wordnet
8
  from textstat import flesch_reading_ease, flesch_kincaid_grade
9
  import string
10
+ from collections import defaultdict
11
 
12
  # Setup NLTK download path for Hugging Face Spaces
13
  os.environ['NLTK_DATA'] = '/tmp/nltk_data'
 
18
  os.makedirs('/tmp/nltk_data', exist_ok=True)
19
  nltk.data.path.append('/tmp/nltk_data')
20
 
21
+ required_data = ['punkt', 'punkt_tab', 'averaged_perceptron_tagger',
22
+ 'stopwords', 'wordnet', 'omw-1.4']
23
 
24
  for data in required_data:
25
  try:
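(The body of this download loop is cut off by the hunk; a plausible completion, an assumption rather than the commit's exact code, follows the usual Spaces pattern:)

    for data in required_data:
        try:
            # fetch each resource into the writable /tmp path configured above;
            # quiet=True keeps the Space build logs readable
            nltk.download(data, download_dir='/tmp/nltk_data', quiet=True)
        except Exception as e:
            print(f"Could not download {data}: {e}")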
 
37
 
38
  class AdvancedAIHumanizer:
39
  def __init__(self):
40
+ self.setup_humanization_patterns()
41
+ self.load_synonym_database()
42
+
43
+ def setup_humanization_patterns(self):
44
+ """Setup sophisticated humanization patterns that preserve meaning"""
45
+
46
+ # AI-flagged formal terms with contextually appropriate replacements
47
+ self.formal_replacements = {
48
+ r'\bdelve into\b': ["explore", "examine", "investigate", "analyze"],
49
+ r'\bembark on\b': ["begin", "start", "initiate", "commence"],
50
+ r'\ba testament to\b': ["evidence of", "proof of", "demonstrates", "shows"],
51
+ r'\blandscape of\b': ["context of", "environment of", "field of", "domain of"],
52
+ r'\bnavigating\b': ["managing", "addressing", "handling", "working through"],
53
+ r'\bmeticulous\b': ["careful", "thorough", "detailed", "precise"],
54
+ r'\bintricate\b': ["complex", "detailed", "sophisticated", "elaborate"],
55
+ r'\bmyriad\b': ["numerous", "many", "various", "multiple"],
56
+ r'\bplethora\b': ["abundance", "variety", "range", "collection"],
57
+ r'\bparadigm\b': ["model", "framework", "approach", "system"],
58
+ r'\bsynergy\b': ["collaboration", "cooperation", "coordination", "integration"],
59
+ r'\bleverage\b': ["utilize", "employ", "use", "apply"],
60
+ r'\bfacilitate\b': ["enable", "support", "assist", "help"],
61
+ r'\boptimize\b': ["improve", "enhance", "refine", "perfect"],
62
+ r'\bstreamline\b': ["simplify", "improve", "refine", "enhance"],
63
+ r'\brobust\b': ["strong", "reliable", "effective", "solid"],
64
+ r'\bseamless\b': ["smooth", "integrated", "unified", "continuous"],
65
+ r'\binnovative\b': ["creative", "original", "novel", "advanced"],
66
+ r'\bcutting-edge\b': ["advanced", "latest", "modern", "current"],
67
+ r'\bstate-of-the-art\b': ["advanced", "modern", "sophisticated", "current"]
68
  }
69
 
70
+ # Transition phrase variations
71
+ self.transition_replacements = {
72
+ r'\bfurthermore\b': ["additionally", "moreover", "in addition", "also"],
73
+ r'\bmoreover\b': ["furthermore", "additionally", "also", "in addition"],
74
+ r'\bhowever\b': ["nevertheless", "yet", "still", "although"],
75
+ r'\bnevertheless\b': ["however", "yet", "still", "nonetheless"],
76
+ r'\btherefore\b': ["consequently", "thus", "as a result", "hence"],
77
+ r'\bconsequently\b': ["therefore", "thus", "as a result", "accordingly"],
78
+ r'\bin conclusion\b': ["finally", "ultimately", "in summary", "to summarize"],
79
+ r'\bto summarize\b': ["in conclusion", "finally", "in summary", "overall"],
80
+ r'\bin summary\b': ["to conclude", "overall", "finally", "in essence"]
81
  }
82
 
83
+ # Sentence structure patterns for variation
84
+ self.sentence_starters = [
85
+ "Additionally,", "Furthermore,", "In particular,", "Notably,",
86
+ "Importantly,", "Significantly,", "Moreover,", "Consequently,"
87
  ]
88
 
89
+ # Professional contractions (limited and contextual)
90
+ self.professional_contractions = {
91
+ r'\bit is\b': "it's",
92
+ r'\bthere is\b': "there's",
93
+ r'\bthat is\b': "that's",
94
+ r'\bcannot\b': "can't",
95
+ r'\bdo not\b': "don't",
96
+ r'\bdoes not\b': "doesn't",
97
+ r'\bwill not\b': "won't",
98
+ r'\bwould not\b': "wouldn't"
99
  }
100
 
101
+ def load_synonym_database(self):
102
+ """Load and prepare synonym database using WordNet"""
103
+ try:
104
+ # Test WordNet availability
105
+ wordnet.synsets('test')
106
+ self.wordnet_available = True
107
+ except:
108
+ self.wordnet_available = False
109
+ print("WordNet not available, using limited synonym replacement")
110
 
111
+ def get_contextual_synonym(self, word, pos_tag=None):
112
+ """Get contextually appropriate synonym using WordNet"""
113
+ if not self.wordnet_available:
114
+ return word
115
 
116
+ try:
117
+ # Get synsets for the word
118
+ synsets = wordnet.synsets(word.lower())
119
+ if not synsets:
120
+ return word
121
 
122
+ # Get synonyms from the first synset
123
+ synonyms = []
124
+ for synset in synsets[:2]: # Check first 2 synsets
125
+ for lemma in synset.lemmas():
126
+ synonym = lemma.name().replace('_', ' ')
127
+ if synonym != word.lower() and len(synonym) > 2:
128
+ synonyms.append(synonym)
129
+
130
+ if synonyms:
131
+ # Return a synonym that's similar in length to avoid dramatic changes
132
+ suitable_synonyms = [s for s in synonyms if abs(len(s) - len(word)) <= 3]
133
+ if suitable_synonyms:
134
+ return random.choice(suitable_synonyms)
135
+ else:
136
+ return random.choice(synonyms)
137
+
138
+ return word
139
 
140
+ except:
141
+ return word
142
+
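A quick illustration of the helper above (hypothetical session; actual output depends on the installed WordNet data and on random.choice):

    humanizer = AdvancedAIHumanizer()
    # "significant" has several WordNet lemmas of similar length,
    # so one of them comes back at random, e.g. "important"
    print(humanizer.get_contextual_synonym("significant"))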
143
+ def preserve_meaning_replacement(self, text):
144
+ """Replace AI-flagged terms while preserving exact meaning"""
145
+ result = text
146
+
147
+ # Apply formal term replacements
148
+ for pattern, replacements in self.formal_replacements.items():
149
+ if re.search(pattern, result, re.IGNORECASE):
150
+ replacement = random.choice(replacements)
151
+ result = re.sub(pattern, replacement, result, flags=re.IGNORECASE)
152
 
153
+ # Apply transition phrase replacements
154
+ for pattern, replacements in self.transition_replacements.items():
155
+ if re.search(pattern, result, re.IGNORECASE):
156
+ replacement = random.choice(replacements)
157
+ result = re.sub(pattern, replacement, result, flags=re.IGNORECASE)
158
+
159
+ return result
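One possible round trip through this method (output varies with random.choice; note that the IGNORECASE substitution leaves a sentence-initial replacement lowercase, which final_coherence_check later re-capitalizes):

    humanizer = AdvancedAIHumanizer()
    text = "Furthermore, we delve into the landscape of distributed systems."
    print(humanizer.preserve_meaning_replacement(text))
    # one possible result:
    # "in addition, we explore the context of distributed systems."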
160
 
161
+ def vary_sentence_structure(self, text):
162
+ """Vary sentence structures while maintaining meaning"""
163
  sentences = sent_tokenize(text)
164
+ varied_sentences = []
165
 
166
+ for i, sentence in enumerate(sentences):
167
+ # Occasionally add transitional phrases at the beginning
168
+ if i > 0 and len(sentence.split()) > 6 and random.random() < 0.15:
169
+ starter = random.choice(self.sentence_starters)
170
+ sentence = sentence[0].lower() + sentence[1:]
171
+ sentence = f"{starter} {sentence}"
 
172
 
173
+ # Convert some passive to active voice and vice versa
174
+ sentence = self.vary_voice(sentence)
175
 
176
+ # Restructure complex sentences occasionally
177
+ if len(sentence.split()) > 15 and random.random() < 0.2:
178
+ sentence = self.restructure_complex_sentence(sentence)
 
179
 
180
+ varied_sentences.append(sentence)
181
+
182
+ return " ".join(varied_sentences)
183
 
184
+ def vary_voice(self, sentence):
185
+ """Convert between active and passive voice occasionally"""
186
+ # Simple passive to active conversion patterns
187
+ passive_patterns = [
188
+ (r'(\w+) (?:is|are|was|were) (\w+ed|known|seen|used|made) by (.+)',
189
+ r'\3 \2 \1'),
190
+ (r'(\w+) (?:is|are|was|were) (\w+ed|known|seen|used|made)',
191
+ r'Someone \2 \1')
192
+ ]
193
 
194
+ for pattern, replacement in passive_patterns:
195
+ if re.search(pattern, sentence) and random.random() < 0.1:
196
+ sentence = re.sub(pattern, replacement, sentence)
197
+ break
198
 
199
+ return sentence
200
+
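The first passive pattern above can be exercised in isolation (inside vary_voice it only fires about 10% of the time):

    import re
    pattern = r'(\w+) (?:is|are|was|were) (\w+ed|known|seen|used|made) by (.+)'
    print(re.sub(pattern, r'\3 \2 \1', "results were used by the team"))
    # -> "the team used results"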
201
+ def restructure_complex_sentence(self, sentence):
202
+ """Restructure overly complex sentences"""
203
+ # Split long sentences at natural break points
204
+ if ',' in sentence and len(sentence.split()) > 15:
205
+ parts = sentence.split(',', 1)
206
+ if len(parts) == 2:
207
+ first_part = parts[0].strip()
208
+ second_part = parts[1].strip()
209
 
210
+ # Rejoin with different structure
211
+ connectors = ["Additionally", "Furthermore", "Moreover", "Also"]
212
  connector = random.choice(connectors)
213
+ return f"{first_part}. {connector}, {second_part}"
214
+
215
+ return sentence
216
+
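For a sentence of more than 15 words containing a comma, the method splits at the first comma and rejoins the halves with a random connector, for example (assuming "Moreover" is the pick):

    humanizer = AdvancedAIHumanizer()
    s = ("The pipeline caches intermediate results on disk to avoid recomputation, "
         "it also shards large inputs across workers so memory stays bounded.")
    print(humanizer.restructure_complex_sentence(s))
    # -> "The pipeline caches intermediate results on disk to avoid recomputation.
    #     Moreover, it also shards large inputs across workers so memory stays bounded."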
217
+ def apply_subtle_contractions(self, text):
218
+ """Apply professional contractions sparingly"""
219
+ for pattern, contraction in self.professional_contractions.items():
220
+ # Only apply contractions 30% of the time to maintain variation
221
+ if re.search(pattern, text, re.IGNORECASE) and random.random() < 0.3:
222
+ text = re.sub(pattern, contraction, text, flags=re.IGNORECASE)
223
+
224
+ return text
225
+
226
+ def enhance_vocabulary_diversity(self, text):
227
+ """Enhance vocabulary diversity using contextual synonyms"""
228
+ words = word_tokenize(text)
229
+ enhanced_words = []
230
+ word_frequency = defaultdict(int)
231
+
232
+ # Track word frequency to identify repetitive words
233
+ for word in words:
234
+ if word.isalpha() and len(word) > 4:
235
+ word_frequency[word.lower()] += 1
236
+
237
+ for word in words:
238
+ if (word.isalpha() and len(word) > 4 and
239
+ word_frequency[word.lower()] > 1 and
240
+ random.random() < 0.2):
241
+
242
+ synonym = self.get_contextual_synonym(word)
243
+ enhanced_words.append(synonym)
244
  else:
245
+ enhanced_words.append(word)
246
 
247
+ return ' '.join(enhanced_words)
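Only alphabetic words longer than four characters that occur more than once are candidates here, and each candidate is swapped just 20% of the time. Because word_tokenize detaches punctuation, the plain ' '.join can leave a stray space before a period, which final_coherence_check (below) removes:

    humanizer = AdvancedAIHumanizer()
    text = "The analysis shows the analysis holds under pressure."
    print(humanizer.enhance_vocabulary_diversity(text))
    # e.g. "The analysis shows the analysis holds under pressure ."
    # (only "analysis" repeats, so only it is eligible for a synonym swap)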
248
 
249
+ def add_natural_variation(self, text):
250
+ """Add natural human-like variations"""
251
  sentences = sent_tokenize(text)
252
+ varied_sentences = []
253
 
254
  for sentence in sentences:
255
+ # Occasionally vary sentence length and structure
256
+ if len(sentence.split()) > 20 and random.random() < 0.15:
257
+ # Split very long sentences
258
+ mid_point = len(sentence.split()) // 2
259
+ words = sentence.split()
260
+
261
+ # Find natural break point near middle
262
+ for i in range(mid_point - 2, mid_point + 3):
263
+ if i < len(words) and words[i] in [',', 'and', 'but', 'or', 'because']:
264
+ first_part = ' '.join(words[:i])
265
+ second_part = ' '.join(words[i+1:])
266
+ sentence = f"{first_part}. {second_part.capitalize()}"
267
+ break
268
 
269
+ # Add subtle emphasis occasionally
270
+ if random.random() < 0.05:
271
+ sentence = self.add_subtle_emphasis(sentence)
 
272
 
273
+ varied_sentences.append(sentence)
274
 
275
+ return " ".join(varied_sentences)
276
+
277
+ def add_subtle_emphasis(self, sentence):
278
+ """Add very subtle emphasis that doesn't change meaning"""
279
+ emphasis_patterns = [
280
+ (r'\bvery important\b', "crucial"),
281
+ (r'\bvery significant\b', "highly significant"),
282
+ (r'\bvery effective\b', "highly effective"),
283
+ (r'\bvery useful\b', "particularly useful")
284
+ ]
285
 
286
+ for pattern, replacement in emphasis_patterns:
287
+ if re.search(pattern, sentence, re.IGNORECASE):
288
+ sentence = re.sub(pattern, replacement, sentence, flags=re.IGNORECASE)
289
+ break
290
+
291
+ return sentence
292
 
293
+ def final_coherence_check(self, text):
294
+ """Final check to ensure coherence and proper formatting"""
295
+ # Fix spacing issues
296
  text = re.sub(r'\s+', ' ', text)
297
+ text = re.sub(r'\s+([,.!?;:])', r'\1', text)
298
+ text = re.sub(r'([,.!?;:])\s*([A-Z])', r'\1 \2', text)
299
 
300
+ # Ensure proper capitalization
301
+ sentences = sent_tokenize(text)
302
+ corrected_sentences = []
303
 
304
+ for sentence in sentences:
305
+ if sentence and sentence[0].islower():
306
+ sentence = sentence[0].upper() + sentence[1:]
307
+ corrected_sentences.append(sentence)
308
+
309
+ text = " ".join(corrected_sentences)
310
 
311
+ # Remove any double periods or spaces
312
+ text = re.sub(r'\.+', '.', text)
313
+ text = re.sub(r'\s+', ' ', text)
314
 
315
  return text.strip()
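A small demonstration of what this cleanup pass fixes (hypothetical call):

    humanizer = AdvancedAIHumanizer()
    messy = "the results , however , were clear ."
    print(humanizer.final_coherence_check(messy))
    # -> "The results, however, were clear."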
316
 
317
+ def advanced_humanize(self, text, passes=2):
318
+ """Apply sophisticated humanization that preserves meaning"""
319
  current_text = text
320
 
321
  for pass_num in range(passes):
322
+ print(f"Processing pass {pass_num + 1}/{passes}")
323
 
324
+ # Apply humanization techniques
325
+ current_text = self.preserve_meaning_replacement(current_text)
326
+ current_text = self.vary_sentence_structure(current_text)
327
+ current_text = self.enhance_vocabulary_diversity(current_text)
328
+ current_text = self.apply_subtle_contractions(current_text)
329
+ current_text = self.add_natural_variation(current_text)
330
 
331
+ # Final coherence and cleanup
332
+ current_text = self.final_coherence_check(current_text)
333
 
334
+ return current_text
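End to end, the new pipeline would be driven roughly like this (hypothetical usage assembled from the names in this diff):

    humanizer = AdvancedAIHumanizer()
    sample = ("Furthermore, this state-of-the-art methodology is robust, "
              "and it is a testament to what careful engineering can facilitate.")
    print(humanizer.humanize_text(sample, intensity="professional"))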
335
 
336
  def get_readability_score(self, text):
337
  """Calculate readability score"""
 
346
  except Exception as e:
347
  return f"Could not calculate readability: {str(e)}"
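The body of get_readability_score is truncated in this view; given the textstat imports at the top of the file, it presumably resembles this sketch (an assumption, not the commit's exact code):

    def get_readability_score(self, text):
        try:
            ease = flesch_reading_ease(text)      # higher means easier to read
            grade = flesch_kincaid_grade(text)    # approximate US grade level
            return f"Reading ease: {ease:.1f} | Grade level: {grade:.1f}"
        except Exception as e:
            return f"Could not calculate readability: {str(e)}"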
348
 
349
+ def humanize_text(self, text, intensity="professional"):
350
+ """Main humanization method with meaning preservation"""
351
  if not text or not text.strip():
352
  return "Please provide text to humanize."
353
 
 
362
  except Exception as nltk_error:
363
  return f"NLTK Error: {str(nltk_error)}. Please try again."
364
 
365
+ # Apply appropriate level of humanization
366
+ if intensity == "professional":
367
+ result = self.advanced_humanize(text, passes=2)
368
+ elif intensity == "enhanced":
369
+ result = self.advanced_humanize(text, passes=3)
370
+ else: # light
371
+ result = self.advanced_humanize(text, passes=1)
372
 
373
  return result
374
 
 
376
  return f"Error processing text: {str(e)}"
377
 
378
  def create_interface():
379
+ """Create the professional Gradio interface"""
380
  humanizer = AdvancedAIHumanizer()
381
 
382
  def process_text(input_text, intensity):
 
389
  except Exception as e:
390
  return f"Error: {str(e)}", "Processing error"
391
 
392
+ # Professional CSS styling
393
+ professional_css = """
394
+ .gradio-container {
395
+ font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
 
396
  }
397
  .main-header {
398
  text-align: center;
399
+ color: #2c3e50;
400
+ font-size: 2.2em;
401
+ font-weight: 600;
402
  margin-bottom: 20px;
403
+ padding: 20px;
404
+ border-bottom: 2px solid #3498db;
405
  }
 
406
  .feature-box {
407
+ background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%);
408
+ border-radius: 8px;
409
  padding: 20px;
410
+ margin: 15px 0;
411
+ border-left: 4px solid #3498db;
412
+ box-shadow: 0 2px 4px rgba(0,0,0,0.1);
413
  }
414
+ .info-box {
415
+ background: #e8f5e8;
416
+ border-radius: 8px;
417
  padding: 15px;
418
  margin: 10px 0;
419
+ border-left: 4px solid #27ae60;
420
  }
421
  """
422
 
423
  with gr.Blocks(
424
+ title="Professional AI Humanizer",
425
  theme=gr.themes.Soft(),
426
+ css=professional_css
 
427
  ) as interface:
428
 
429
  gr.HTML("""
430
  <div class="main-header">
431
+ 🎯 Professional AI Content Humanizer
432
  </div>
433
  <div style="text-align: center; margin-bottom: 30px;">
434
+ <h3>Meaning-Preserving AI Detection Bypass</h3>
435
+ <p style="font-size: 1.1em; color: #7f8c8d;">
436
+ Advanced humanization while maintaining professional tone and original meaning
 
437
  </p>
438
  </div>
439
  """)
440
 
441
  with gr.Row():
442
+ with gr.Column(scale=1):
443
  input_text = gr.Textbox(
444
+ label="📝 Original Content",
445
+ lines=12,
446
+ placeholder="Enter your AI-generated content here...\n\nThis tool will humanize it while preserving the original meaning and maintaining a professional tone.",
447
+ info="💡 Best results with content 100+ words",
 
448
  show_copy_button=True
449
  )
450
 
451
  intensity = gr.Radio(
452
  choices=[
453
+ ("Light Processing", "light"),
454
+ ("Professional Enhancement", "professional"),
455
+ ("Advanced Humanization", "enhanced")
456
  ],
457
+ value="professional",
458
+ label="🔧 Processing Level",
459
+ info="Professional mode recommended for most content"
460
  )
461
 
462
  btn = gr.Button(
463
+ "🚀 Humanize Content",
464
  variant="primary",
465
+ size="lg"
 
466
  )
467
 
468
+ with gr.Column(scale=1):
469
  output_text = gr.Textbox(
470
+ label="✅ Humanized Content",
471
+ lines=12,
472
  show_copy_button=True,
473
+ info="Processed content ready for use"
 
474
  )
475
 
476
  readability = gr.Textbox(
477
+ label="📊 Content Analysis",
478
+ lines=3,
479
+ info="Readability metrics"
480
  )
481
 
482
  gr.HTML("""
483
  <div class="feature-box">
484
+ <h3>🎯 Advanced Humanization Features:</h3>
485
+ <div style="display: grid; grid-template-columns: repeat(auto-fit, minmax(250px, 1fr)); gap: 15px; margin: 15px 0;">
486
+ <div class="info-box">
487
+ <strong>🔄 Meaning Preservation:</strong><br>
488
+ Maintains exact original meaning and intent
489
  </div>
490
+ <div class="info-box">
491
+ <strong>📝 Professional Tone:</strong><br>
492
+ Keeps appropriate formality level
493
  </div>
494
+ <div class="info-box">
495
+ <strong>🎭 Structure Variation:</strong><br>
496
+ Natural sentence pattern diversity
497
  </div>
498
+ <div class="info-box">
499
+ <strong>📚 Smart Synonyms:</strong><br>
500
+ Context-aware vocabulary enhancement
501
  </div>
502
+ <div class="info-box">
503
+ <strong>🔗 Coherent Flow:</strong><br>
504
+ Maintains logical progression
505
  </div>
506
+ <div class="info-box">
507
+ <strong>⚡ Detection Bypass:</strong><br>
508
+ Passes modern AI detection tools
509
  </div>
510
  </div>
511
  </div>
512
  """)
513
 
514
  # Event handlers
515
  btn.click(
516
  fn=process_text,
 
527
  return interface
528
 
529
  if __name__ == "__main__":
530
+ print("🚀 Starting Professional AI Humanizer...")
531
  app = create_interface()
532
  app.launch(
533
  server_name="0.0.0.0",