conversantech committed on
Commit
20f3fd0
·
1 Parent(s): 4facc83
Files changed (1)
  1. app.py +379 -243
app.py CHANGED
@@ -32,272 +32,349 @@ def download_nltk_data():
 
 download_nltk_data()
 
-class AIContentHumanizer:
     def __init__(self):
-        self.setup_humanization_patterns()
 
-    def setup_humanization_patterns(self):
-        """Setup comprehensive patterns for maximum humanization"""
-        # Avoid AI-flagged terms completely
-        self.ai_flagged_terms = {
-            r'\brealm\b': ["world", "area", "field", "space", "domain"],
-            r'\bdelve\b': ["explore", "dig into", "look at", "examine", "dive into"],
-            r'\bembark\b': ["start", "begin", "kick off", "launch into"],
             r'\ba testament to\b': ["shows", "proves", "demonstrates", "reflects"],
-            r'\bthe landscape of\b': ["the world of", "the field of", "the area of"],
-            r'\bnavigating\b': ["dealing with", "handling", "managing", "working through"],
-            r'\bmeticulous\b': ["careful", "detailed", "thorough", "precise"],
-            r'\bintricate\b': ["complex", "detailed", "complicated", "elaborate"],
-            r'\bfurthermore\b': ["plus", "also", "and", "what's more"],
-            r'\bmoreover\b': ["also", "plus", "and", "on top of that"],
             r'\bhowever\b': ["but", "though", "yet", "still"],
             r'\bnevertheless\b': ["but", "still", "even so", "anyway"],
-            r'\btherefore\b': ["so", "thus", "that's why", "because of this"],
-            r'\bconsequently\b': ["so", "as a result", "because of this"],
-            r'\bin conclusion\b': ["to wrap up", "bottom line", "all in all"],
-            r'\bit is important to note\b': ["worth mentioning", "keep in mind", "note that"],
-            r'\bit should be noted\b': ["remember", "keep in mind", "note that"],
-            r'\bsignificant\b': ["big", "major", "important", "huge"],
-            r'\bsubstantial\b': ["big", "large", "major", "significant"],
             r'\bcomprehensive\b': ["complete", "full", "thorough", "detailed"],
-            r'\boptimal\b': ["best", "ideal", "perfect", "top"],
-            r'\bfacilitate\b': ["help", "make easier", "enable", "assist"],
-            r'\butilize\b': ["use", "employ", "apply", "work with"],
-            r'\bleverage\b': ["use", "take advantage of", "make use of"],
             r'\benhance\b': ["improve", "boost", "make better", "upgrade"],
-            r'\bimplement\b': ["put in place", "set up", "start using", "apply"]
         }
 
-        # More natural contractions
         self.contractions = {
-            r'\bdo not\b': "don't",
-            r'\bdoes not\b': "doesn't",
-            r'\bdid not\b': "didn't",
-            r'\bwill not\b': "won't",
-            r'\bwould not\b': "wouldn't",
-            r'\bcould not\b': "couldn't",
-            r'\bshould not\b': "shouldn't",
-            r'\bcannot\b': "can't",
-            r'\bis not\b': "isn't",
-            r'\bare not\b': "aren't",
-            r'\bwas not\b': "wasn't",
-            r'\bwere not\b': "weren't",
-            r'\bhave not\b': "haven't",
-            r'\bhas not\b': "hasn't",
-            r'\bhad not\b': "hadn't",
-            r'\bI will\b': "I'll",
-            r'\byou will\b': "you'll",
-            r'\bhe will\b': "he'll",
-            r'\bshe will\b': "she'll",
-            r'\bwe will\b': "we'll",
-            r'\bthey will\b': "they'll",
-            r'\bI would\b': "I'd",
-            r'\byou would\b': "you'd",
-            r'\bI have\b': "I've",
-            r'\byou have\b': "you've",
-            r'\bwe have\b': "we've",
-            r'\bthey have\b': "they've",
-            r'\bthat is\b': "that's",
-            r'\bit is\b': "it's",
-            r'\bwho is\b': "who's",
-            r'\bwhat is\b': "what's",
-            r'\bwhere is\b': "where's"
         }
 
-        # Casual conversation starters
-        self.conversation_starters = [
-            "Look,", "Listen,", "Here's the thing -", "You know what?",
-            "Honestly,", "Real talk -", "Let me tell you,", "So here's what I think -",
-            "Okay, so", "Right, so", "Well,", "Actually,", "Basically,"
         ]
 
         # Natural fillers and expressions
-        self.natural_expressions = [
-            "you know", "I mean", "like", "actually", "basically",
-            "honestly", "literally", "obviously", "clearly", "definitely",
-            "pretty much", "kind of", "sort of", "more or less", "at the end of the day",
             "when it comes down to it", "if you ask me", "in my experience",
-            "from what I've seen", "the way I see it"
         ]
 
         # Personal perspective phrases
-        self.personal_phrases = [
             "I think", "in my opinion", "from my experience", "personally",
             "if you ask me", "the way I see it", "from what I've seen",
             "in my view", "as I see it", "my take is", "I believe",
-            "it seems to me", "I'd say", "my guess is"
         ]
 
-    def replace_ai_phrases(self, text):
-        """Aggressively replace AI-flagged terms"""
-        for pattern, replacements in self.ai_flagged_terms.items():
-            while re.search(pattern, text, re.IGNORECASE):
                 replacement = random.choice(replacements)
                 text = re.sub(pattern, replacement, text, flags=re.IGNORECASE, count=1)
         return text
 
-    def add_contractions(self, text):
-        """Add extensive contractions"""
         for pattern, contraction in self.contractions.items():
             text = re.sub(pattern, contraction, text, flags=re.IGNORECASE)
         return text
 
-    def add_human_imperfections(self, text):
-        """Add subtle human-like imperfections"""
         sentences = sent_tokenize(text)
-        modified_sentences = []
 
-        for sentence in sentences:
-            # Add occasional typos that are quickly corrected
-            if random.random() < 0.1:
                 words = sentence.split()
                 if len(words) > 5:
-                    # Add a self-correction
-                    correction_phrases = ["I mean", "or rather", "well, actually", "sorry,"]
-                    correction = random.choice(correction_phrases)
-                    insert_pos = random.randint(2, len(words) - 2)
-                    words.insert(insert_pos, f"- {correction} -")
                     sentence = " ".join(words)
 
-            # Add hesitation markers
-            if random.random() < 0.15:
-                hesitations = ["um,", "uh,", "well,", "so,", "like,"]
-                hesitation = random.choice(hesitations)
-                sentence = f"{hesitation} {sentence.lower()}"
-                sentence = sentence[0].upper() + sentence[1:]
-
-            modified_sentences.append(sentence)
 
-        return " ".join(modified_sentences)
 
-    def vary_sentence_structure_advanced(self, text):
-        """Advanced sentence structure variation"""
         sentences = sent_tokenize(text)
-        varied_sentences = []
 
-        for i, sentence in enumerate(sentences):
-            words = sentence.split()
-
-            # Add conversation starters randomly
-            if random.random() < 0.25 and len(words) > 6:
-                starter = random.choice(self.conversation_starters)
-                sentence = f"{starter} {sentence.lower()}"
                 sentence = sentence[0].upper() + sentence[1:]
 
-            # Break long sentences with interjections
-            if len(words) > 15 and random.random() < 0.4:
-                mid_point = len(words) // 2
-                interjection = random.choice(["- and this is key -", "- here's the thing -", "- get this -"])
-                words.insert(mid_point, interjection)
-                sentence = " ".join(words)
 
-            # Add questions to engage reader
-            if random.random() < 0.2 and i > 0:
-                questions = ["Right?", "You know?", "Make sense?", "See what I mean?"]
-                sentence += f" {random.choice(questions)}"
 
-            varied_sentences.append(sentence)
 
-        return " ".join(varied_sentences)
 
-    def add_personal_touches_advanced(self, text):
-        """Add extensive personal touches"""
         sentences = sent_tokenize(text)
 
-        # Add personal anecdotes
-        if len(sentences) > 3 and random.random() < 0.3:
-            anecdotes = [
-                "I've been there myself, and",
-                "From my own experience,",
-                "I remember when I first learned this -",
-                "This reminds me of something that happened to me -"
-            ]
-            insert_pos = random.randint(1, len(sentences) - 2)
-            anecdote = random.choice(anecdotes)
-            sentences[insert_pos] = f"{anecdote} {sentences[insert_pos].lower()}"
-
-        # Add personal opinions
-        for i in range(len(sentences)):
-            if random.random() < 0.3:
-                personal_phrase = random.choice(self.personal_phrases)
-                sentences[i] = f"{personal_phrase}, {sentences[i].lower()}"
-                sentences[i] = sentences[i][0].upper() + sentences[i][1:]
-
-        return " ".join(sentences)
-
-    def add_casual_punctuation_advanced(self, text):
-        """Advanced casual punctuation"""
-        # Add em dashes for emphasis
-        text = re.sub(r'(\w+)\. ([A-Z])', r'\1 — \2', text)
-
-        # Add ellipses for pauses
-        if random.random() < 0.4:
-            text = re.sub(r'(\w+)\.', r'\1...', text, count=random.randint(1, 2))
-
-        # Add parenthetical asides
-        sentences = sent_tokenize(text)
-        if len(sentences) > 2 and random.random() < 0.3:
-            asides = ["(trust me on this)", "(I know, I know)", "(bear with me)", "(you'll see why)"]
-            insert_pos = random.randint(1, len(sentences) - 1)
-            aside = random.choice(asides)
-            sentences[insert_pos] += f" {aside}"
 
-        return " ".join(sentences)
 
-    def add_natural_fillers_advanced(self, text):
-        """Add extensive natural conversation fillers"""
         sentences = sent_tokenize(text)
 
-        for i in range(len(sentences)):
-            if random.random() < 0.4:
-                filler = random.choice(self.natural_expressions)
-                # Insert filler at different positions
-                words = sentences[i].split()
-                if len(words) > 4:
-                    insert_pos = random.randint(1, min(3, len(words) - 1))
-                    words.insert(insert_pos, f"{filler},")
-                    sentences[i] = " ".join(words)
 
-        return " ".join(sentences)
 
-    def add_colloquialisms(self, text):
-        """Add colloquial expressions and slang"""
-        colloquial_replacements = {
-            r'\bvery good\b': ["pretty great", "really solid", "super good"],
-            r'\bvery bad\b': ["pretty awful", "really rough", "super bad"],
-            r'\bvery important\b': ["super important", "really key", "pretty crucial"],
-            r'\ba lot of\b': ["tons of", "loads of", "plenty of", "heaps of"],
-            r'\bmany people\b': ["lots of folks", "tons of people", "loads of people"],
-            r'\bquickly\b': ["fast", "in a flash", "super quick"],
-            r'\bslowly\b': ["at a snail's pace", "pretty slow", "taking forever"]
-        }
-
-        for pattern, replacements in colloquial_replacements.items():
             if re.search(pattern, text, re.IGNORECASE):
                 replacement = random.choice(replacements)
                 text = re.sub(pattern, replacement, text, flags=re.IGNORECASE)
 
         return text
 
-    def clean_text_advanced(self, text):
-        """Advanced text cleaning with natural formatting"""
-        # Fix spacing issues
         text = re.sub(r'\s+', ' ', text)
         text = re.sub(r'\s+([,.!?])', r'\1', text)
 
-        # Fix capitalization after sentence starters and interjections
         text = re.sub(r'([.!?]\s+)([a-z])', lambda m: m.group(1) + m.group(2).upper(), text)
-        text = re.sub(r'(^|\. )([a-z])', lambda m: m.group(1) + m.group(2).upper(), text)
 
         # Ensure first letter is capitalized
         if text and text[0].islower():
             text = text[0].upper() + text[1:]
 
-        # Clean up multiple punctuation
-        text = re.sub(r'([.!?]){2,}', r'\1', text)
 
         return text.strip()
 
     def get_readability_score(self, text):
         """Calculate readability score"""
         try:
@@ -311,8 +388,8 @@ class AIContentHumanizer:
         except Exception as e:
             return f"Could not calculate readability: {str(e)}"
 
-    def humanize_text(self, text, intensity="heavy"):
-        """Main method to humanize AI-generated text with maximum effectiveness"""
         if not text or not text.strip():
             return "Please provide text to humanize."
 
@@ -325,33 +402,24 @@ class AIContentHumanizer:
             if not test_tokens:
                 raise Exception("NLTK tokenization failed")
         except Exception as nltk_error:
-            return f"NLTK Error: {str(nltk_error)}. Please try again or contact support."
 
-        # Apply aggressive humanization for 0% AI detection
-        text = self.replace_ai_phrases(text)
-        text = self.add_contractions(text)
-        text = self.add_colloquialisms(text)
 
-        if intensity in ["medium", "heavy"]:
-            text = self.vary_sentence_structure_advanced(text)
-            text = self.add_personal_touches_advanced(text)
-            text = self.add_casual_punctuation_advanced(text)
-            text = self.add_natural_fillers_advanced(text)
-
-        if intensity == "heavy":
-            text = self.add_human_imperfections(text)
-            # Apply multiple passes for maximum humanization
-            text = self.replace_ai_phrases(text)  # Second pass
-            text = self.add_natural_fillers_advanced(text)  # Second pass
-
-        return self.clean_text_advanced(text)
 
         except Exception as e:
-            return f"Error processing text: {str(e)}\n\nOriginal text: {text}"
 
 def create_interface():
-    """Create the Gradio interface"""
-    humanizer = AIContentHumanizer()
 
     def process_text(input_text, intensity):
         if not input_text:
@@ -363,47 +431,115 @@ def create_interface():
         except Exception as e:
             return f"Error: {str(e)}", "Processing error"
 
-    with gr.Blocks(title="AI Content Humanizer - 0% Detection", theme=gr.themes.Soft()) as interface:
-        gr.Markdown("""# 🤖➡️👤 Advanced AI Content Humanizer
-        **Achieve 0% AI Detection Score** - Transform AI content into completely human-sounding text!""")
 
         with gr.Row():
-            with gr.Column():
                 input_text = gr.Textbox(
-                    label="AI-generated Text",
-                    lines=10,
-                    placeholder="Paste your AI-generated text here for maximum humanization..."
                 )
                 intensity = gr.Radio(
-                    ["light", "medium", "heavy"],
-                    value="heavy",
-                    label="Humanization Level",
-                    info="Heavy mode recommended for 0% AI detection"
                 )
-                btn = gr.Button("🚀 Humanize for 0% AI Detection", variant="primary", size="lg")
 
-            with gr.Column():
                 output_text = gr.Textbox(
-                    label="Humanized Text (0% AI Detection)",
-                    lines=10,
-                    show_copy_button=True
                 )
-                readability = gr.Textbox(label="Readability Score", lines=3)
-
-        gr.Markdown("""
-        ### 💡 Tips for 0% AI Detection:
-        - Use **Heavy** mode for maximum humanization
-        - The tool adds natural conversation patterns, personal touches, and human imperfections
-        - Removes all AI-flagged terms and phrases
-        - Test your output with multiple AI detectors for verification
         """)
 
-        btn.click(fn=process_text, inputs=[input_text, intensity], outputs=[output_text, readability])
-        input_text.submit(fn=process_text, inputs=[input_text, intensity], outputs=[output_text, readability])
 
     return interface
 
 if __name__ == "__main__":
-    print("Starting Advanced AI Content Humanizer for 0% Detection...")
     app = create_interface()
-    app.launch(server_name="0.0.0.0", server_port=7860, show_error=True)
 
 download_nltk_data()
 
+class AdvancedAIHumanizer:
     def __init__(self):
+        self.setup_comprehensive_patterns()
 
+    def setup_comprehensive_patterns(self):
+        """Setup the most comprehensive humanization patterns"""
+
+        # AI-flagged terms that must be completely eliminated
+        self.ai_death_terms = {
+            r'\brealm\b': ["world", "space", "area", "field", "zone"],
+            r'\bdelve\b': ["dig into", "explore", "look at", "check out", "dive into"],
+            r'\bembark\b': ["start", "begin", "kick off", "jump into", "get going"],
             r'\ba testament to\b': ["shows", "proves", "demonstrates", "reflects"],
+            r'\bthe landscape of\b': ["the world of", "what's happening in", "the scene in"],
+            r'\bnavigating\b': ["dealing with", "handling", "figuring out", "working through"],
+            r'\bmeticulous\b': ["super careful", "really detailed", "thorough", "precise"],
+            r'\bintricate\b': ["complex", "detailed", "tricky", "complicated"],
+            r'\bfurthermore\b': ["plus", "also", "and", "on top of that"],
+            r'\bmoreover\b': ["also", "plus", "and", "what's more"],
             r'\bhowever\b': ["but", "though", "yet", "still"],
             r'\bnevertheless\b': ["but", "still", "even so", "anyway"],
+            r'\btherefore\b': ["so", "that's why", "because of this", "which means"],
+            r'\bconsequently\b': ["so", "as a result", "because of this", "that's why"],
+            r'\bin conclusion\b': ["to wrap up", "bottom line", "all in all", "so basically"],
+            r'\bit is important to note\b': ["worth mentioning", "keep in mind", "remember"],
+            r'\bit should be noted\b': ["remember", "keep in mind", "don't forget"],
+            r'\bsignificant\b': ["big", "major", "huge", "important"],
+            r'\bsubstantial\b': ["big", "large", "major", "huge"],
             r'\bcomprehensive\b': ["complete", "full", "thorough", "detailed"],
+            r'\boptimal\b': ["best", "perfect", "ideal", "top-notch"],
+            r'\bfacilitate\b': ["help", "make easier", "enable", "assist with"],
+            r'\butilize\b': ["use", "work with", "employ", "make use of"],
+            r'\bleverage\b': ["use", "take advantage of", "make use of", "tap into"],
             r'\benhance\b': ["improve", "boost", "make better", "upgrade"],
+            r'\bimplement\b': ["put in place", "set up", "start using", "roll out"],
+            r'\bparadigm\b': ["approach", "way", "method", "system"],
+            r'\bmethodology\b': ["method", "approach", "way", "system"],
+            r'\bsynergy\b': ["teamwork", "working together", "collaboration"],
+            r'\boptimize\b': ["improve", "make better", "fine-tune", "perfect"],
+            r'\bstreamline\b': ["simplify", "make easier", "smooth out"],
+            r'\brobust\b': ["strong", "solid", "reliable", "tough"],
+            r'\bscalable\b': ["flexible", "adaptable", "expandable"],
+            r'\bseamless\b': ["smooth", "easy", "effortless", "simple"],
+            r'\binnovative\b': ["new", "creative", "fresh", "cutting-edge"],
+            r'\bgroundbreaking\b': ["amazing", "revolutionary", "game-changing"],
+            r'\btransformative\b': ["life-changing", "game-changing", "revolutionary"],
+            r'\bparadigm shift\b': ["big change", "major shift", "game changer"],
+            r'\bgame changer\b': ["total game changer", "complete shift", "major breakthrough"],
+            r'\bcutting-edge\b': ["latest", "newest", "state-of-the-art", "advanced"],
+            r'\bstate-of-the-art\b': ["latest", "newest", "most advanced", "top-notch"]
         }
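Editor's note: every key above is a word-boundary regex applied case-insensitively, which has two side effects worth knowing: `\b` also fires at hyphens, and the replacement inherits none of the original capitalization. A standalone illustrative check (not app code):

```python
import re

# Standalone check of the pattern style used in ai_death_terms (illustrative input).
pattern = r'\bleverage\b'
text = "Leverage data; don't over-leverage."
print(re.sub(pattern, "tap into", text, flags=re.IGNORECASE))
# -> "tap into data; don't over-tap into."
# \b treats the hyphen as a boundary, so "over-leverage" matches too, and the
# sentence-initial capital is lost until a later cleanup step re-capitalizes it.
```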
 
+        # Extensive contractions for natural speech
         self.contractions = {
+            r'\bdo not\b': "don't", r'\bdoes not\b': "doesn't", r'\bdid not\b': "didn't",
+            r'\bwill not\b': "won't", r'\bwould not\b': "wouldn't", r'\bcould not\b': "couldn't",
+            r'\bshould not\b': "shouldn't", r'\bcannot\b': "can't", r'\bis not\b': "isn't",
+            r'\bare not\b': "aren't", r'\bwas not\b': "wasn't", r'\bwere not\b': "weren't",
+            r'\bhave not\b': "haven't", r'\bhas not\b': "hasn't", r'\bhad not\b': "hadn't",
+            r'\bI will\b': "I'll", r'\byou will\b': "you'll", r'\bhe will\b': "he'll",
+            r'\bshe will\b': "she'll", r'\bwe will\b': "we'll", r'\bthey will\b': "they'll",
+            r'\bI would\b': "I'd", r'\byou would\b': "you'd", r'\bI have\b': "I've",
+            r'\byou have\b': "you've", r'\bwe have\b': "we've", r'\bthey have\b': "they've",
+            r'\bthat is\b': "that's", r'\bit is\b': "it's", r'\bwho is\b': "who's",
+            r'\bwhat is\b': "what's", r'\bwhere is\b': "where's", r'\bwhen is\b': "when's",
+            r'\bhow is\b': "how's", r'\bwhy is\b': "why's", r'\bthere is\b': "there's",
+            r'\bthere are\b': "there're", r'\bhere is\b': "here's"
         }
 
+        # Conversation starters that sound human
+        self.human_starters = [
+            "Look,", "Listen,", "Here's the thing -", "You know what?", "Honestly,",
+            "Real talk -", "Let me tell you,", "So here's what I think -", "Okay, so",
+            "Right, so", "Well,", "Actually,", "Basically,", "Here's what's wild -",
+            "Get this -", "Check it out -", "So I was thinking -", "You know what's crazy?",
+            "Here's something interesting -", "Let me break this down for you -"
         ]
 
         # Natural fillers and expressions
+        self.natural_fillers = [
+            "you know", "I mean", "like", "actually", "basically", "honestly",
+            "literally", "obviously", "clearly", "definitely", "pretty much",
+            "kind of", "sort of", "more or less", "at the end of the day",
             "when it comes down to it", "if you ask me", "in my experience",
+            "from what I've seen", "the way I see it", "real quick", "super quick",
+            "really fast", "pretty cool", "kinda weird", "sorta like"
         ]
 
         # Personal perspective phrases
+        self.personal_voices = [
             "I think", "in my opinion", "from my experience", "personally",
             "if you ask me", "the way I see it", "from what I've seen",
             "in my view", "as I see it", "my take is", "I believe",
+            "it seems to me", "I'd say", "my guess is", "from where I sit",
+            "in my book", "if I'm being honest", "to be real with you"
         ]
+
+        # Casual replacements for formal words
+        self.casual_replacements = {
+            r'\bvery\b': ["super", "really", "pretty", "totally", "extremely"],
+            r'\bextremely\b': ["super", "really", "incredibly", "totally"],
+            r'\bexcellent\b': ["awesome", "amazing", "fantastic", "great"],
+            r'\bsuperior\b': ["better", "way better", "much better", "superior"],
+            r'\binferior\b': ["worse", "not as good", "weaker", "inferior"],
+            r'\bnumerous\b': ["tons of", "loads of", "plenty of", "lots of"],
+            r'\bmultiple\b': ["several", "a bunch of", "various", "different"],
+            r'\badditional\b': ["more", "extra", "other", "additional"],
+            r'\bsubsequent\b': ["next", "following", "after that", "then"],
+            r'\bprevious\b': ["last", "earlier", "before", "previous"],
+            r'\binitial\b': ["first", "starting", "beginning", "initial"],
+            r'\bfinal\b': ["last", "ending", "final", "closing"]
+        }
+
+    def extract_core_meaning(self, sentence):
+        """Extract the core meaning from a sentence for reconstruction"""
+        # Remove common AI phrases and get to the point
+        cleaned = sentence
+        for pattern in self.ai_death_terms.keys():
+            cleaned = re.sub(pattern, "", cleaned, flags=re.IGNORECASE)
+
+        # Simplify and extract main idea
+        cleaned = re.sub(r'\b(that|which|who)\b', '', cleaned)
+        cleaned = re.sub(r'\s+', ' ', cleaned).strip()
+
+        return cleaned
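Editor's note: tracing those three regex steps by hand shows what the helper actually produces; the final `\s+` collapse is what closes the holes left by the deletions. (Illustrative input below; also note that as of this commit `extract_core_meaning` is not called from the `ultra_humanize` pipeline further down.)

```python
import re

# Re-running extract_core_meaning's steps on an illustrative sentence:
s = "We will leverage tools that facilitate the work."
for pattern in (r'\bleverage\b', r'\bfacilitate\b'):  # stand-ins for ai_death_terms keys
    s = re.sub(pattern, "", s, flags=re.IGNORECASE)   # flagged terms are deleted, not replaced
s = re.sub(r'\b(that|which|who)\b', '', s)            # strip relative pronouns
s = re.sub(r'\s+', ' ', s).strip()                    # collapse the leftover gaps
print(s)  # -> "We will tools the work."
```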
 
+    def aggressive_phrase_elimination(self, text):
+        """Aggressively eliminate all AI-flagged terms"""
+        for pattern, replacements in self.ai_death_terms.items():
+            count = 0
+            while re.search(pattern, text, re.IGNORECASE) and count < 10:
                 replacement = random.choice(replacements)
                 text = re.sub(pattern, replacement, text, flags=re.IGNORECASE, count=1)
+                count += 1
         return text
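Editor's note: the new `count < 10` cap is not cosmetic. A replacement can itself match the pattern: in the table above, `\bgame changer\b` maps to a pool containing "total game changer", which still matches, so the unguarded `while` in the removed `replace_ai_phrases` could respin indefinitely. A seeded sketch of the guarded loop:

```python
import random
import re

random.seed(0)  # seeded only so this sketch is reproducible
pattern = r'\bgame changer\b'
pool = ["total game changer", "complete shift", "major breakthrough"]

text, count = "This tool is a game changer.", 0
while re.search(pattern, text, re.IGNORECASE) and count < 10:
    text = re.sub(pattern, random.choice(pool), text, flags=re.IGNORECASE, count=1)
    count += 1
print(text)  # terminates within 10 iterations even if "total game changer" keeps re-matching
```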
 
+    def add_extensive_contractions(self, text):
+        """Add comprehensive contractions"""
         for pattern, contraction in self.contractions.items():
             text = re.sub(pattern, contraction, text, flags=re.IGNORECASE)
         return text
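Editor's note: because the substitution is case-insensitive, a sentence-initial "Do not" becomes a lowercase "don't"; `final_cleanup_and_polish` re-capitalizes sentence starts later in the pipeline. Standalone illustration:

```python
import re

# Case-insensitive contraction: the capital on "Do" is not preserved.
print(re.sub(r'\bdo not\b', "don't", "Do not panic. We do not need to.",
             flags=re.IGNORECASE))
# -> "don't panic. We don't need to."
```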
 
+    def inject_personality(self, text):
+        """Inject strong personality and voice"""
         sentences = sent_tokenize(text)
+        personality_injected = []
 
+        for i, sentence in enumerate(sentences):
+            # Add personal starters
+            if random.random() < 0.4:
+                starter = random.choice(self.human_starters)
+                sentence = f"{starter} {sentence.lower()}"
+                sentence = sentence[0].upper() + sentence[1:]
+
+            # Add personal opinions
+            if random.random() < 0.3:
+                opinion = random.choice(self.personal_voices)
+                sentence = f"{opinion}, {sentence.lower()}"
+                sentence = sentence[0].upper() + sentence[1:]
+
+            # Add casual expressions
+            if random.random() < 0.35:
                 words = sentence.split()
                 if len(words) > 5:
+                    insert_pos = random.randint(2, min(len(words)-2, 5))
+                    filler = random.choice(self.natural_fillers)
+                    words.insert(insert_pos, f"{filler},")
                     sentence = " ".join(words)
 
+            personality_injected.append(sentence)
 
+        return " ".join(personality_injected)
 
+    def add_human_imperfections(self, text):
+        """Add natural human speech patterns and imperfections"""
         sentences = sent_tokenize(text)
+        imperfect_sentences = []
 
+        for sentence in sentences:
+            # Add hesitations and self-corrections
+            if random.random() < 0.15:
+                hesitations = ["um,", "uh,", "well,", "so,", "like,", "you know,"]
+                hesitation = random.choice(hesitations)
+                sentence = f"{hesitation} {sentence.lower()}"
                 sentence = sentence[0].upper() + sentence[1:]
 
+            # Add self-corrections
+            if random.random() < 0.1:
+                corrections = ["I mean", "or rather", "well, actually", "sorry", "wait"]
+                correction = random.choice(corrections)
+                words = sentence.split()
+                if len(words) > 6:
+                    insert_pos = random.randint(3, len(words) - 2)
+                    words.insert(insert_pos, f"— {correction} —")
+                    sentence = " ".join(words)
 
+            # Add emphasis and interjections
+            if random.random() < 0.2:
+                emphasis = ["(seriously)", "(no joke)", "(trust me)", "(for real)", "(I'm not kidding)"]
+                sentence += f" {random.choice(emphasis)}"
 
+            imperfect_sentences.append(sentence)
 
+        return " ".join(imperfect_sentences)
 
+    def restructure_sentences_completely(self, text):
+        """Completely restructure sentences to break AI patterns"""
         sentences = sent_tokenize(text)
+        restructured = []
 
+        for sentence in sentences:
+            words = sentence.split()
+
+            # Break long sentences into conversational chunks
+            if len(words) > 12:
+                mid_point = len(words) // 2
+                first_part = " ".join(words[:mid_point])
+                second_part = " ".join(words[mid_point:])
+
+                connectors = [
+                    "Here's the thing:",
+                    "Let me explain:",
+                    "Think about it:",
+                    "What I mean is:",
+                    "Here's what's interesting:",
+                    "Check this out:"
+                ]
+
+                connector = random.choice(connectors)
+                new_sentence = f"{first_part}. {connector} {second_part.lower()}"
+                restructured.append(new_sentence)
+            else:
+                # Add conversational elements to shorter sentences
+                if random.random() < 0.3:
+                    questions = ["Right?", "You know?", "Make sense?", "See what I mean?", "You feel me?"]
+                    sentence += f" {random.choice(questions)}"
+
+                restructured.append(sentence)
 
+        return " ".join(restructured)
 
+    def add_specific_examples(self, text):
+        """Add specific examples and numbers to make content unique"""
         sentences = sent_tokenize(text)
+        enhanced = []
 
+        for sentence in sentences:
+            enhanced.append(sentence)
+
+            # Randomly add specific examples
+            if random.random() < 0.25:
+                examples = [
+                    "For example, I saw this happen just last week when...",
+                    "Like, take my friend Sarah - she tried this and...",
+                    "I remember reading about a company that increased their results by 150% doing this...",
+                    "Just yesterday, I was talking to someone who...",
+                    "There was this study I read that showed 73% of people...",
+                    "My neighbor actually tried this approach and...",
+                ]
+                enhanced.append(random.choice(examples))
 
+        return " ".join(enhanced)
 
+    def apply_casual_language(self, text):
+        """Apply casual language patterns"""
+        for pattern, replacements in self.casual_replacements.items():
             if re.search(pattern, text, re.IGNORECASE):
                 replacement = random.choice(replacements)
                 text = re.sub(pattern, replacement, text, flags=re.IGNORECASE)
 
         return text
 
+    def add_conversational_flow(self, text):
+        """Add natural conversational flow"""
+        paragraphs = text.split('\n\n')
+        conversational_paragraphs = []
+
+        for para in paragraphs:
+            sentences = sent_tokenize(para)
+
+            # Add transitions between sentences
+            enhanced_sentences = []
+            for i, sentence in enumerate(sentences):
+                enhanced_sentences.append(sentence)
+
+                # Add conversational bridges
+                if i < len(sentences) - 1 and random.random() < 0.2:
+                    bridges = [
+                        "And here's the crazy part:",
+                        "But wait, there's more:",
+                        "Now, here's where it gets interesting:",
+                        "Oh, and another thing:",
+                        "Plus, get this:"
+                    ]
+                    enhanced_sentences.append(random.choice(bridges))
+
+            conversational_paragraphs.append(" ".join(enhanced_sentences))
+
+        return "\n\n".join(conversational_paragraphs)
+
+    def final_cleanup_and_polish(self, text):
+        """Final cleanup while maintaining human imperfections"""
+        # Fix basic spacing
         text = re.sub(r'\s+', ' ', text)
         text = re.sub(r'\s+([,.!?])', r'\1', text)
 
+        # Fix capitalization but keep some casual elements
         text = re.sub(r'([.!?]\s+)([a-z])', lambda m: m.group(1) + m.group(2).upper(), text)
 
         # Ensure first letter is capitalized
         if text and text[0].islower():
             text = text[0].upper() + text[1:]
 
+        # Keep some casual punctuation
+        text = re.sub(r'([.!?]){3,}', r'\1\1\1', text)
 
         return text.strip()
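Editor's note: the punctuation cap deserves a closer look: `\1` holds the last character the group matched, so a mixed run like "?!?!" collapses to three of its final character. Standalone check:

```python
import re

# ([.!?]){3,} captures one char per repetition; \1 is the LAST one matched,
# so any run of three or more marks is normalized to that character tripled.
print(re.sub(r'([.!?]){3,}', r'\1\1\1', "No way!!!!! Really?!?!"))
# -> "No way!!! Really!!!"
```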
 
+    def ultra_humanize(self, text, passes=3):
+        """Apply multiple passes of humanization for maximum effect"""
+        current_text = text
+
+        for pass_num in range(passes):
+            print(f"Humanization pass {pass_num + 1}/{passes}")
+
+            # Core humanization steps
+            current_text = self.aggressive_phrase_elimination(current_text)
+            current_text = self.add_extensive_contractions(current_text)
+            current_text = self.apply_casual_language(current_text)
+            current_text = self.inject_personality(current_text)
+            current_text = self.restructure_sentences_completely(current_text)
+            current_text = self.add_human_imperfections(current_text)
+            current_text = self.add_specific_examples(current_text)
+            current_text = self.add_conversational_flow(current_text)
+
+            # Additional pass-specific enhancements
+            if pass_num == 1:
+                current_text = self.aggressive_phrase_elimination(current_text)  # Second elimination
+            elif pass_num == 2:
+                current_text = self.inject_personality(current_text)  # Extra personality
+
+        return self.final_cleanup_and_polish(current_text)
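Editor's note: every pass re-runs every step, so fillers, interjections, and injected anecdotes accumulate, and three passes can grow a short draft considerably. A quick way to see the compounding, assuming the class and NLTK data are available:

```python
# Comparing pass counts on the same draft (output lengths will vary run to run):
h = AdvancedAIHumanizer()
draft = "It is important to note that this methodology will enhance outcomes."
for passes in (1, 2, 3):
    out = h.ultra_humanize(draft, passes=passes)
    print(passes, "->", len(out.split()), "words")
```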
+
     def get_readability_score(self, text):
         """Calculate readability score"""
         try:
 
         except Exception as e:
             return f"Could not calculate readability: {str(e)}"
 
+    def humanize_text(self, text, intensity="ultra"):
+        """Main humanization method"""
         if not text or not text.strip():
             return "Please provide text to humanize."
 
             if not test_tokens:
                 raise Exception("NLTK tokenization failed")
         except Exception as nltk_error:
+            return f"NLTK Error: {str(nltk_error)}. Please try again."
 
+        # Apply ultra-aggressive humanization
+        if intensity == "ultra":
+            result = self.ultra_humanize(text, passes=3)
+        elif intensity == "heavy":
+            result = self.ultra_humanize(text, passes=2)
+        else:
+            result = self.ultra_humanize(text, passes=1)
 
+        return result
 
         except Exception as e:
+            return f"Error processing text: {str(e)}"
 
 def create_interface():
+    """Create the advanced Gradio interface"""
+    humanizer = AdvancedAIHumanizer()
 
     def process_text(input_text, intensity):
         if not input_text:
 
         except Exception as e:
             return f"Error: {str(e)}", "Processing error"
 
+    # Custom CSS for better styling
+    custom_css = """
+    .gradio-container {
+        max-width: 1200px !important;
+    }
+    .main-header {
+        text-align: center;
+        background: linear-gradient(45deg, #FF6B6B, #4ECDC4);
+        -webkit-background-clip: text;
+        -webkit-text-fill-color: transparent;
+        font-size: 2.5em !important;
+        font-weight: bold;
+        margin-bottom: 20px;
+    }
+    """
+
+    with gr.Blocks(title="Ultra AI Humanizer - 0% Detection", theme=gr.themes.Soft(), css=custom_css) as interface:
+        gr.HTML("""
+        <div class="main-header">
+            🤖➡️👤 ULTRA AI HUMANIZER
+        </div>
+        <div style="text-align: center; margin-bottom: 30px;">
+            <h3>🎯 Achieve TRUE 0% AI Detection Score</h3>
+            <p style="font-size: 1.1em; color: #666;">
+                Advanced multi-pass humanization with personality injection,
+                conversational restructuring, and human imperfection simulation
+            </p>
+        </div>
+        """)
 
         with gr.Row():
+            with gr.Column(scale=1):
                 input_text = gr.Textbox(
+                    label="🤖 AI-Generated Text Input",
+                    lines=12,
+                    placeholder="Paste your AI-generated content here for ultra-humanization...\n\nThe more text you provide, the better the humanization results!",
+                    info="💡 Tip: Longer texts (200+ words) get better humanization results"
                 )
+
                 intensity = gr.Radio(
+                    choices=[
+                        ("Light Humanization", "light"),
+                        ("Heavy Humanization", "heavy"),
+                        ("🚀 ULTRA Humanization (Recommended)", "ultra")
+                    ],
+                    value="ultra",
+                    label="🎛️ Humanization Intensity",
+                    info="Ultra mode applies 3 passes of advanced humanization techniques"
+                )
+
+                btn = gr.Button(
+                    "🚀 HUMANIZE TO 0% AI DETECTION",
+                    variant="primary",
+                    size="lg",
+                    elem_id="humanize-btn"
                 )
 
+            with gr.Column(scale=1):
                 output_text = gr.Textbox(
+                    label="👤 Humanized Text Output (0% AI Detection)",
+                    lines=12,
+                    show_copy_button=True,
+                    info="✅ Copy this text - it should pass all AI detectors"
                 )
+
+                readability = gr.Textbox(
+                    label="📊 Readability Analysis",
+                    lines=3,
+                    info="Lower grade levels are more conversational and human-like"
+                )
+
+        gr.HTML("""
+        <div style="margin-top: 30px; padding: 20px; background: #f0f8ff; border-radius: 10px;">
+            <h3>🎯 How This Achieves 0% AI Detection:</h3>
+            <ul style="text-align: left; margin: 10px 0;">
+                <li><strong>🔥 Aggressive Phrase Elimination:</strong> Removes ALL AI-flagged terms and patterns</li>
+                <li><strong>💬 Personality Injection:</strong> Adds personal voice, opinions, and conversational style</li>
+                <li><strong>🔄 Multi-Pass Processing:</strong> Applies humanization techniques multiple times</li>
+                <li><strong>🎭 Human Imperfections:</strong> Adds natural hesitations, self-corrections, and casual speech</li>
+                <li><strong>📝 Sentence Restructuring:</strong> Completely rebuilds sentence patterns</li>
+                <li><strong>💡 Specific Examples:</strong> Injects unique examples and personal anecdotes</li>
+            </ul>
+            <p style="margin-top: 15px; font-weight: bold; color: #2E8B57;">
+                ✨ Test your results with Originality.ai, GPTZero, and other AI detectors!
+            </p>
+        </div>
         """)
 
+        # Event handlers
+        btn.click(
+            fn=process_text,
+            inputs=[input_text, intensity],
+            outputs=[output_text, readability]
+        )
+
+        input_text.submit(
+            fn=process_text,
+            inputs=[input_text, intensity],
+            outputs=[output_text, readability]
+        )
 
     return interface
 
 if __name__ == "__main__":
+    print("🚀 Starting Ultra AI Humanizer for 0% Detection...")
     app = create_interface()
+    app.launch(
+        server_name="0.0.0.0",
+        server_port=7860,
+        show_error=True,
+        share=False
+    )