import re
import ftfy
def fix_utf8_encoding(text):
    # Repair mojibake and other broken Unicode with ftfy.
    if text is None:
        return ""
    return ftfy.fix_text(text)
# Adapted from:
# https://github.com/bigscience-workshop/data-preparation/blob/main/preprocessing/training/01b_oscar_cleaning_and_filtering/filtering.py#L95
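# Assorted Unicode whitespace and invisible characters (non-breaking/thin
# spaces, zero-width space, etc.) that get normalized to a plain ASCII space.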
whitespace = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
def normalize_whitespace(text):
    # Replace every exotic whitespace character with a plain space.
    chars = [char if char not in whitespace else " " for char in text]
    text = "".join(chars)
    return text
# Map common Unicode punctuation (mostly fullwidth / CJK forms) to ASCII equivalents.
unicode_punctuation = {
    "，": ",",
    "。": ".",
    "、": ",",
    "„": '"',
    "”": '"',
    "“": '"',
    "«": '"',
    "»": '"',
    "１": '"',
    "」": '"',
    "「": '"',
    "《": '"',
    "》": '"',
    "´": "'",
    "∶": ":",
    "：": ":",
    "？": "?",
    "！": "!",
    "（": "(",
    "）": ")",
    "；": ";",
    "–": "-",
    "—": " - ",
    "．": ". ",
    "～": "~",
    "’": "'",
    "…": "...",
    "━": "-",
    "〈": "<",
    "〉": ">",
    "【": "[",
    "】": "]",
    "％": "%",
    "►": "-",
}
def normalize_punctuation(text):
    # Substitute each character with its ASCII equivalent where one is defined.
    chars = [unicode_punctuation.get(char, char) for char in text]
    text = "".join(chars)
    return text
def remove_empty_lines(text):
    # Drop lines that are empty or contain only whitespace.
    if not isinstance(text, str):
        return ""
    lines = text.splitlines()
    filtered = [line for line in lines if not re.match(r"^\s*$", line)]
    return "\n".join(filtered)
def clean_new_lines(text):
    # Strip surrounding whitespace and collapse the text onto a single line.
    text = text.strip()
    text = text.replace("\n", "")
    return text
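# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): the sample string
# and the order in which the cleaners are chained here are illustrative only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    raw = "Helloâ€¦ world！\n\n   \nSecond line。\n"
    text = fix_utf8_encoding(raw)       # repair mojibake such as "â€¦" -> "…"
    text = normalize_whitespace(text)   # exotic spaces -> plain spaces
    text = normalize_punctuation(text)  # "！" -> "!", "。" -> ".", "…" -> "..."
    text = remove_empty_lines(text)     # drop blank / whitespace-only lines
    print(text)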