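# Heuristic sentence-level filter for Indic-language web data: for each document,
# keep only the sentences that are mostly written in the language's own script and
# contain at least a few words, then drop documents that become too short.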
import os
import json
import gzip
import re
from tqdm import tqdm
from multiprocessing import Pool

# Target languages. Note that the per-file language is inferred from the filename
# prefix in process_file; this list documents the expected set.
langs = ["as", "bn", "gu", "kn", "hi", "ml", "mr", "ne", "or", "sa", "sd", "ta", "ur", "te", "mai"]

language_characters = {
    "as": {"range": (0x0980, 0x09FF)},  # Assamese
    "bn": {"range": (0x0980, 0x09FF)},  # Bengali
    "gu": {"range": (0x0A80, 0x0AFF)},  # Gujarati
    "kn": {"range": (0x0C80, 0x0CFF)},  # Kannada
    "hi": {"range": (0x0900, 0x097F)},  # Hindi
    "ml": {"range": (0x0D00, 0x0D7F)},  # Malayalam
    "mr": {"range": (0x0900, 0x097F)},  # Marathi
    "ne": {"range": (0x0900, 0x097F)},  # Nepali
    "or": {"range": (0x0B00, 0x0B7F)},  # Oriya
    "sa": {"range": (0x0900, 0x097F)},  # Sanskrit
    "sd": {"range": (0x0600, 0x06FF)},  # Sindhi
    "ta": {"range": (0x0B80, 0x0BFF)},  # Tamil
    "ur": {"range": (0x0600, 0x06FF)},  # Urdu
    "te": {"range": (0x0C00, 0x0C7F)},  # Telugu
    "mai": {"range": (0x0900, 0x097F)}  # Maithili
}
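
# Several languages share a single Unicode block: Hindi, Marathi, Nepali, Sanskrit,
# and Maithili all use Devanagari (U+0900-U+097F); Assamese shares the Bengali block
# (U+0980-U+09FF); and Sindhi and Urdu are written in the Arabic block (U+0600-U+06FF),
# so the same range check applies to each group.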

language_percentage = 40  # minimum % of characters that must fall in the language's Unicode range
num_of_words = 4          # minimum number of words for a sentence to be kept

def check_language_percentage(text, language):
    if not text:
        return False

    unicode_range = language_characters.get(language)

    # Count the characters that fall inside the language's Unicode range
    language_character_count = sum(unicode_range["range"][0] <= ord(char) <= unicode_range["range"][1] for char in text)

    # Calculate the percentage of characters belonging to the specified language
    percentage = (language_character_count / len(text)) * 100

    # Check whether the percentage meets the language_percentage threshold (40%)
    return percentage >= language_percentage
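
# Example: check_language_percentage("यह एक वाक्य है", "hi") is True, since 11 of the
# 14 characters (everything except the three spaces) fall in the Devanagari range.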

def valid(json_obj, lang):
    content = json_obj["raw_content"]

    # Split on end-of-sentence punctuation. The capturing group makes re.split
    # return the delimiters too, so kept sentences can be re-joined with their
    # original punctuation.
    end_of_sentence_pattern = r'([।?!]+)'

    filtered_paragraphs = []
    for paragraph in content.split('\n'):
        pieces = re.split(end_of_sentence_pattern, paragraph)
        kept = []
        # pieces alternates sentence, delimiter, sentence, delimiter, ...
        for i in range(0, len(pieces), 2):
            sentence = pieces[i]
            delimiter = pieces[i + 1] if i + 1 < len(pieces) else ""
            # Keep a sentence only if enough of it is in the target script
            # and it has at least num_of_words words
            if sentence and check_language_percentage(sentence, lang) and len(sentence.split()) >= num_of_words:
                kept.append(sentence + delimiter)
        filtered_paragraphs.append("".join(kept))

    # Collapse the empty lines left behind by fully filtered paragraphs
    json_obj["raw_content"] = re.sub(r'\n+', '\n', "\n".join(filtered_paragraphs))
    return json_obj
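
# Example (illustrative): for a "hi" document whose raw_content is
# "यह पहला लंबा वाक्य है। ok\nshort", the English fragment "ok" and the short
# line "short" are both dropped, leaving "यह पहला लंबा वाक्य है।".
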
def process_file(file_info):
    input_path, output_path = file_info

    # The language code is taken from the filename prefix, e.g. "hi_..." -> "hi"
    lang = os.path.basename(input_path).split('_')[0]

    # Read JSON objects, filter their content, and keep only documents that
    # still have at least 50 words after sentence-level filtering
    filtered_documents = []
    with gzip.open(input_path, 'rt') as input_file:
        for line in input_file:
            json_obj = json.loads(line)
            new_obj = valid(json_obj, lang)
            if len(new_obj["raw_content"].split()) >= 50:
                filtered_documents.append(new_obj)

    # Write the filtered JSON objects to the output file
    with gzip.open(output_path, 'wt') as output_file:
        for doc in filtered_documents:
            output_file.write(json.dumps(doc, ensure_ascii=False) + '\n')
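
# Each input line is expected to hold one JSON document whose "raw_content" field
# contains the page text (CCNet-style records); all other fields pass through
# untouched, e.g. {"url": "...", "raw_content": "पहला वाक्य। ...", ...}.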


def filter_json_files(input_folder, output_folder):
    # Create the output folder if it doesn't exist
    os.makedirs(output_folder, exist_ok=True)

    file_infos = []

    # Collect every .json.gz file in the input folder
    for filename in sorted(os.listdir(input_folder)):
        if filename.endswith(".json.gz"):
            input_path = os.path.join(input_folder, filename)
            output_path = os.path.join(output_folder, filename)
            file_infos.append((input_path, output_path))

    # Process the files in parallel; the Pool context manager closes the pool,
    # and the progress bar advances as each file finishes
    with tqdm(total=len(file_infos)) as pbar:
        with Pool(processes=160) as pool:  # sized for the original machine; adjust as needed
            for _ in pool.imap_unordered(process_file, file_infos):
                pbar.update()

    print("Filtering done for", os.path.basename(input_folder))

# Common Crawl snapshot identifiers to process (crawl names such as CC-MAIN-2018-17)
snapshots = ["2018-17", "2018-22", "2018-26", "2018-30", "2018-34", "2018-39", "2018-43", "2018-47", "2018-51", "2019-04", "2019-09", "2019-13", "2019-18", "2019-22", "2019-26", "2019-30", "2019-35", "2019-39", "2019-43", "2019-47", "2019-51", "2020-05", "2020-10", "2020-16", "2020-24", "2020-29", "2020-34", "2020-40", "2020-45", "2020-50", "2021-04", "2021-10", "2021-17", "2021-21", "2021-25", "2021-31", "2021-39", "2021-43", "2021-49", "2022-05", "2022-21", "2022-27", "2022-33", "2022-40", "2022-49", "2023-06", "2023-14", "2023-23", "2023-40", "2023-50", "2024-10"]


if __name__ == "__main__":
    # Guard the entry point so multiprocessing workers can import this module safely
    for snap in snapshots:
        input_folder = f"/mnt/weka/peacock/wet-data/output/global_filtered_without_bloom_new/{snap}"
        output_folder = f"/mnt/weka/peacock/wet-data/output/heuristic_filtered_without_bloom_new/{snap}"

        filter_json_files(input_folder, output_folder)
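
# To try the filter on other data (hypothetical paths), point filter_json_files
# at any folder of <lang>_*.json.gz shards, for example:
#     filter_json_files("/tmp/wet-sample/2024-10", "/tmp/wet-filtered/2024-10")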