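"""Filter gzipped JSONL shards by dropping documents whose source domain is
blacklisted or whose text contains a toxic word for the document's language.

Each snapshot folder contains *.json.gz files; the language code is taken
from the filename prefix before the first underscore."""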
import os
import json
import gzip
from tqdm import tqdm
from multiprocessing import Pool

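# Language codes for which a toxic word list is available (one file per code).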
langs = ["as", "bn", "gu", "kn", "hi", "ml", "mr", "ne", "or", "sa", "sd", "ta", "ur", "te", "mai"]

# Load each language's toxic word list into a set for O(1) membership checks.
toxic_words = {}
for lang in langs:
    with open(f"toxic_word_list/{lang}.txt", "r", encoding="utf-8") as f:
        toxic_words[lang] = {line.strip() for line in f}

# Load the blacklisted source domains into a set.
with open("blacklisted_urls.txt", "r", encoding="utf-8") as f:
    toxic_urls = {line.strip() for line in f}

def toxic(json_obj, lang):
    """Return True if the document comes from a blacklisted domain or
    contains any toxic word for the given language."""
    if json_obj["source_domain"] in toxic_urls:
        return True

    for word in json_obj["raw_content"].split():
        if word in toxic_words[lang]:
            return True
    return False


def process_file(file_info):
    input_path, output_path = file_info

    # The language code is the filename prefix before the first underscore.
    lang = input_path.split('/')[-1].split('_')[0]

    # Read the JSON objects, keeping only the non-toxic documents.
    filtered_documents = []
    with gzip.open(input_path, 'rt') as input_file:
        for line in input_file:
            json_obj = json.loads(line)
            if not toxic(json_obj, lang):
                filtered_documents.append(json_obj)

    # Write the filtered JSON objects to the output file.
    with gzip.open(output_path, 'wt') as output_file:
        for doc in filtered_documents:
            output_file.write(json.dumps(doc, ensure_ascii=False) + '\n')


def filter_json_files(input_folder, output_folder):
    # Create the output folder if it doesn't exist.
    os.makedirs(output_folder, exist_ok=True)

    # Collect (input, output) path pairs for every JSON file in the input folder.
    file_infos = []
    for filename in sorted(os.listdir(input_folder)):
        if filename.endswith(".json.gz"):
            input_path = os.path.join(input_folder, filename)
            output_path = os.path.join(output_folder, filename)
            file_infos.append((input_path, output_path))

    # Process the files in parallel, updating the progress bar as each one finishes.
    with tqdm(total=len(file_infos)) as pbar:
        with Pool(processes=160) as pool:
            for _ in pool.imap_unordered(process_file, file_infos):
                pbar.update()

    print("Filtering done for", input_folder.split('/')[-1])

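# Snapshot identifiers (YYYY-WW crawl dumps) to filter, one folder per snapshot.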
snapshots = ["2018-17", "2018-22", "2018-26", "2018-30", "2018-34", "2018-39", "2018-43", "2018-47", "2018-51", "2019-04", "2019-09", "2019-13", "2019-18", "2019-22", "2019-26", "2019-30", "2019-35", "2019-39", "2019-43", "2019-47", "2019-51", "2020-05", "2020-10", "2020-16", "2020-24", "2020-29", "2020-34", "2020-40", "2020-45", "2020-50", "2021-04", "2021-10", "2021-17", "2021-21", "2021-25", "2021-31", "2021-39", "2021-43", "2021-49", "2022-05", "2022-21", "2022-27", "2022-33", "2022-40", "2022-49", "2023-06", "2023-14", "2023-23", "2023-40", "2023-50", "2024-10"]

# snapshots = ["2023-06", "2023-14", "2023-23", "2023-40", "2023-50", "2024-10"]

for snap in snapshots:
    input_folder = f"/mnt/weka/peacock/wet-data/output/heuristic_filtered_without_bloom_new/{snap}"
    output_folder = f"/mnt/weka/peacock/wet-data/output/toxic_filtered_without_bloom_new/{snap}"

    filter_json_files(input_folder, output_folder)