import os
import json
import gzip
from multiprocessing import Pool

from tqdm import tqdm

# Language codes of the Indic languages covered by the word-level blocklists
# (one plain-text list per language under toxic_word_list/).
langs = ["as", "bn", "gu", "kn", "hi", "ml", "mr", "ne", "or", "sa", "sd", "ta", "ur", "te", "mai"]

# Load each language's blocklist into a set for O(1) membership tests.
toxic_words = {}
for lang in langs:
    with open(f"toxic_word_list/{lang}.txt", "r") as f:
        toxic_words[lang] = {line.strip() for line in f}

# Domains whose documents are dropped outright.
with open("blacklisted_urls.txt", "r") as f:
    toxic_urls = {line.strip() for line in f}


def toxic(json_obj, lang):
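    """Return True if the document should be dropped: its source domain is
    blacklisted, or its whitespace-tokenised text contains a blocklisted word."""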
    if json_obj["source_domain"] in toxic_urls:
        return True

    # Exact whitespace-token match against the language's word list.
    for word in json_obj["raw_content"].split():
        if word in toxic_words[lang]:
            return True
    return False


def process_file(file_info):
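    """Filter one gzipped JSON-lines shard, writing the surviving documents
    to the matching output path."""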
    input_path, output_path = file_info

    # Shards are assumed to be named "<lang>_<...>.json.gz", so the language
    # code is the filename prefix before the first underscore.
    lang = os.path.basename(input_path).split("_")[0]

    # Keep only documents that pass the toxicity check.
    filtered_documents = []
    with gzip.open(input_path, "rt") as input_file:
        for line in input_file:
            json_obj = json.loads(line)
            if not toxic(json_obj, lang):
                filtered_documents.append(json_obj)

    with gzip.open(output_path, "wt") as output_file:
        for doc in filtered_documents:
            output_file.write(json.dumps(doc, ensure_ascii=False) + "\n")


def filter_json_files(input_folder, output_folder):
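    """Toxicity-filter every .json.gz shard in input_folder in parallel,
    mirroring filenames into output_folder."""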
    os.makedirs(output_folder, exist_ok=True)

    file_infos = []
    for filename in sorted(os.listdir(input_folder)):
        if filename.endswith(".json.gz"):
            input_path = os.path.join(input_folder, filename)
            output_path = os.path.join(output_folder, filename)
            file_infos.append((input_path, output_path))

    # One task per shard; the progress bar ticks as each worker finishes.
    with tqdm(total=len(file_infos)) as pbar:
        with Pool(processes=160) as pool:
            for _ in pool.imap_unordered(process_file, file_infos):
                pbar.update()

    print("Filtering done for", input_folder.split("/")[-1])


# Crawl snapshots to process.
snapshots = ["2018-17", "2018-22", "2018-26", "2018-30", "2018-34", "2018-39", "2018-43", "2018-47", "2018-51", "2019-04", "2019-09", "2019-13", "2019-18", "2019-22", "2019-26", "2019-30", "2019-35", "2019-39", "2019-43", "2019-47", "2019-51", "2020-05", "2020-10", "2020-16", "2020-24", "2020-29", "2020-34", "2020-40", "2020-45", "2020-50", "2021-04", "2021-10", "2021-17", "2021-21", "2021-25", "2021-31", "2021-39", "2021-43", "2021-49", "2022-05", "2022-21", "2022-27", "2022-33", "2022-40", "2022-49", "2023-06", "2023-14", "2023-23", "2023-40", "2023-50", "2024-10"]

# Guard the driver loop so multiprocessing workers don't re-run it on
# platforms that use the "spawn" start method.
if __name__ == "__main__":
    for snap in snapshots:
        input_folder = f"/mnt/weka/peacock/wet-data/output/heuristic_filtered_without_bloom_new/{snap}"
        output_folder = f"/mnt/weka/peacock/wet-data/output/toxic_filtered_without_bloom_new/{snap}"
        filter_json_files(input_folder, output_folder)
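
# Note: toxic_word_list/ and blacklisted_urls.txt are resolved relative to the
# current working directory, so run the script from the directory containing them.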