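"""Drop duplicate documents from mined CommonCrawl WET shards.

For each <lang>_<shard>.json.gz file, documents flagged by the fuzzy-dedup
cluster output and by the bloom-filter output are removed, and the surviving
documents are written to a mirror folder."""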
import gzip
import json
import os
import sys
from multiprocessing import Pool

import pandas as pd
from tqdm import tqdm


def process_file(file_info):
    """Filter one shard: drop documents flagged by the fuzzy-dedup clusters
    and by the bloom filter, then rewrite the shard without them."""
    input_path, output_path, clusters_file, bloom_file = file_info

    try:
        clusters_df = pd.read_parquet(clusters_file)
    except FileNotFoundError:
        # Log the missing clusters file and fall back to an empty frame with
        # the expected columns so the loops below still run.
        with open("temp.txt", "a+") as f:
            f.write(clusters_file + '\n')
        clusters_df = pd.DataFrame(columns=['id', 'id_int', 'cluster_id'])

    duplicate_docs = set()

    if bloom_file is not None:
        bloom_df = pd.read_parquet(bloom_file)

        # Bloom-filter hits are recorded as "<shard filename>/<line number>";
        # keep only the hits that belong to this shard.
        curr_file = input_path.split('/')[-1]
        for _, row in bloom_df.iterrows():
            if curr_file == row["doc_id"].split('/')[0]:
                duplicate_docs.add(int(row["doc_id"].split('/')[1]))

    # A document is a duplicate when it is not its cluster's representative,
    # i.e. its own integer id differs from the cluster id.
    for _, row in clusters_df.iterrows():
        if row['id_int'] != row['cluster_id']:
            duplicate_docs.add(int(row['id'].split('/')[-1]))

    with gzip.open(input_path, 'rt') as input_file:
        try:
            filtered_documents = [json.loads(line)
                                  for idx, line in enumerate(input_file)
                                  if idx not in duplicate_docs]
        except (json.JSONDecodeError, UnicodeDecodeError, EOFError, OSError):
            print("Failed to read", input_path)
            return

    with gzip.open(output_path, 'wt') as output_file:
        for doc in filtered_documents:
            output_file.write(json.dumps(doc, ensure_ascii=False) + '\n')
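
# Expected input schemas, inferred from the lookups in process_file (an
# assumption based on this script alone, not on an external spec):
#   *.clusters.parquet : columns `id` ("<path>/<line number>"), `id_int`,
#                        `cluster_id`
#   bloom parquet      : column `doc_id` ("<shard filename>/<line number>")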


def filter_json_files(input_folder, output_folder, clusters_folder, bloom_folder):
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    file_infos = []

    for filename in sorted(os.listdir(input_folder)):
        if filename.endswith(".json.gz"):
            input_path = os.path.join(input_folder, filename)
            output_path = os.path.join(output_folder, filename)

            # Shards are named "<lang>_<shard>.json.gz"; their cluster files
            # live under "<clusters_folder>/<lang>/<shard>.clusters.parquet".
            lang = filename.split('_')[0]
            shard = filename.split('_')[-1].split('.')[0]
            clusters_file = os.path.join(clusters_folder, lang, f"{shard}.clusters.parquet")

            # Pick one parquet from the language's bloom-filter folder (the
            # lexicographically last, so the choice is deterministic); fall
            # back to None when the folder or file is missing.
            try:
                bloom_dir = os.path.join(bloom_folder, lang)
                parquet_names = sorted(x for x in os.listdir(bloom_dir) if x.endswith(".parquet"))
                bloom_file = os.path.join(bloom_dir, parquet_names[-1])
            except (FileNotFoundError, IndexError):
                bloom_file = None

            file_infos.append((input_path, output_path, clusters_file, bloom_file))

    with tqdm(total=len(file_infos)) as pbar:
        with Pool(processes=160) as pool:
            for _ in pool.imap_unordered(process_file, file_infos):
                pbar.update()

    print("Filtering done for", input_folder.split('/')[-1])
snapshots = ["2018-17", "2018-22", "2018-26", "2018-30", "2018-34", "2018-39", "2018-43", "2018-47", "2018-51", "2019-04", "2019-09", "2019-13", "2019-18", "2019-22", "2019-26", "2019-30", "2019-35", "2019-39", "2019-43", "2019-47", "2019-51", "2020-05", "2020-10", "2020-16", "2020-24", "2020-29", "2020-34", "2020-40", "2020-45", "2020-50", "2021-04", "2021-10", "2021-17", "2021-21", "2021-25", "2021-31", "2021-39", "2021-43", "2021-49", "2022-05", "2022-21", "2022-27", "2022-33", "2022-40", "2022-49", "2023-06", "2023-14", "2023-23", "2023-40", "2023-50", "2024-10"] |
|
|
|
snapshots = [sys.argv[1]] |
|
|
|
for snap in snapshots: |
|
input_folder = f"/mnt/weka/peacock/wet-data/output/mined/{snap}" |
|
output_folder = f"/mnt/weka/peacock/wet-data/output/local_filtered/{snap}" |
|
clusters_folder = f"/mnt/weka/peacock/wet-data/output/fuzzy-clusters/{snap}" |
|
bloom_folder = f"/mnt/weka/peacock/wet-data/output/bloomfilter/{snap}" |
|
|
|
filter_json_files(input_folder, output_folder, clusters_folder, bloom_folder) |
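
# Example invocation (the script filename here is illustrative):
#   python filter_duplicates.py 2024-10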