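"""Remove fuzzy-duplicate documents from gzipped JSONL Common Crawl shards.

Each shard's ``.clusters.parquet`` file maps document ids to cluster ids; any
document that is not its own cluster representative is dropped. Shards with no
clusters file pass through unfiltered, and the missing paths are logged to
temp.txt for later inspection.
"""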
import os
import json
import gzip
import pandas as pd
from tqdm import tqdm
from multiprocessing import Pool

def process_file(file_info):
    input_path, output_path, clusters_file = file_info

    # Load the cluster assignments for this shard. If the clusters file is
    # missing, log its path to temp.txt and fall back to an empty frame,
    # which leaves the shard unfiltered.
    clusters_df = None
    if clusters_file != "<NOT_EXISTS>":
        try:
            clusters_df = pd.read_parquet(clusters_file)
        except FileNotFoundError:
            pass
    if clusters_df is None:
        with open("temp.txt", "a+") as f:
            f.write(clusters_file + '\n')
        clusters_df = pd.DataFrame({'id': [], 'id_int': [], 'cluster_id': []})

    # The last path component of a document id is its line number within the
    # shard's .json.gz file (matching enumerate's zero-based index below).
    clusters_df['line_number'] = clusters_df['id'].apply(lambda x: int(x.split('/')[-1]))

    # A document is a duplicate if it is not its own cluster representative.
    duplicate_docs = set()
    for _, row in clusters_df.iterrows():
        if row['id_int'] != row['cluster_id']:
            duplicate_docs.add(row['line_number'])

    # Read the shard and keep only lines that are not flagged as duplicates
    with gzip.open(input_path, 'rt') as input_file:
        try:
            filtered_documents = [json.loads(line)
                                  for idx, line in enumerate(input_file)
                                  if idx not in duplicate_docs]
        except Exception:
            # Corrupt gzip member or malformed JSON: report the shard and skip it
            print(f"Skipping unreadable shard: {input_path}")
            return

    # Write filtered JSON objects to new file
    with gzip.open(output_path, 'wt') as output_file:
        for doc in filtered_documents:
            output_file.write(json.dumps(doc, ensure_ascii=False) + '\n')
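
# A vectorized equivalent of the iterrows loop in process_file, left here as
# a sketch (it is not called by the pipeline): for large clusters files it
# avoids per-row Python overhead.
def duplicate_line_numbers(clusters_df):
    # Rows whose document is not its own cluster representative
    mask = clusters_df['id_int'] != clusters_df['cluster_id']
    return set(clusters_df.loc[mask, 'line_number'])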


def filter_json_files(input_folder, output_folder, clusters_folder, snap):
    # Create the output folder if it doesn't exist
    os.makedirs(output_folder, exist_ok=True)

    file_infos = []

    # Iterate through each JSON shard in the input folder
    for filename in sorted(os.listdir(input_folder)):
        if filename.endswith(".json.gz"):
            input_path = os.path.join(input_folder, filename)
            output_path = os.path.join(output_folder, filename)

            # Find the corresponding clusters file. Reset the sentinel for
            # every shard so a missing file never silently reuses the
            # previous shard's clusters.
            clusters_file = "<NOT_EXISTS>"
            for i in range(1, 11):
                clusters_file_path = os.path.join(
                    clusters_folder,
                    f"total_{i}",
                    filename.split('_')[0],
                    f"{snap}_{filename.split('_')[-1].split('.')[0]}.clusters.parquet",
                )
                if os.path.exists(clusters_file_path):
                    clusters_file = clusters_file_path
                    break

            file_infos.append((input_path, output_path, clusters_file))

    # Dispatch shards to a pool of workers; imap_unordered yields results in
    # completion order, so the progress bar advances as soon as any shard
    # finishes.
    with tqdm(total=len(file_infos)) as pbar:
        with Pool(processes=160) as pool:
            for _ in pool.imap_unordered(process_file, file_infos):
                pbar.update()

    print("Filtering done for", input_folder.split('/')[-1])

snapshots = ["2018-17", "2018-22", "2018-26", "2018-30", "2018-34", "2018-39", "2018-43", "2018-47", "2018-51", "2019-04", "2019-09", "2019-13", "2019-18", "2019-22", "2019-26", "2019-30", "2019-35", "2019-39", "2019-43", "2019-47", "2019-51", "2020-05", "2020-10", "2020-16", "2020-24", "2020-29", "2020-34", "2020-40", "2020-45", "2020-50", "2021-04", "2021-10", "2021-17", "2021-21", "2021-25", "2021-31", "2021-39", "2021-43", "2021-49", "2022-05", "2022-21", "2022-27", "2022-33", "2022-40", "2022-49", "2023-06", "2023-14", "2023-23", "2023-40", "2023-50", "2024-10"]

# snapshots = ["2023-06", "2023-14", "2023-23", "2023-40", "2023-50", "2024-10"]
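
# Sanity-check helper, a small sketch not used by the pipeline: report how
# many lines the dedup pass removed from one shard.
def removed_count(in_path, out_path):
    with gzip.open(in_path, 'rt') as f:
        n_in = sum(1 for _ in f)
    with gzip.open(out_path, 'rt') as f:
        n_out = sum(1 for _ in f)
    return n_in - n_out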

if __name__ == "__main__":
    # Guard the driver loop so worker processes importing this module do not
    # re-run it under spawn-based multiprocessing.
    for snap in snapshots:
        input_folder = f"/mnt/weka/peacock/wet-data/output/local_filtered_without_bloom/{snap}"
        output_folder = f"/mnt/weka/peacock/wet-data/output/global_filtered_without_bloom_new/{snap}"
        clusters_folder = "/mnt/weka/peacock/wet-data/output/fuzzy-clusters"

        filter_json_files(input_folder, output_folder, clusters_folder, snap)