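"""Remove near-duplicate documents from gzipped JSONL shards.

For each ``*.json.gz`` shard of a snapshot, the line numbers of duplicate
documents are collected from a per-shard fuzzy-clusters parquet file (and,
when available, a bloom-filter parquet file); the surviving lines are
rewritten to the output folder.
"""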
import os
import json
import gzip
import pandas as pd
from tqdm import tqdm
from multiprocessing import Pool, TimeoutError
import signal
import sys

def process_file(file_info):
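    """Filter one shard, dropping documents flagged as duplicates.

    ``file_info`` is a tuple ``(input_path, output_path, clusters_file,
    bloom_file)``; ``bloom_file`` may be the string "None" when no
    bloom-filter output exists for this shard's prefix.
    """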
    input_path, output_path, clusters_file, bloom_file = file_info

    # Collect the line numbers of documents to drop, as listed in the clusters file
    try:
        clusters_df = pd.read_parquet(clusters_file)
    except FileNotFoundError:
        # Record the missing clusters file and fall back to an empty frame
        with open("temp.txt", "a+") as f:
            f.write(clusters_file + '\n')
        clusters_df = pd.DataFrame(columns=['id', 'id_int', 'cluster_id'])

    # Document ids look like "<shard>/<line_number>"; extract the line number
    clusters_df['line_number'] = clusters_df['id'].apply(lambda x: int(x.split('/')[-1]))

    duplicate_docs = set()

    if bloom_file != "None":
        bloom_df = pd.read_parquet(bloom_file)

        # Bloom-filter doc_ids look like "<shard filename>/<line_number>";
        # keep only the entries that belong to this shard
        curr_file = input_path.split('/')[-1]
        for _, row in bloom_df.iterrows():
            if curr_file == row["doc_id"].split('/')[0]:
                duplicate_docs.add(int(row["doc_id"].split('/')[1]))

    # A document is a duplicate when it is not the representative of its cluster
    for _, row in clusters_df.iterrows():
        if row['id_int'] != row['cluster_id']:
            duplicate_docs.add(int(row['line_number']))

    # Read the shard and keep only lines that are not flagged as duplicates
    with gzip.open(input_path, 'rt') as input_file:
        try:
            filtered_documents = [json.loads(line) for idx, line in enumerate(input_file) if idx not in duplicate_docs]
        except (EOFError, OSError, ValueError):
            # Corrupt gzip member or malformed JSON line: skip this shard
            print(input_path)
            return

    # Write filtered JSON objects to new file
    with gzip.open(output_path, 'wt') as output_file:
        for doc in filtered_documents:
            output_file.write(json.dumps(doc, ensure_ascii=False) + '\n')



# def process_file_with_timeout(arg):
#     def handler(signum, frame):
#         raise TimeoutError("end of time")
#     signal.signal(signal.SIGALRM, handler)
#     signal.alarm(500)
#     try:
#         process_file(arg)
#     except TimeoutError:
#         print("timeout")
#         return

def filter_json_files(input_folder, output_folder, clusters_folder, bloom_folder):
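    """Filter every ``*.json.gz`` shard in ``input_folder`` in parallel.

    The matching clusters parquet (and, if present, a bloom-filter parquet)
    is located for each shard by name before the work is handed to a
    process pool.
    """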
    # Create the output folder if it doesn't exist
    os.makedirs(output_folder, exist_ok=True)

    file_infos = []

    # Iterate through each JSON file in the input folder
    for filename in sorted(os.listdir(input_folder)):
        if filename.endswith(".json.gz"):
            input_path = os.path.join(input_folder, filename)
            output_path = os.path.join(output_folder, filename)

            # Determine the corresponding clusters file from the shard name
            prefix = filename.split('_')[0]
            shard_id = filename.split('_')[-1].split('.')[0]
            clusters_file = os.path.join(clusters_folder, prefix, f"{shard_id}.clusters.parquet")

            # Use the lexicographically last bloom-filter parquet for this
            # prefix, or "None" when the folder or file is missing
            try:
                bloom_dir = os.path.join(bloom_folder, prefix)
                bloom_file = os.path.join(bloom_dir, sorted(x for x in os.listdir(bloom_dir) if x.endswith(".parquet"))[-1])
            except (FileNotFoundError, IndexError):
                bloom_file = "None"

            file_infos.append((input_path, output_path, clusters_file, bloom_file))

    # Fan the shards out to a process pool, updating the progress bar as
    # results come back in completion order
    with tqdm(total=len(file_infos)) as pbar:
        with Pool(processes=160) as pool:
            for _ in pool.imap_unordered(process_file, file_infos):
                pbar.update()

    print("Filtering done for ", input_folder.split('/')[-1])

# snapshots = {}
# snapshots[0] = ["2018-17", "2018-22", "2018-26", "2018-30", "2018-34", "2018-39", "2018-43", "2018-47", "2018-51", "2019-04", "2019-09", "2019-13", "2019-18"]
# snapshots[0] = ["2018-30", "2018-34", "2018-39", "2018-43", "2018-47", "2018-51", "2019-04", "2019-09", "2019-13", "2019-18"]

# snapshots[1] = ["2019-22", "2019-26", "2019-30", "2019-35", "2019-39", "2019-43", "2019-47", "2019-51", "2020-05", "2020-10", "2020-16", "2020-24"]
# snapshots[2] = ["2020-29", "2020-34", "2020-40", "2020-45", "2020-50", "2021-04", "2021-10", "2021-17", "2021-21", "2021-25", "2021-31", "2021-39"] 
# snapshots[3] = ["2021-43", "2021-49", "2022-05", "2022-21", "2022-27", "2022-33", "2022-40", "2022-49", "2023-06", "2023-14", "2023-23", "2023-40", "2023-50", "2024-10"]

# snapshots = ["2020-05", "2020-10", "2020-16", "2020-24", "2020-29", "2020-34", "2020-40", "2020-45", "2020-50", "2021-04", "2021-10", "2021-17", "2021-21", "2021-25", "2021-31", "2021-39", "2021-43", "2021-49", "2022-05", "2022-21", "2022-27", "2022-33", "2022-40", "2022-49", "2023-06", "2023-14", "2023-23", "2023-40", "2023-50", "2024-10"]

# snapshots = ["2019-51"]



snapshots = ["2018-17", "2018-22", "2018-26", "2018-30", "2018-34", "2018-39", "2018-43", "2018-47", "2018-51", "2019-04", "2019-09", "2019-13", "2019-18", "2019-22", "2019-26", "2019-30", "2019-35", "2019-39", "2019-43", "2019-47", "2019-51", "2020-05", "2020-10", "2020-16", "2020-24", "2020-29", "2020-34", "2020-40", "2020-45", "2020-50", "2021-04", "2021-10", "2021-17", "2021-21", "2021-25", "2021-31", "2021-39", "2021-43", "2021-49", "2022-05", "2022-21", "2022-27", "2022-33", "2022-40", "2022-49", "2023-06", "2023-14", "2023-23", "2023-40", "2023-50", "2024-10"]

snapshots = [sys.argv[1]]

for snap in snapshots:
    input_folder = f"/mnt/weka/peacock/wet-data/output/mined/{snap}"
    output_folder = f"/mnt/weka/peacock/wet-data/output/local_filtered/{snap}"
    clusters_folder = f"/mnt/weka/peacock/wet-data/output/fuzzy-clusters/{snap}"
    bloom_folder = f"/mnt/weka/peacock/wet-data/output/bloomfilter/{snap}"

    filter_json_files(input_folder, output_folder, clusters_folder, bloom_folder)
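
# Example invocation (assuming this script is saved as filter_duplicates.py,
# a hypothetical name):
#   python filter_duplicates.py 2023-06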