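"""Scan gzipped JSON-lines snapshot files and, for every source domain not
already listed in unique_source_documents.json, keep one document whose URL
looks like the site's front page. The selected records (URL, domain, title,
and the first 50 words of the text) are written to
new_unique_source_documents_with_url.json."""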
import os
import gzip
import json
from tqdm import tqdm
import multiprocessing

# Process-safe shared containers so this script can later be driven by a
# multiprocessing pool without further changes: the domains already handled
# and the newly selected documents.
manager = multiprocessing.Manager()
processed_domains = manager.dict()
other_selected_domains = manager.list()

# Seed processed_domains with the domains collected by a previous run.
with open("unique_source_documents.json", 'r') as file:
    for obj in json.load(file):
        processed_domains[obj["source_domain"]] = 1

def is_similar(url, source_domain):
    # Heuristic front-page check: strip the scheme, "www.", ".html", trailing
    # slashes and the tokens "home"/"index" from the URL, then see whether
    # what is left is exactly the source domain.
    url = (url.replace("https://", "").replace("http://", "").replace("www.", "")
              .replace(".html", "").replace("home", "").replace("index", "").strip("/"))
    source_domain = source_domain.replace("www.", "")
    return url == source_domain

def extract_domains_from_file_with_url(filepath):
    # Scan one gzipped JSON-lines file and keep a single front-page document
    # for every source domain that has not been seen before.
    with gzip.open(filepath, 'rt', encoding='utf-8') as f:
        for line in f:
            data = json.loads(line)
            source_domain = data.get('source_domain')
            url = data.get('url')
            if not url or not source_domain:
                continue
            if source_domain not in processed_domains and is_similar(url, source_domain):
                processed_domains[source_domain] = 1
                other_selected_domains.append({
                    "url": data["url"],
                    "source_domain": data["source_domain"],
                    "title": data["title"],
                    # Keep only the first 50 words of the page text.
                    "raw_content": " ".join(data["raw_content"].split()[:50]),
                })

def process_file_with_url(file_path, counter):
    # Process a single snapshot file and bump the shared counter so that a
    # parallel caller could track completion.
    if file_path.endswith('.json.gz'):
        extract_domains_from_file_with_url(file_path)
    counter.value += 1

def find_unique_domains_with_url(folder_path):
    # Only the top level of folder_path is scanned (note the break below),
    # so size the progress bar by the number of files found there.
    total_files = len(next(os.walk(folder_path))[2])
    progress_bar = tqdm(total=total_files, desc="Processing")

    # Shared counter; kept so this loop can later be swapped for a
    # multiprocessing.Pool without changing process_file_with_url.
    counter = manager.Value('i', 0)

    for root, dirs, files in os.walk(folder_path):
        for file in files:
            file_path = os.path.join(root, file)
            process_file_with_url(file_path, counter)
            progress_bar.update(1)
        break  # do not descend into subdirectories

    progress_bar.close()


# Path to the snapshots folder
snapshots_folder = '/mnt/weka/peacock/wet-data/output/toxic_filtered_without_bloom_new'

# Find unique domains
find_unique_domains_with_url(snapshots_folder)

with open("new_unique_source_documents_with_url.json", 'w') as file:
    json.dump(list(other_selected_domains), file, indent=4, ensure_ascii=False)