import os
import gzip
import json
import time
import multiprocessing
from multiprocessing import Manager

from tqdm import tqdm

# Shared state for the worker pool. The Manager-backed dict/list proxies are
# inherited by workers, which assumes the "fork" start method (the default on
# Linux); under "spawn" this module-level setup would re-run in each child.
manager = multiprocessing.Manager()
processed_domains = manager.dict()
other_selected_domains = manager.list()
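# "unique_source_documents.json" is expected to be a JSON array of records,
# each carrying at least a "source_domain" key, e.g. (illustrative values):
# [{"source_domain": "example.com", "url": "https://example.com/", "title": "..."}]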
with open("unique_source_documents.json", 'r') as file: |
|
for obj in json.load(file): |
|
processed_domains[obj["source_domain"]] = 1 |
|
|
|
|
|
|
|
def is_similar(url, source_domain):
    # Reduce the URL to a bare domain by stripping the scheme, "www.", common
    # landing-page suffixes (".html", "home", "index"), and slashes, then
    # compare it against the record's source domain.
    if url is None or source_domain is None:
        return False
    url = (url.replace("https://", "").replace("http://", "").replace("www.", "")
              .replace(".html", "").replace("home", "").replace("index", "")
              .strip("/"))
    source_domain = source_domain.replace("www.", "")
    return url == source_domain
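# Sanity check (hypothetical inputs): is_similar("https://www.example.com/index.html",
# "example.com") is True, while is_similar("https://example.com/blog/post",
# "example.com") is False. Note the naive replace() calls also strip "home" and
# "index" wherever they appear in the URL, not just as whole path components.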
def extract_domains_from_file_with_url(filepath):
    # First pass: keep one record per previously unseen domain, but only when
    # the record's URL looks like the domain's landing page.
    with gzip.open(filepath, 'rt', encoding='utf-8') as f:
        for line in f:
            data = json.loads(line)
            source_domain = data.get('source_domain')
            url = data.get('url')
            if source_domain not in processed_domains and is_similar(url, source_domain):
                processed_domains[source_domain] = 1
                json_obj = {
                    "url": data["url"],
                    "source_domain": data["source_domain"],
                    "title": data["title"],
                    # Keep only the first 50 whitespace-separated tokens.
                    "raw_content": " ".join(data["raw_content"].split()[:50]),
                }
                other_selected_domains.append(json_obj)
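# Each line of a shard is expected to be a standalone JSON record with "url",
# "source_domain", "title", and "raw_content" keys, e.g. (illustrative only):
# {"url": "https://example.com/", "source_domain": "example.com",
#  "title": "Example", "raw_content": "Lorem ipsum ..."}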
def process_file_with_url(file_path, counter, lock):
    if file_path.endswith('.json.gz'):
        extract_domains_from_file_with_url(file_path)
    # Report completion to the parent process via the shared counter; the lock
    # serializes the read-modify-write increment across workers.
    with lock:
        counter.value += 1
def find_unique_domains_with_url(folder_path):
    manager = Manager()
    counter = manager.Value('i', 0)
    lock = manager.Lock()
    pool = multiprocessing.Pool()

    # Gather every compressed shard under folder_path/<snapshot>/.
    files_to_be_processed = []
    for snapshot in os.listdir(folder_path):
        for file in os.listdir(os.path.join(folder_path, snapshot)):
            file_path = os.path.join(folder_path, snapshot, file)
            if file.endswith(".json.gz"):
                files_to_be_processed.append(file_path)

    # Cap the run at the first 6000 shards.
    files_to_be_processed = files_to_be_processed[:6000]
    total_files = len(files_to_be_processed)
    progress_bar = tqdm(total=total_files, desc="Processing")

    for file_name in files_to_be_processed:
        pool.apply_async(process_file_with_url, args=(file_name, counter, lock))
    pool.close()

    # Poll the shared counter to drive the progress bar. If the counter has not
    # moved within a 5-second window, assume the remaining tasks are done or
    # stuck and stop polling; pool.join() below still waits for the workers.
    prev_counter_value = counter.value
    while counter.value < total_files:
        progress_bar.update(counter.value - progress_bar.n)
        progress_bar.refresh()
        time.sleep(5)
        if counter.value == prev_counter_value:
            break
        prev_counter_value = counter.value

    pool.join()
    progress_bar.update(counter.value - progress_bar.n)
    progress_bar.close()
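# A sturdier alternative to the polling loop (a sketch, not used here) would be
# to keep the AsyncResult handles returned by apply_async and wait on them:
#     results = [pool.apply_async(process_file_with_url, args=(f, counter, lock))
#                for f in files_to_be_processed]
#     for r in results:
#         r.wait()
# That removes the 5-second stall heuristic and its risk of breaking out early.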
snapshots_folder = '/mnt/weka/peacock/wet-data/output/toxic_filtered_without_bloom_new'

find_unique_domains_with_url(snapshots_folder)

# Persist the landing-page records collected by the first pass.
with open("new_unique_source_documents_with_url.json", 'w') as file:
    json.dump(list(other_selected_domains), file, indent=4, ensure_ascii=False)
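# Second pass: the same pipeline as above, minus the landing-page (is_similar)
# requirement, so the first record seen for any remaining unseen domain is kept.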
def extract_domains_from_file(filepath):
    with gzip.open(filepath, 'rt', encoding='utf-8') as f:
        for line in f:
            data = json.loads(line)
            source_domain = data.get('source_domain')
            if source_domain not in processed_domains:
                processed_domains[source_domain] = 1
                json_obj = {
                    "url": data["url"],
                    "source_domain": data["source_domain"],
                    "title": data["title"],
                    "raw_content": " ".join(data["raw_content"].split()[:50]),
                }
                other_selected_domains.append(json_obj)
def process_file(file_path, counter, lock):
    if file_path.endswith('.json.gz'):
        extract_domains_from_file(file_path)
    with lock:
        counter.value += 1
def find_unique_domains(folder_path):
    # Mirrors find_unique_domains_with_url, dispatching process_file instead.
    manager = Manager()
    counter = manager.Value('i', 0)
    lock = manager.Lock()
    pool = multiprocessing.Pool()

    files_to_be_processed = []
    for snapshot in os.listdir(folder_path):
        for file in os.listdir(os.path.join(folder_path, snapshot)):
            file_path = os.path.join(folder_path, snapshot, file)
            if file.endswith(".json.gz"):
                files_to_be_processed.append(file_path)

    files_to_be_processed = files_to_be_processed[:6000]
    total_files = len(files_to_be_processed)
    progress_bar = tqdm(total=total_files, desc="Processing")

    for file_name in files_to_be_processed:
        pool.apply_async(process_file, args=(file_name, counter, lock))
    pool.close()

    # Same polling strategy as in the first pass.
    prev_counter_value = counter.value
    while counter.value < total_files:
        progress_bar.update(counter.value - progress_bar.n)
        progress_bar.refresh()
        time.sleep(5)
        if counter.value == prev_counter_value:
            break
        prev_counter_value = counter.value

    pool.join()
    progress_bar.update(counter.value - progress_bar.n)
    progress_bar.close()
snapshots_folder = '/mnt/weka/peacock/wet-data/output/toxic_filtered_without_bloom_new'

find_unique_domains(snapshots_folder)

# other_selected_domains was not cleared between passes, so this file contains
# the first pass's landing-page records plus the second pass's additions.
with open("new_unique_source_documents.json", 'w') as file:
    json.dump(list(other_selected_domains), file, indent=4, ensure_ascii=False)
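# Portability note (a sketch, not applied here): because the pool workers rely
# on inheriting the module-level Manager proxies, this script assumes the fork
# start method. On spawn-based platforms the two driver sections above would
# need to move into a function invoked under a guard:
#     if __name__ == "__main__":
#         main()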