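"""Select one representative document per source domain from gzipped JSONL
snapshot files (records appear to carry url, source_domain, title and
raw_content fields), skipping domains already listed in
unique_source_documents.json.

Two passes are run: the first keeps only landing-page documents whose URL
matches the domain, the second keeps the first document seen for any remaining
unseen domain. Results are written to new_unique_source_documents_with_url.json
and new_unique_source_documents.json.

Note: the script relies on the module-level Manager proxies being inherited by
pool workers, i.e. on the fork start method (the default on Linux).
"""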
import os
import gzip
import json
import time
import multiprocessing
from multiprocessing import Manager

from tqdm import tqdm

# Shared state, backed by a Manager so the worker processes forked by the
# Pool below can read and update it.
manager = multiprocessing.Manager()
processed_domains = manager.dict()        # source_domain -> 1, used as a shared set
other_selected_domains = manager.list()   # selected documents, one per new domain

# Seed the set of already-processed domains from a previous run.
with open("unique_source_documents.json", "r") as file:
    for obj in json.load(file):
        processed_domains[obj["source_domain"]] = 1

def is_similar(url, source_domain):
    # True when the URL is effectively the landing page of the domain.
    if not url or not source_domain:
        return False
    url = url.replace("https://", "").replace("http://", "").replace("www.", "").replace(".html", "").replace("home", "").replace("index", "").strip("/")
    source_domain = source_domain.replace("www.", "")
    return url == source_domain
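
# Example behaviour of is_similar (landing-page check):
#   is_similar("https://www.example.com/index.html", "example.com")  -> True
#   is_similar("https://example.com/blog/post-1",    "example.com")  -> False
# The substring replaces are naive: a domain containing "home" or "index"
# (e.g. "homedepot.com") gets mangled and is rejected.
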
def extract_domains_from_file_with_url(filepath):
    # Stream a gzipped JSONL file and keep one landing-page document per
    # domain that has not been seen before.
    with gzip.open(filepath, 'rt', encoding='utf-8') as f:
        for line in f:
            data = json.loads(line)
            source_domain = data.get('source_domain')
            url = data.get('url')
            if source_domain not in processed_domains and is_similar(url, source_domain):
                processed_domains[source_domain] = 1
                other_selected_domains.append({
                    "url": data["url"],
                    "source_domain": data["source_domain"],
                    "title": data["title"],
                    # Keep only the first 50 whitespace-separated tokens.
                    "raw_content": " ".join(data["raw_content"].split()[:50]),
                })

def process_file_with_url(file_path, counter, lock):
    # Worker task: process a single snapshot file.
    if file_path.endswith('.json.gz'):
        extract_domains_from_file_with_url(file_path)
    # Increment the shared counter so the parent can track progress.
    with lock:
        counter.value += 1

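# Note: apply_async is used fire-and-forget below, so exceptions raised inside
# a worker are silently discarded; only a stalled counter reveals the problem.
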
def find_unique_domains_with_url(folder_path):
    # Manager-backed counter/lock let workers report progress to the parent.
    manager = Manager()
    counter = manager.Value('i', 0)
    lock = manager.Lock()
    pool = multiprocessing.Pool()

    # Collect every snapshot file under folder_path/<snapshot>/.
    files_to_be_processed = []
    for snapshot in os.listdir(folder_path):
        for file in os.listdir(os.path.join(folder_path, snapshot)):
            file_path = os.path.join(folder_path, snapshot, file)
            if file.endswith("json.gz"):
                files_to_be_processed.append(file_path)

    # Cap the run at the first 6000 files.
    files_to_be_processed = files_to_be_processed[:6000]
    total_files = len(files_to_be_processed)
    progress_bar = tqdm(total=total_files, desc="Processing")
    for file_name in files_to_be_processed:
        pool.apply_async(process_file_with_url, args=(file_name, counter, lock))
    pool.close()

    # Poll the shared counter to drive the progress bar. If the counter has
    # not moved for 5 seconds, assume the remaining work is done or stuck and
    # stop polling; pool.join() below still waits for the workers.
    prev_counter_value = counter.value
    while counter.value < total_files:
        progress_bar.update(counter.value - progress_bar.n)
        progress_bar.refresh()
        time.sleep(5)
        if counter.value == prev_counter_value:
            break
        prev_counter_value = counter.value

    pool.join()
    progress_bar.close()

# Path to the snapshots folder
snapshots_folder = '/mnt/weka/peacock/wet-data/output/toxic_filtered_without_bloom_new'

# First pass: keep only landing-page documents (URL matches the domain).
find_unique_domains_with_url(snapshots_folder)
with open("new_unique_source_documents_with_url.json", 'w') as file:
    json.dump(list(other_selected_domains), file, indent=4, ensure_ascii=False)

def extract_domains_from_file(filepath):
    # Second-pass variant: keep the first document seen for any new domain,
    # without requiring the URL to be the domain's landing page.
    with gzip.open(filepath, 'rt', encoding='utf-8') as f:
        for line in f:
            data = json.loads(line)
            source_domain = data.get('source_domain')
            if source_domain not in processed_domains:
                processed_domains[source_domain] = 1
                other_selected_domains.append({
                    "url": data["url"],
                    "source_domain": data["source_domain"],
                    "title": data["title"],
                    "raw_content": " ".join(data["raw_content"].split()[:50]),
                })

def process_file(file_path, counter, lock):
    # Worker task for the second pass.
    if file_path.endswith('.json.gz'):
        extract_domains_from_file(file_path)
    # Increment the shared counter so the parent can track progress.
    with lock:
        counter.value += 1

def find_unique_domains(folder_path):
    manager = Manager()
    counter = manager.Value('i', 0)
    lock = manager.Lock()
    pool = multiprocessing.Pool()

    files_to_be_processed = []
    for snapshot in os.listdir(folder_path):
        for file in os.listdir(os.path.join(folder_path, snapshot)):
            file_path = os.path.join(folder_path, snapshot, file)
            if file.endswith("json.gz"):
                files_to_be_processed.append(file_path)

    # Cap the run at the first 6000 files.
    files_to_be_processed = files_to_be_processed[:6000]
    total_files = len(files_to_be_processed)
    progress_bar = tqdm(total=total_files, desc="Processing")
    for file_name in files_to_be_processed:
        pool.apply_async(process_file, args=(file_name, counter, lock))
    pool.close()

    # Same polling loop as above: stop if the counter stalls for 5 seconds.
    prev_counter_value = counter.value
    while counter.value < total_files:
        progress_bar.update(counter.value - progress_bar.n)
        progress_bar.refresh()
        time.sleep(5)
        if counter.value == prev_counter_value:
            break
        prev_counter_value = counter.value

    pool.join()
    progress_bar.close()

# Path to the snapshots folder
snapshots_folder = '/mnt/weka/peacock/wet-data/output/toxic_filtered_without_bloom_new'

# Second pass: add one document for every remaining unseen domain.
find_unique_domains(snapshots_folder)
with open("new_unique_source_documents.json", 'w') as file:
    json.dump(list(other_selected_domains), file, indent=4, ensure_ascii=False)
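
# Each output file is a JSON array of records shaped like (values illustrative):
#   {
#       "url": "https://example.com/",
#       "source_domain": "example.com",
#       "title": "Example Domain",
#       "raw_content": "first 50 whitespace-separated tokens of the document ..."
#   }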