import os
import gzip
import json
import sys
from concurrent.futures import ThreadPoolExecutor
import random
from tqdm import tqdm
num_docs = 10  # documents per file for the random-sampling variant (see note inside select_random_documents)
def select_random_documents(args):
    folder_path, lang_id, output_file = args
    # Find all .json.gz files for the given language ID in this snapshot folder
    file_paths = [os.path.join(folder_path, f) for f in os.listdir(folder_path)
                  if f.startswith(lang_id + "_") and f.endswith('.json.gz')]
    if not file_paths:
        print(f"No files found for language ID '{lang_id}' in {folder_path}.")
        # Return an empty list so callers can safely extend() the result
        return []
    ##############################################################
    # Keep only "homepage" documents: strip the scheme, the "www." prefix and any
    # trailing "/" from the URL, and keep the record when what remains matches
    # its source_domain.
    selected_documents = []
    for file in file_paths:
        with gzip.open(file, 'rt') as f:
            for line in f:
                json_obj = json.loads(line)
                url = json_obj["url"].replace("https://", "").replace("http://", "").replace("www.", "")
                if url.endswith("/"):
                    url = url[:-1]
                if json_obj["source_domain"].replace("www.", "") == url:
                    selected_documents.append(json_obj)
    return selected_documents
    ##############################################################
    # NOTE: the code below is unreachable (the function returns above); it is an
    # earlier variant that picks one random file and writes its first num_docs
    # documents to output_file.
    # Select a random file
    random_file = random.choice(file_paths)
    # Read the random file and take the first num_docs documents
    selected_documents = []
    with gzip.open(random_file, 'rt') as f:
        for line in f:
            json_obj = json.loads(line)
            selected_documents.append(json_obj)
            if len(selected_documents) >= num_docs:
                break
    # Write the selected documents to the output file
    with open(output_file, 'w') as f:
        json.dump(selected_documents, f, indent=4, ensure_ascii=False)
    print(f"Randomly selected {num_docs} documents from {random_file}")
# Example usage:
# folder_path = sys.argv[1]
# lang_id = sys.argv[2] # Example language ID
# output_file = 'selected_documents.json'
# snapshots = ["2018-17", "2018-22", "2018-26", "2018-30", "2018-34", "2018-39", "2018-43", "2018-47", "2018-51", "2019-04", "2019-09", "2019-13", "2019-18", "2019-22", "2019-26", "2019-30", "2019-35", "2019-39", "2019-43", "2019-47", "2019-51", "2020-05", "2020-10", "2020-16", "2020-24", "2020-29", "2020-34", "2020-40", "2020-45", "2020-50", "2021-04", "2021-10", "2021-17", "2021-21", "2021-25", "2021-31", "2021-39", "2021-43", "2021-49", "2022-05", "2022-21", "2022-27", "2022-33", "2022-40", "2022-49", "2023-06", "2023-14", "2023-23", "2023-40", "2023-50", "2024-10"]
# snap = random.choice(snapshots)
# select_random_documents(os.path.join(folder_path, snap), lang_id, output_file)
folder_path = "../output/toxic_filtered_without_bloom_new/"
lang_id = "hi"
output_file = 'random_documents_hi.json'
snapshots = ["2018-17", "2018-22", "2018-26", "2018-30", "2018-34", "2018-39", "2018-43", "2018-47", "2018-51", "2019-04", "2019-09", "2019-13", "2019-18", "2019-22", "2019-26", "2019-30", "2019-35", "2019-39", "2019-43", "2019-47", "2019-51", "2020-05", "2020-10", "2020-16", "2020-24", "2020-29", "2020-34", "2020-40", "2020-45", "2020-50", "2021-04", "2021-10", "2021-17", "2021-21", "2021-25", "2021-31", "2021-39", "2021-43", "2021-49", "2022-05", "2022-21", "2022-27", "2022-33", "2022-40", "2022-49", "2023-06", "2023-14", "2023-23", "2023-40", "2023-50", "2024-10"]
# Sequential version, kept for reference:
# all_docs = []
# for snap in snapshots:
#     documents = select_random_documents(os.path.join(folder_path, snap), lang_id, output_file)
#     all_docs.extend(documents)
all_docs = []
to_process = [(os.path.join(folder_path, snap), lang_id, output_file) for snap in snapshots]
with ThreadPoolExecutor() as executor:
    for documents in tqdm(executor.map(select_random_documents, to_process),
                          total=len(snapshots), desc="Processing Snapshots"):
        # Extend the list of all documents with the documents from the current snapshot
        all_docs.extend(documents)
# Write all collected documents to the output file as a single JSON array
with open(output_file, 'w') as f:
    json.dump(all_docs, f, indent=4, ensure_ascii=False)
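# A minimal spot-check sketch (not part of the pipeline above; only the "url" and
# "source_domain" fields seen earlier are assumed, and the helper name is hypothetical):
# load the aggregated JSON and print a few records to verify that only
# homepage-style documents were kept.
#
# def spot_check(path, n=5):
#     with open(path) as f:
#         docs = json.load(f)
#     print(f"{len(docs)} documents collected")
#     for doc in docs[:n]:
#         print(doc["source_domain"], "->", doc["url"])
#
# spot_check(output_file)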