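"""Collect one representative document per unique source domain from gzipped
JSONL snapshot files (title, URL, and the first 50 words of content), writing
the result to JSON. Two earlier iterations are kept below, commented out; the
active version follows them."""
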
# import os
# import gzip
# import json
# from tqdm import tqdm
#
# processed_domains = set()
# with open("unique_source_documents.json", 'r') as file:
#     for obj in json.load(file):
#         processed_domains.add(obj["source_domain"])
#
# other_selected_domains = list()
#
# def is_similar(url, source_domain):
#     url = url.replace("https://", "").replace("http://", "").replace("www.", "").replace(".html", "").replace("home", "").replace("index", "").strip("/")
#     source_domain = source_domain.replace("www.", "")
#     return url == source_domain
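#
# # Example of the normalization above (illustrative values only):
# # is_similar("https://www.example.com/index.html", "www.example.com") -> True,
# # since both sides reduce to "example.com".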
# def extract_domains_from_file_with_url(filepath):
#     with gzip.open(filepath, 'rt', encoding='utf-8') as f:
#         for line in f:
#             data = json.loads(line)
#             source_domain = data.get('source_domain')
#             url = data.get('url')
#             print("Inside function")
#             if source_domain not in processed_domains and is_similar(url, source_domain):
#                 processed_domains.add(source_domain)
#                 json_obj = {}
#                 json_obj["url"] = data["url"]
#                 json_obj["source_domain"] = data["source_domain"]
#                 json_obj["title"] = data["title"]
#                 json_obj["raw_content"] = " ".join(data["raw_content"].split()[:50])
#                 other_selected_domains.append(json_obj)
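#
# # The is_similar filter above keeps only homepage-like records: a domain is
# # selected just once, and only when its document URL reduces to the bare domain.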
# # Function to find unique domains in snapshots folder
# def find_unique_domains_with_url(folder_path):
#     total_files = sum(len(files) for _, _, files in os.walk(folder_path))
#     progress_bar = tqdm(total=total_files, desc="Processing")
#     files_to_be_processed = []
#     for snapshot in os.listdir(folder_path):
#         for file in os.listdir(os.path.join(folder_path, snapshot)):
#             file_path = os.path.join(folder_path, snapshot, file)
#             if file.endswith("json.gz"):
#                 files_to_be_processed.append(file_path)
#     for root, dirs, files in os.walk(folder_path):
#         for file in files[:100]:
#             print(file)
#             if file.endswith('.json.gz'):
#                 file_path = os.path.join(root, file)
#                 extract_domains_from_file_with_url(file_path)
#                 progress_bar.update(1)
#         break
#     progress_bar.close()
#
# # Path to the snapshots folder
# snapshots_folder = '/mnt/weka/peacock/wet-data/output/toxic_filtered_without_bloom_new'
# # Find unique domains
# find_unique_domains_with_url(snapshots_folder)
#
# with open("new_unique_source_documents_with_url.json", 'w') as file:
#     json.dump(other_selected_domains, file, indent=4, ensure_ascii=False)
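#
# # Second iteration (also commented out): the same sweep without the URL
# # similarity filter, continuing from the state accumulated above.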
# def extract_domains_from_file(filepath):
#     with gzip.open(filepath, 'rt', encoding='utf-8') as f:
#         for line in f:
#             data = json.loads(line)
#             source_domain = data.get('source_domain')
#             if source_domain not in processed_domains:
#                 processed_domains.add(source_domain)
#                 json_obj = {}
#                 json_obj["url"] = data["url"]
#                 json_obj["source_domain"] = data["source_domain"]
#                 json_obj["title"] = data["title"]
#                 json_obj["raw_content"] = " ".join(data["raw_content"].split()[:50])
#                 other_selected_domains.append(json_obj)
#
# # Function to find unique domains in snapshots folder
# def find_unique_domains(folder_path):
#     total_files = sum(len(files) for _, _, files in os.walk(folder_path))
#     progress_bar = tqdm(total=total_files, desc="Processing")
#     for root, dirs, files in os.walk(folder_path):
#         for file in files:
#             if file.endswith('.json.gz'):
#                 file_path = os.path.join(root, file)
#                 extract_domains_from_file(file_path)
#                 progress_bar.update(1)
#     progress_bar.close()
#
# # Path to the snapshots folder
# snapshots_folder = '/mnt/weka/peacock/wet-data/output/toxic_filtered_without_bloom_new'
# # Find unique domains
# find_unique_domains(snapshots_folder)
#
# with open("new_unique_source_documents.json", 'w') as file:
#     json.dump(other_selected_domains, file, indent=4, ensure_ascii=False)
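
# --- Active version --------------------------------------------------------
# Resumes from the intermediate file produced by the first pass and sweeps the
# snapshots again, adding one document per domain not already covered.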
import os
import gzip
import json

from tqdm import tqdm

other_selected_domains = list()
processed_domains = set()
with open("new_unique_source_documents_with_url.json", 'r') as file:
    for obj in json.load(file):
        processed_domains.add(obj["source_domain"])
        other_selected_domains.append(obj)
def extract_domains_from_file(filepath):
    """Stream a gzipped JSONL snapshot, keeping the first document per new domain."""
    with gzip.open(filepath, 'rt', encoding='utf-8') as f:
        for line in f:
            data = json.loads(line)
            source_domain = data.get('source_domain')
            # Skip records with no domain, and domains we already hold.
            if source_domain and source_domain not in processed_domains:
                processed_domains.add(source_domain)
                other_selected_domains.append({
                    "url": data["url"],
                    "source_domain": source_domain,
                    "title": data["title"],
                    # Keep only the first 50 whitespace-separated words as a preview.
                    "raw_content": " ".join(data["raw_content"].split()[:50]),
                })
# Walk the snapshots folder and collect one document per previously unseen domain.
def find_unique_domains(folder_path):
    # Count only .json.gz files so the progress bar total matches the updates below.
    total_files = sum(
        sum(1 for f in files if f.endswith('.json.gz'))
        for _, _, files in os.walk(folder_path)
    )
    progress_bar = tqdm(total=total_files, desc="Processing")
    for root, dirs, files in os.walk(folder_path):
        for file in files:
            if file.endswith('.json.gz'):
                file_path = os.path.join(root, file)
                extract_domains_from_file(file_path)
                progress_bar.update(1)
    progress_bar.close()
# Path to the snapshots folder
snapshots_folder = '/mnt/weka/peacock/wet-data/output/toxic_filtered_without_bloom_new'

# Find unique domains
find_unique_domains(snapshots_folder)

with open("new_unique_source_documents.json", 'w') as file:
    json.dump(other_selected_domains, file, indent=4, ensure_ascii=False)
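
# A quick sanity check one could run after the dump (a sketch, not part of the
# original pipeline): every entry should carry a distinct source_domain.
# with open("new_unique_source_documents.json") as f:
#     docs = json.load(f)
# assert len(docs) == len({d["source_domain"] for d in docs})
# print(f"{len(docs)} unique domains collected")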