import os
import gzip
import json
from tqdm import tqdm
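# Two-pass selection of one representative document per source domain:
#   1) prefer a record whose URL looks like the domain's homepage,
#   2) then fall back to the first record seen for domains with no homepage match.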
processed_domains = set()
# with open("unique_source_documents.json", 'r') as file:
#     for obj in json.load(file):
#         processed_domains.add(obj["source_domain"])
other_selected_domains = list()
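# `base_path` is used below but never defined in this snippet; as a sketch,
# assume it is supplied via an environment variable (the BASE_PATH name is only
# an illustrative choice) or edited in place to point at the snapshot dumps.
base_path = os.environ.get("BASE_PATH", ".")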
def is_similar(url, source_domain):
    # Normalise the URL: drop the scheme, "www.", ".html", "home"/"index" page
    # names, and trailing slashes, then compare against the bare domain.
    url = url.replace("https://", "").replace("http://", "").replace("www.", "").replace(".html", "").replace("home", "").replace("index", "").strip("/")
    source_domain = source_domain.replace("www.", "")
    return url == source_domain
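# Illustrative behaviour (example.com is a placeholder domain):
#   is_similar("https://www.example.com/index.html", "example.com")  -> True
#   is_similar("https://example.com/blog/some-post", "example.com")  -> False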
def extract_domains_from_file_with_url(filepath):
    with gzip.open(filepath, 'rt', encoding='utf-8') as f:
        for line in f:
            data = json.loads(line)
            source_domain = data.get('source_domain')
            url = data.get('url')
            # Keep a record only if its URL looks like the domain's homepage
            # and the domain has not been selected yet.
            if source_domain not in processed_domains and is_similar(url, source_domain):
                processed_domains.add(source_domain)
                json_obj = {}
                json_obj["url"] = data["url"]
                json_obj["source_domain"] = data["source_domain"]
                json_obj["title"] = data["title"]
                # Truncate the document text to its first 50 whitespace-separated tokens.
                json_obj["raw_content"] = " ".join(data["raw_content"].split()[:50])
                other_selected_domains.append(json_obj)
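# Each .json.gz file is assumed to hold one JSON object per line with at least
# the fields read above, e.g. (illustrative values):
#   {"url": "https://www.example.com/", "source_domain": "example.com",
#    "title": "Example", "raw_content": "..."}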
# First pass: walk the snapshots folder and collect one homepage-like document
# per domain from every .json.gz file.
def find_unique_domains_with_url(folder_path):
    total_files = sum(len(files) for _, _, files in os.walk(folder_path))
    progress_bar = tqdm(total=total_files, desc="Processing")
    for root, dirs, files in os.walk(folder_path):
        for file in files:
            if file.endswith('.json.gz'):
                file_path = os.path.join(root, file)
                extract_domains_from_file_with_url(file_path)
                progress_bar.update(1)
    progress_bar.close()
# Path to the snapshots folder
snapshots_folder = f'{base_path}/toxic_filtered_without_bloom_new'
# First pass: find unique domains with homepage-like URLs
find_unique_domains_with_url(snapshots_folder)
with open("new_unique_source_documents_with_url.json", 'w') as file:
    json.dump(other_selected_domains, file, indent=4, ensure_ascii=False)
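# The resulting file is a JSON array with one entry per matched domain, each
# holding "url", "source_domain", "title", and the truncated "raw_content".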
# For remaining domains with no homepage-like URL, accept the first document seen.
def extract_domains_from_file(filepath):
    with gzip.open(filepath, 'rt', encoding='utf-8') as f:
        for line in f:
            data = json.loads(line)
            source_domain = data.get('source_domain')
            if source_domain not in processed_domains:
                processed_domains.add(source_domain)
                json_obj = {}
                json_obj["url"] = data["url"]
                json_obj["source_domain"] = data["source_domain"]
                json_obj["title"] = data["title"]
                json_obj["raw_content"] = " ".join(data["raw_content"].split()[:50])
                other_selected_domains.append(json_obj)
# Walk every .json.gz file in the snapshots folder for the fallback pass.
def find_unique_domains(folder_path):
    total_files = sum(len(files) for _, _, files in os.walk(folder_path))
    progress_bar = tqdm(total=total_files, desc="Processing")
    for root, dirs, files in os.walk(folder_path):
        for file in files:
            if file.endswith('.json.gz'):
                file_path = os.path.join(root, file)
                extract_domains_from_file(file_path)
                progress_bar.update(1)
    progress_bar.close()
# Path to the snapshots folder
snapshots_folder = f'{base_path}/toxic_filtered_without_bloom_new'
# Second pass: cover domains with no homepage match
find_unique_domains(snapshots_folder)
with open("new_unique_source_documents.json", 'w') as file:
    json.dump(other_selected_domains, file, indent=4, ensure_ascii=False)