python_code
stringlengths 0
187k
| repo_name
stringlengths 8
46
| file_path
stringlengths 6
135
|
---|---|---|
import torch
import json
import os
import pickle
from tqdm import tqdm
from collections import defaultdict
import numpy
from scipy.spatial.distance import mahalanobis
from sklearn import mixture
from sklearn.cluster import AgglomerativeClustering
from sklearn.decomposition import PCA
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, required=True)
parser.add_argument("--num_clusters", type=int, default=15)
parser.add_argument("--clustering", type=str, choices=['gmm', 'agglomerative'], required=True)
parser.add_argument("--distance", type=str, default='cosine', choices=['cosine', 'euclidean'])
parser.add_argument("--representations", type=str)
parser.add_argument("--pca", action="store_true")
parser.add_argument("--num_pca_components", type=int, default=50)
parser.add_argument("--data", type=str, default="data/p3_data_simplified.json")
parser.add_argument("--output_prefix", type=str, required=True)
parser.add_argument("--batch_size", type=int, default=10)
args = parser.parse_args()
model_name = args.model
num_clusters = args.num_clusters
data = json.load(open(args.data))
print(f"Model: {args.model}")
print(f"Num clusters: {args.num_clusters}")
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
model.cuda()
if args.clustering == "gmm":
cluster_dir = f"{args.output_prefix}/p3_dev_{model_name.replace('/', '_')}_final_layer_gmm_clusters/"
else:
cluster_dir = f"{args.output_prefix}/p3_dev_{model_name.replace('/', '_')}_final_layer_aggl_{args.distance}_clusters/"
print(f"Clusters directory: {cluster_dir}")
if not os.path.exists(cluster_dir):
os.makedirs(cluster_dir)
instances = []
for dataset_info in data.values():
for value in dataset_info["validation"].values():
value["split"] = "validation"
instances.append(value)
for value in dataset_info["train"].values():
value["split"] = "train"
instances.append(value)
batches = []
i = 0
while i < len(instances):
batches.append(instances[i:i+args.batch_size])
i += args.batch_size
if args.representations is None:
print("Computing representations")
all_representations = None
i = 0
for batch in tqdm(batches):
input_data = tokenizer.batch_encode_plus([instance["input"] for instance in batch],
return_tensors="pt",
padding=True)
target_data = tokenizer.batch_encode_plus([instance["target"] for instance in batch],
return_tensors="pt",
padding=True)
input_ids = input_data['input_ids'].cuda()
labels = target_data['input_ids'].cuda()
# (batch_size, num_tokens)
mask = input_data['attention_mask'].cuda()
model_outputs = model(input_ids=input_ids,
attention_mask=mask,
labels=labels,
return_dict=True)
# (batch_size, num_tokens, hidden_size)
hidden_states = model_outputs["encoder_last_hidden_state"]
# (batch_size, hidden_size)
pooled_hidden_states = (hidden_states * mask.unsqueeze(-1)).sum(1) / mask.sum(1).unsqueeze(-1)
for representation in pooled_hidden_states:
representation = representation.detach().cpu().numpy()
if all_representations is None:
all_representations = numpy.zeros((len(instances), representation.shape[0]))
all_representations[i] = representation
i += 1
with open(os.path.join(cluster_dir, "final_layer_representations.pkl"), "wb") as outfile:
pickle.dump(all_representations, outfile)
else:
all_representations = pickle.load(open(args.representations, "rb"))
if args.pca:
print("Running PCA")
pca = PCA(n_components=50, random_state=0)
all_representations = pca.fit_transform(all_representations)
if args.clustering == "gmm":
print("Clustering with Gaussian Mixture")
gmm = mixture.GaussianMixture(
n_components=num_clusters,
covariance_type='full',
max_iter=150,
random_state=0
)
gmm = gmm.fit(all_representations)
cluster_labels = gmm.predict(all_representations)
with open(os.path.join(cluster_dir, "cluster_means.pkl"), "wb") as outfile:
pickle.dump(gmm.means_, outfile)
with open(os.path.join(cluster_dir, "cluster_covars.pkl"), "wb") as outfile:
pickle.dump(gmm.covars_, outfile)
cluster_distances = numpy.zeros((num_clusters, num_clusters))
inverse_covariances = [numpy.linalg.inv(x) for x in gmm.covars_]
for i in range(num_clusters):
for j in range(num_clusters):
cluster_distances[i][j] = mahalanobis(gmm.means_[i], gmm.means_[j], inverse_covariances[j])
else:
print(f"Clustering with an Agglomerative clustering algorithm using {args.distance} distance")
clustering = AgglomerativeClustering(
n_clusters=num_clusters,
affinity=args.distance,
compute_distances=True,
)
clustering = clustering.fit(all_representations)
cluster_distances = clustering.distances_
cluster_labels = clustering.labels_
with open(os.path.join(cluster_dir, "cluster_distances.pkl"), "wb") as outfile:
pickle.dump(cluster_distances, outfile)
cluster_counts = [0] * 15
for label in cluster_labels:
cluster_counts[label] += 1
print("Cluster counts:", cluster_counts)
cluster_index_map = defaultdict(lambda: defaultdict(lambda: {'train': [], 'validation': []}))
for cluster_label, instance in zip(cluster_labels, instances):
cluster_index_map[cluster_label][instance['dataset']][instance['split']].append(instance['index'])
for cluster_label, cluster_data in cluster_index_map.items():
with open(os.path.join(cluster_dir, f"cluster_{cluster_label}_indices.pkl"), "wb") as outfile:
pickle.dump(dict(cluster_data), outfile)
| data-efficient-finetuning-main | scripts/make_final_rep_clusters.py |
import faiss
import argparse
import torch
import numpy
import json
import tqdm
from scipy.stats import entropy
import pickle
import os
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
numpy.random.seed(20389)
parser = argparse.ArgumentParser()
parser.add_argument("--search_output", type=str)
parser.add_argument("--training_data", type=str)
parser.add_argument("--encoded_training_data", type=str)
parser.add_argument("--index", type=str)
parser.add_argument("--model", type=str)
parser.add_argument("--encoding_batch_size", type=int)
parser.add_argument("--sorted_training_data", type=str)
parser.add_argument("--output_separate_files", action="store_true", help="write output after each iteration of kmeans++ ")
parser.add_argument("--training_data_distances", type=str)
parser.add_argument("--acquisition_batch_size", type=int, default=1000)
args = parser.parse_args()
dev_retrieved_indices = set()
for line in open(args.search_output):
datum = json.loads(line)
for id_ in datum["ids"]:
dev_retrieved_indices.add(id_)
def encode_batch(batched_inputs):
input_data = tokenizer.batch_encode_plus(batched_inputs,
return_tensors="pt",
padding=True)
input_ids = input_data['input_ids']
# (batch_size, num_tokens)
mask = input_data['attention_mask']
if torch.cuda.is_available():
input_ids = input_ids.cuda(device=cuda_devices[0])
mask = mask.cuda(device=cuda_devices[0])
encoder_outputs = encoder(input_ids=input_ids,
attention_mask=mask,
return_dict=True)
# (batch_size, num_tokens, hidden_size)
hidden_states = encoder_outputs["last_hidden_state"]
# (batch_size, hidden_size)
pooled_hidden_states = (hidden_states * mask.unsqueeze(-1)).sum(1) / mask.sum(1).unsqueeze(-1)
return pooled_hidden_states.detach().cpu().numpy()
if os.path.exists(args.training_data_distances):
print(f"Reading pickled distances")
distance_data = pickle.load(open(args.training_data_distances, "rb"))
training_data_distances = distance_data["training_data_distances"]
training_distances_from_dev_retrieved = distance_data["training_dev_retrieved_distances"]
else:
if os.path.exists(args.encoded_training_data):
print(f"Reading encoded training data from {args.encoded_training_data}")
training_data_matrix = pickle.load(open(args.encoded_training_data, "rb"))
else:
tokenizer = AutoTokenizer.from_pretrained(args.model)
model = AutoModelForSeq2SeqLM.from_pretrained(args.model)
assert torch.cuda.is_available()
cuda_devices = list(range(torch.cuda.device_count()))
print(f"Using CUDA devices {cuda_devices} for encoding training data")
model.cuda(device=cuda_devices[0])
model.eval()
encoder = torch.nn.DataParallel(model.encoder, device_ids=cuda_devices)
encoded_training_data = []
with torch.inference_mode():
batch = []
for line in tqdm.tqdm(open(args.training_data)):
instance = json.loads(line)
batch.append(instance["input"])
if len(batch) == args.encoding_batch_size:
encoded_batch = encode_batch(batch)
batch = []
encoded_training_data.append(encoded_batch)
if batch:
encoded_batch = encode_batch(batch)
encoded_training_data.append(encoded_batch)
training_data_matrix = numpy.concatenate(encoded_training_data)
print(f"Dumping encoded training data at {args.encoded_training_data}")
with open(args.encoded_training_data, "wb") as outfile:
pickle.dump(training_data_matrix, outfile)
print("Loading index..")
index = faiss.read_index(args.index)
print("Done loading index")
opq_matrix = faiss.downcast_VectorTransform(index.chain.at(0))
retrieved_data_matrix = numpy.asarray([index.reconstruct(i) for i in dev_retrieved_indices])
# OPQ transform
opq_training_data = opq_matrix.apply(training_data_matrix)
opq_retrieved_data = opq_matrix.apply(retrieved_data_matrix)
training_data_distances = faiss.pairwise_distances(opq_training_data, opq_training_data)
training_retrieved_distances = faiss.pairwise_distances(opq_training_data, opq_retrieved_data)
training_distances_from_dev_retrieved = training_retrieved_distances.min(axis=1)
with open(args.training_data_distances, "wb") as outfile:
pickle.dump(
{
"training_data_distances": training_data_distances,
"training_dev_retrieved_distances": training_distances_from_dev_retrieved
},
outfile
)
print("sorting training data using a KMeans++ like algorithm")
# We sort training data based on their distance to the retrieved set + the sorted set built so far.
# This is equivalent to running KMeans++ with the dev-retrieved set as a fixed set of centroids, and adding
# in-task training data as new centroids. Instead of stopping at a pre-determined number of cetroids, we
# sort the entire in-task training set while choosing a set of points (as many as args.acquisition_batch_size)
# at a time greedily.
sorted_training_indices = []
distances_to_selected = training_distances_from_dev_retrieved
print(f"Training to retrieved distances: {min(distances_to_selected)}, {numpy.mean(distances_to_selected)}, {max(distances_to_selected)}")
print(f"Training data distances: {numpy.min(training_data_distances)}, {numpy.mean(training_data_distances)}, {numpy.max(training_data_distances)}")
print(f"Number of negative training data distances: {numpy.sum(training_data_distances < 0.0)}")
print("Making all negative inter-training data distances 0")
training_data_distances = training_data_distances * (training_data_distances >= 0.0)
num_training_points = len(training_distances_from_dev_retrieved)
selection_pool = list(range(num_training_points))
if args.output_separate_files:
outfile_prefix = args.sorted_training_data.replace(".jsonl", "")
else:
outfile = open(args.sorted_training_data, "w")
training_data_lines = open(args.training_data).readlines()
with tqdm.tqdm(total=num_training_points) as pbar:
set_index = 0
while selection_pool:
if args.output_separate_files:
outfile = open(f"{outfile_prefix}_set{set_index}.jsonl", "w")
if len(selection_pool) <= args.acquisition_batch_size:
next_points = selection_pool
else:
distance_distribution = distances_to_selected / numpy.sum(distances_to_selected)
uniform_entropy = entropy([1./len(selection_pool)] * len(selection_pool))
print(f"Entropy: {entropy(distance_distribution)} (uniform: {uniform_entropy})")
next_points = numpy.random.choice(
selection_pool,
args.acquisition_batch_size,
p=distance_distribution
)
next_points = list(set(next_points))
sorted_training_indices.extend(next_points)
next_point_set = set(next_points)
# Update distances and selection set
next_distances = []
next_pool = []
for ind, distance in zip(selection_pool, distances_to_selected):
# If the point is in the set of next points, we remove it from the selection pool.
if ind not in next_point_set:
next_pool.append(ind)
distance = min(distance, min([training_data_distances[ind][j] for j in next_points]))
next_distances.append(distance)
selection_pool = next_pool
distances_to_selected = numpy.asarray(next_distances)
if args.output_separate_files:
for ind in sorted_training_indices:
print(training_data_lines[ind].strip(), file=outfile)
set_index += 1
pbar.update(len(next_points))
if not args.output_separate_files:
for ind in sorted_training_indices:
print(training_data_lines[ind].strip(), file=outfile)
| data-efficient-finetuning-main | scripts/sort_training_data.py |
import json
from tqdm import tqdm
import sys
p3_instance_filename = sys.argv[1]
pyserini_output_filename = sys.argv[2]
with open(p3_instance_filename, 'r') as f:
with open(pyserini_output_filename, "w") as writefile:
for i, line in tqdm(enumerate(f)):
data = json.loads(line)
pyserini_sample = {
"id": i,
"contents": data["input"],
}
writefile.write(json.dumps(pyserini_sample) + "\n")
| data-efficient-finetuning-main | scripts/convert_pyserini.py |
import json
import gzip
import argparse
import os
from datasets import load_from_disk
parser = argparse.ArgumentParser()
parser.add_argument("--datasets", type=str, help="Json file containing the list of P3 datasets to load")
parser.add_argument("--output_prefix", type=str, required=True)
parser.add_argument("--data_cache", type=str, required=True, help="location of the data cache")
parser.add_argument("--text_instances_file", type=str, required=True, help="output filename for processed data")
args = parser.parse_args()
data_cache = args.data_cache
train_tasks_list = json.load(open(args.datasets))
if not os.path.exists(args.output_prefix):
os.makedirs(args.output_prefix)
instances = []
instances_datasets_info = []
for dataset_name in train_tasks_list:
data_path = os.path.join(data_cache, dataset_name)
if not os.path.exists(data_path):
print(f"{data_path} not found!")
continue
dataset = load_from_disk(data_path)
for split_name in dataset.keys():
for i, instance_info in enumerate(dataset[split_name]):
instances.append({
"input": instance_info['inputs_pretokenized'],
"target": instance_info['targets_pretokenized']
})
if "answer_choices" in instance_info:
instances[-1]["answer_choices"] = instance_info["answer_choices"]
instances_datasets_info.append((dataset_name, split_name, i))
with gzip.open(text_instances_file, "wb") as outfile:
json.dump(instances, outfile, indent=2)
with gzip.open(os.path.join(args.output_prefix, "p3_train_instances_dataset_indices.json"), "wb") as outfile:
json.dump(instances_datasets_info, outfile, indent=2)
| data-efficient-finetuning-main | scripts/write_p3_train_instances.py |
import json
from collections import defaultdict
# We'll keep only one copy of instance of all the prompt-variants
data_sizes = {k: len(v['validation']) + len(v['train']) for k, v in json.load(open('p3_data.json')).items()}
simplified_p3_data = defaultdict(lambda: {"validation": {}, "train": {}})
num_all_instances = 0
num_kept_instances = 0
for grouped_instance_info in json.load(open("p3_prompt_grouped_data.json")).values():
dataset_groups = defaultdict(list)
for instance_info in grouped_instance_info:
num_all_instances += 1
dataset_groups[instance_info["input"]["dataset"]].append((instance_info["input"], instance_info["cluster_id"]))
largest_dataset = None
for key in dataset_groups:
if largest_dataset is None or data_sizes[key] > data_sizes[largest_dataset]:
largest_dataset = key
for instance, cluster_id in dataset_groups[largest_dataset]:
if instance["is_correct"]:
simplified_p3_data[largest_dataset]["validation" if "test" in cluster_id else "train"][instance["index"]] = instance
num_kept_instances += 1
print(f"Kept {num_kept_instances} of {num_all_instances}")
with open("p3_data_simplified.json", "w") as outfile:
json.dump(simplified_p3_data, outfile, indent=2)
| data-efficient-finetuning-main | scripts/simplify_p3_data.py |
import json
import sys
filename = sys.argv[1]
desired_size = int(sys.argv[2])
outfilename = sys.argv[3]
all_ids = []
all_distances = []
with open(filename, 'r') as f:
for line in f:
sample = json.loads(line)
all_ids.append(sample['ids'])
all_distances.append(sample['distances'])
def flatten_dedup_list(id_list):
return list(set([i for sublist in id_list for i in sublist]))
cur_ret = 1000
cur_ids = flatten_dedup_list(all_ids)
# we trim ids off the end until we hit desired size
while len(cur_ids) > desired_size:
all_ids = [i[:-1] for i in all_ids]
cur_ids = flatten_dedup_list(all_ids)
cur_ret -= 1
print(f"Reduced down to {cur_ret} retrieved cur size {len(cur_ids)}", end='\r')
print(f'\nReached {len(cur_ids)} (as close to {desired_size} as poss) at ret {cur_ret}. Dumping now...')
# then save :)
with open(outfilename, 'w') as w:
for i in cur_ids:
w.write(str(i) + '\n')
| data-efficient-finetuning-main | scripts/reduce_indices_to_balanced.py |
import json
import os
import sys
import pickle
from collections import defaultdict
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn import mixture
from sklearn.metrics.pairwise import euclidean_distances
num_clusters = 15
data = json.load(open("p3_data_simplified.json"))
#data = json.load(open("p3_data.json"))
cluster_dir = "/home/pradeepd/data/p3_dev_tfidf_clusters/"
#cluster_dir = "/p3_dev_full_tfidf_clusters/"
instances = []
for dataset_info in data.values():
for value in dataset_info["validation"].values():
value["split"] = "validation"
instances.append(value)
for value in dataset_info["train"].values():
value["split"] = "train"
instances.append(value)
vectorizer = TfidfVectorizer(decode_error='replace', strip_accents='unicode', analyzer='word', stop_words='english')
indexed_data = vectorizer.fit_transform([i["input"] for i in instances])
svd = TruncatedSVD(n_components=50, random_state=0) # Cannot use PCA for sparse matrices. This is essentially LSA.
ld_indexed_data = svd.fit_transform(indexed_data)
gmm = mixture.GaussianMixture(
n_components=num_clusters,
covariance_type='full',
max_iter=150,
random_state=0
)
gmm = gmm.fit(ld_indexed_data)
cluster_labels = gmm.predict(ld_indexed_data)
cluster_distances = euclidean_distances(gmm.means_)
cluster_counts = [0] * 15
for label in cluster_labels:
cluster_counts[label] += 1
print("Cluster counts:", cluster_counts)
cluster_index_map = defaultdict(lambda: defaultdict(lambda: {'train': [], 'validation': []}))
for cluster_label, instance in zip(cluster_labels, instances):
cluster_index_map[cluster_label][instance['dataset']][instance['split']].append(instance['index'])
for cluster_label, cluster_data in cluster_index_map.items():
with open(os.path.join(cluster_dir, f"cluster_{cluster_label}_indices.pkl"), "wb") as outfile:
pickle.dump(dict(cluster_data), outfile)
with open(os.path.join(cluster_dir, "cluster_distances.pkl"), "wb") as outfile:
pickle.dump(cluster_distances, outfile)
| data-efficient-finetuning-main | scripts/make_tf_idf_clusters.py |
from tqdm import tqdm
from datasets import load_dataset
import argparse
import json
parser = argparse.ArgumentParser()
parser.add_argument("--p3_output_file", type=str, required=True)
args = parser.parse_args()
tasks = open('data/t0_prompt_tasks.txt', 'r')
outputfile = open(args.p3_output_file, 'w')
for task in tqdm(tasks):
ds = load_dataset("bigscience/P3", task.strip(), split="train")
for sample in ds:
outputfile.write(json.dumps({
'input': sample['inputs_pretokenized'],
'target': sample['targets_pretokenized']
}) + '\n')
| data-efficient-finetuning-main | scripts/download_p3.py |
import argparse
from fileinput import filename
import faiss
import torch
import gzip
import json
import tqdm
from collections import defaultdict
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from attribution.huggingface_readers import (
CaseHOLDReader,
RTEReader,
CBReader,
HellaSwagReader,
StoryClozeReader,
WinoGrandeReader,
WSCReader,
COPAReader,
WiCReader,
ANLIR1Reader,
ANLIR2Reader,
ANLIR3Reader
)
from attribution.drop_reader import DropMReader
from attribution.qasper_reader import QasperEvidencePromptReader
from attribution.p3_jsonl_reader import P3ClusterReader
parser = argparse.ArgumentParser()
parser.add_argument("--index", type=str, required=True)
parser.add_argument("--model", type=str, required=True)
parser.add_argument("--search_output", type=str, required=True)
parser.add_argument("--batch_size", type=int, default=4)
parser.add_argument("--num_neighbors_search", type=int, default=3000)
parser.add_argument("--query_size", type=int, default=1000)
parser.add_argument("--p3_data", type=str, help="If provided, will write training data to `training_data`")
parser.add_argument("--training_data", type=str)
parser.add_argument("--p3_dataset_indices", type=str, help="If provided, will compute P3 dataset stats")
parser.add_argument("--outfile_location", type=str, required=True, help="Where to output all the retrieved data points")
args = parser.parse_args()
data_files = []
training_data = []
datasets = [
"rte",
"anli_r1",
"anli_r2",
"anli_r3",
"wic",
"copa",
"wsc",
"winogrande",
"hellaswag",
"cb",
"story_cloze",
"casehold",
"drop"
]
for dataset in datasets:
if dataset == "rte":
shorthand = "rte/32_shot"
elif dataset == "anli_r1":
shorthand = "anli-r1/50_shot"
elif dataset == "anli_r2":
shorthand = "anli-r2/50_shot"
elif dataset == "anli_r3":
shorthand = "anli-r3/50_shot"
elif dataset == "cb":
shorthand = "cb/32_shot"
elif dataset == "copa":
shorthand = "copa/32_shot"
elif dataset == "hellaswag":
shorthand = "h-swag/20_shot"
elif dataset == "story_cloze":
shorthand = "storycloze/70_shot"
elif dataset == "wic":
shorthand = "wic/32_shot"
elif dataset == "winogrande":
shorthand = "winogrande/50_shot"
else:
shorthand = "wsc/32_shot"
for seed in [0, 1, 32, 42, 1024]:
data_files.append(f"data/few_shot/{shorthand}/{seed}_seed.jsonl")
training_data.append(f"{dataset}_{seed}.jsonl")
# readers for each dataset.
# we use train splits for t0 tasks, custom splits for other.
readers = [
RTEReader(model_name=args.model, split_name='train', use_val_split=False, return_original_instance=True),
ANLIR1Reader(model_name=args.model, split_name='train', use_val_split=False, return_original_instance=True),
ANLIR2Reader(model_name=args.model, split_name='train', use_val_split=False, return_original_instance=True),
ANLIR3Reader(model_name=args.model, split_name='train', use_val_split=False, return_original_instance=True),
WiCReader(model_name=args.model, split_name='train', use_val_split=False, return_original_instance=True),
COPAReader(model_name=args.model, split_name='train', use_val_split=False, return_original_instance=True),
WSCReader(model_name=args.model, split_name='train', use_val_split=False, return_original_instance=True),
WinoGrandeReader(model_name=args.model, split_name='train', use_val_split=False, return_original_instance=True),
HellaSwagReader(model_name=args.model, split_name='train', use_val_split=False, return_original_instance=True),
CBReader(model_name=args.model, split_name='train', use_val_split=False, return_original_instance=True),
StoryClozeReader(model_name=args.model, split_name='train', use_val_split=False, return_original_instance=True),
CaseHOLDReader(model_name=args.model, split_name='validation', return_original_instance=True),
DropMReader(model_name=args.model, split_name='validation', return_original_instance=True)
]
# load index once into ram.
print('loading index...')
index = faiss.read_index(args.index)
print('index loaded!')
tokenizer = AutoTokenizer.from_pretrained(args.model)
model = AutoModelForSeq2SeqLM.from_pretrained(args.model)
if torch.cuda.is_available():
model.cuda()
model.eval()
per_ds_per_neighbour_indices_frequencies = {d: {} for d in datasets}
max_index = 0
#for data_file, out_file, reader in zip(data_files,training_data):
for dataset, reader in zip(datasets, readers):
print(f"Retreiving over {dataset}")
neighbours_to_write = [args.num_neighbors_search]
query_size = args.query_size
args.num_neighbors_search = args.num_neighbors_search
#reader = StoryClozeReader(model_name=args.model, split_name='validation')
#reader = CaseHOLDReader(model_name=args.model, split_name='validation')
#reader = RTEReader(model_name=args.model, split_name='train')
#reader = P3ClusterReader(model_name=args.model)
#reader = DROPReader(model_name=args.model, split_name='validation')
filename = 'dummy'
def query_index(queries):
input_data = tokenizer.batch_encode_plus(queries,
return_tensors="pt",
padding=True)
input_ids = input_data['input_ids']
# (batch_size, num_tokens)
mask = input_data['attention_mask']
if torch.cuda.is_available():
input_ids = input_ids.cuda()
mask = mask.cuda()
encoder_outputs = model.encoder(input_ids=input_ids,
attention_mask=mask,
return_dict=True)
# (batch_size, num_tokens, hidden_size)
hidden_states = encoder_outputs["last_hidden_state"]
# (batch_size, hidden_size)
pooled_hidden_states = (hidden_states * mask.unsqueeze(-1)).sum(1) / mask.sum(1).unsqueeze(-1)
pooled_hidden_states_np = pooled_hidden_states.detach().cpu().numpy()
return index.search(pooled_hidden_states_np, k=args.num_neighbors_search)
g = lambda: defaultdict(int)
per_ds_per_neighbour_indices_frequencies[dataset] = defaultdict(g)
outputfile = open(f"{args.search_output}_{dataset}_idxes.jsonl", "w")
batch = []
with torch.inference_mode():
for instance_idx, instance in enumerate(tqdm.tqdm(reader.read(filename))):
if instance_idx >= query_size:
break
batch.append({"query": instance["pretokenized_input"]})
if len(batch) == args.batch_size:
batch_distances, batch_indices = query_index([i["query"] for i in batch])
for instance, distances, indices in zip(batch, batch_distances, batch_indices):
ids = [int(id_) for id_ in indices]
for neigh in neighbours_to_write:
for id_ in ids[:neigh]:
per_ds_per_neighbour_indices_frequencies[dataset][neigh][id_] += 1
distances = [float(distance) for distance in distances]
datum = {"ids": ids, "distances": distances}
print(json.dumps(datum), file=outputfile)
outputfile.flush()
batch = []
print(f"\nDone searching for {dataset}.")
for indices_frequencies in per_ds_per_neighbour_indices_frequencies[dataset].values():
mx_i = max(indices_frequencies.keys())
max_index = max(mx_i, max_index)
print("Done searching for all datasets. Now writing data...")
# pause in case i havent made the file yet.
# create to save
# import pickle
# indices = [per_ds_per_neighbour_indices_frequencies[dataset][neighbours_to_write[0]] for dataset in datasets]
# pickle.dump(indices, open('tmp_indices.pkl', 'w'))
# indices = pickle.load(open('tmp_indices.pkl', 'rb'))
outfiles = [f'{args.outfile_location}/{outfile}_{args.query_size}q_{args.num_neighbors_search}n.jsonl' for outfile in datasets]
files = [open(o, "w") for o in outfiles]
for i, line in tqdm.tqdm(enumerate(open(args.p3_data))):
if i > max_index:
break
for j, dataset in enumerate(datasets):
indices_frequencies = per_ds_per_neighbour_indices_frequencies[dataset][neighbours_to_write[0]]
if i in indices_frequencies:
instance = json.loads(line)
instance["index_id"] = i
instance["attribution_frequency"] = indices_frequencies[i]
print(json.dumps(instance), file=files[j])
# for i, line in tqdm.tqdm(enumerate(open(args.p3_data))):
# for j, (per_ds_per_neighbour_indices_frequencies, dataset) in enumerate(zip(indices, datasets)):
# if i in per_ds_per_neighbour_indices_frequencies:
# instance = json.loads(line)
# instance["index_id"] = i
# instance["attribution_frequency"] = per_ds_per_neighbour_indices_frequencies[i]
# print(json.dumps(instance), file=files[j])
print(f"\n Done writing training data for all datsets")
| data-efficient-finetuning-main | scripts/retrieve_training_data_rank.py |
import json
from tqdm import tqdm
from collections import defaultdict
import os
cluster_data = json.load(open("./t0_cluster_data.json"))
prompt_dataset_groups = defaultdict(list)
for instances in cluster_data.values():
for instance in instances:
prompt_dataset_groups[instance['dataset']].append(instance)
def infer_prefix_suffix(group):
group_inputs = list(set([x['input'] for x in group]))
instance1 = group_inputs[0]
group_prefix = None
group_suffix = None
for instance2 in group_inputs[1:]:
for i in range(1, len(instance1)):
if instance1[:i] != instance2[:i]:
prefix = instance1[:i-1]
break
if group_prefix is None:
group_prefix = prefix
else:
group_prefix = prefix if len(prefix) < len(group_prefix) else group_prefix
for i in range(1, len(instance1)):
if instance1[-i:] != instance2[-i:]:
suffix = instance1[-(i-1):] if i != 1 else ''
break
if group_suffix is None:
group_suffix = suffix
else:
group_suffix = suffix if len(suffix) < len(group_suffix) else group_suffix
return prefix, suffix
if os.path.exists("p3_prompts.json"):
print("Prompts file exists!")
else:
prompts = {}
print("Inferring prompts..")
for prompt_dataset_name, group in tqdm(prompt_dataset_groups.items()):
prompt_prefix, prompt_suffix = infer_prefix_suffix(group)
prompts[prompt_dataset_name] = {"prefix": prompt_prefix, "suffix": prompt_suffix}
with open("p3_prompts.json", "w") as outfile:
json.dump(prompts, outfile, indent=2)
| data-efficient-finetuning-main | scripts/infer_prompts.py |
import json
import sys
import tqdm
from collections import defaultdict
full_data = json.load(open(sys.argv[1]))
input_data = json.load(open(sys.argv[2]))
output_file = sys.argv[3]
special_dataset_prefixes = ["hellaswag", "winogrande", "super_glue_copa"]
dataset_options_dict = defaultdict(set)
instances_options_dict = defaultdict(lambda: {"instances": [], "options": []})
def is_dataset_special(dataset_name):
for prefix in special_dataset_prefixes:
if dataset_name.startswith(prefix):
return True
return False
for dataset_name, dataset_info in full_data.items():
for split in ["validation", "train"]:
for instance_id, instance_info in dataset_info[split].items():
if is_dataset_special(dataset_name):
instances_options_dict[instance_info["input"]]["instances"].append((dataset_name, split, instance_id))
instances_options_dict[instance_info["input"]]["options"].append(instance_info["target"])
else:
dataset_options_dict[dataset_name].add(instance_info["target"])
instance_options = {}
for options_info in instances_options_dict.values():
for instance_id in options_info["instances"]:
instance_options[instance_id] = options_info["options"]
dataset_options_dict = {k: list(v) for k, v in dataset_options_dict.items()}
print(f"Accumulated dataset options of size {len(dataset_options_dict)}")
for dataset_name, dataset_info in tqdm.tqdm(input_data.items()):
for split in ["validation", "train"]:
for instance_id, instance_info in dataset_info[split].items():
if is_dataset_special(dataset_name):
options = instance_options[(dataset_name, split, instance_id)]
else:
options = dataset_options_dict[dataset_name]
instance_info["options"] = options
print("Added options to all instances")
with open("dataset_options.json", "w") as outfile:
json.dump(dataset_options_dict, outfile, indent=2)
with open("instance_specific_options.jsonl", "w") as outfile:
for k, v in instance_options.items():
print(json.dumps({"dataset": k[0], "split": k[1], "instance_id": k[2], "options": v}), file=outfile)
with open(output_file, "w") as outfile:
json.dump(input_data, outfile, indent=2)
| data-efficient-finetuning-main | scripts/add_options.py |
import sys
import random
from attribution.huggingface_readers import (
RTEReader,
ANLIR1Reader,
ANLIR2Reader,
ANLIR3Reader,
WiCReader,
WSCReader,
WinoGrandeReader,
HellaSwagReader,
COPAReader,
CBReader,
StoryClozeReader,
CaseHOLDReader
)
from attribution.drop_reader import DropMReader
from attribution.qasper_reader import QasperEvidencePromptReader
# qasper is the only reader that requires a file
qasper_file = sys.argv[1]
def print_dataset(dataset_generator, outfile_name):
with open(outfile_name, "w") as outfile:
for i, instance in enumerate(dataset_generator):
if i >= 1000:
break
outfile.write(str(i) + "\t" + instance["pretokenized_input"].replace("\n", "").replace("\r", "").replace("\t", " ") + "\n")
ds_names = [
"rte",
"anli_r1",
"anli_r2",
"anli_r3",
"wic",
"copa",
"wsc",
"winogrande",
"hellaswag",
"cb",
"storycloze",
"casehold",
"drop",
]
readers = [
RTEReader(split_name='train', use_val_split=False, return_original_instance=True),
ANLIR1Reader(split_name='train', use_val_split=False, return_original_instance=True),
ANLIR2Reader(split_name='train', use_val_split=False, return_original_instance=True),
ANLIR3Reader(split_name='train', use_val_split=False, return_original_instance=True),
WiCReader(split_name='train', use_val_split=False, return_original_instance=True),
COPAReader(split_name='train', use_val_split=False, return_original_instance=True),
WSCReader(split_name='train', use_val_split=False, return_original_instance=True),
WinoGrandeReader(split_name='train', use_val_split=False, return_original_instance=True),
HellaSwagReader(split_name='train', use_val_split=False, return_original_instance=True),
CBReader(split_name='train', use_val_split=False, return_original_instance=True),
StoryClozeReader(split_name='train', use_val_split=False, return_original_instance=True),
CaseHOLDReader(split_name='validation', return_original_instance=True),
DropMReader(split_name='validation', return_original_instance=True)
]
for ds, reader in zip(ds_names, readers):
print("printing out dataset: ", ds)
print_dataset(reader.read('dummy'), f"queries/{ds}.tsv")
# qasper we handle special
print("printing out dataset: qasper")
qasper_reader = QasperEvidencePromptReader(return_original_query=True)
qasper_instances = [i for i in qasper_reader.read(qasper_file)]
random.shuffle(qasper_instances)
print_dataset(qasper_instances, "queries/qasper.tsv")
| data-efficient-finetuning-main | scripts/dump_dataset_queries.py |
import sys
import os
import json
from tqdm import tqdm
from attribution.ni_reader import NaturalInstructionsReader
from natural_instructions.task_eval_splits import ALL_EVAL_SPLITS
outfolder = sys.argv[1]
reader = NaturalInstructionsReader(
return_original_instance=True,
split_name='test',
add_task_definition=False,
num_pos_examples=0
)
split_queries = {k: [] for k in ALL_EVAL_SPLITS.keys() if 'cause' in k}
for instance in tqdm(reader.read('dummy')):
for k, v in ALL_EVAL_SPLITS.items():
if 'cause' not in k:
continue
if instance['preindexed_id'].split('-')[0] in v:
split_queries[k].append({
'input': instance['pretokenized_input'],
'target': instance['pretokenized_target'],
'id': instance['preindexed_id']
})
for k, v in split_queries.items():
with open(os.path.join(outfolder, f"{k}_queries.jsonl"), 'w') as w:
for sample in tqdm(v):
w.write(json.dumps(sample) + '\n')
| data-efficient-finetuning-main | scripts/generate_natural_instructions_query_splits.py |
import json
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--predictions', type=str, required=True)
parser.add_argument('--data_file', type=str, required=True)
args = parser.parse_args()
references = {}
data = json.load(open(args.data_file))
for paper_data in data.values():
for qa_info in paper_data["qas"]:
qid = qa_info['question_id']
all_evidence = []
for answer_info in qa_info['answers']:
all_evidence.append(answer_info['answer']['evidence'])
references[qid] = all_evidence
predictions = {}
for line in open(args.predictions, 'r'):
datum = json.loads(line)
question_ids = [d["question_id"] for d in datum["metadata"]]
paragraphs = [d["paragraph"] for d in datum["metadata"]]
answer_choices = datum["metadata"][0]["answer_options"]
response_indices = datum["response"]
responses = [answer_choices[i] for i in response_indices]
for qid, response, paragraph in zip(question_ids, responses, paragraphs):
if qid not in predictions:
predictions[qid] = []
if "Yes" in response:
predictions[qid].append(paragraph)
num_non_nulls = sum([p != [] for p in predictions.values()])
print(len([l for l in open(args.predictions, 'r')]))
print(f"Non null predictions: {num_non_nulls} / {len(predictions)} ({round(num_non_nulls / len(predictions) * 100, 2)}%)")
precision = 0
recall = 0
f1 = 0
base_precision = 0
base_recall = 0
base_f1 = 0
def compute_metrics(predictions, refs):
if not refs:
return (0.0, 0.0, 0.0) if predictions else (1.0, 1.0, 1.0)
overlap = set(refs).intersection(predictions)
precision = len(overlap) / len(predictions) if predictions else 1.0
recall = len(overlap) / len(refs)
f1 = (2*precision*recall / (precision + recall)) if (precision + recall) != 0 else 0.0
return (precision, recall, f1)
for qid, q_references in references.items():
metrics = [compute_metrics(predictions[qid], refs) for refs in q_references]
max_precision, max_recall, max_f1 = sorted(metrics, key=lambda x: x[2], reverse=True)[0]
precision += max_precision
recall += max_recall
f1 += max_f1
baseline_metrics = [compute_metrics([], refs) for refs in q_references]
max_b_precision, max_b_recall, max_b_f1 = sorted(baseline_metrics, key=lambda x: x[2], reverse=True)[0]
base_precision += max_b_precision
base_recall += max_b_recall
base_f1 += max_b_f1
print(f"Precision: {precision/len(references)}")
print(f"Recall: {recall/len(references)}")
print(f"F1: {f1/len(references)}")
print("\nBaseline:")
print(f"Precision: {base_precision/len(references)}")
print(f"Recall: {base_recall/len(references)}")
print(f"F1: {base_f1/len(references)}")
| data-efficient-finetuning-main | scripts/evaluate_qasper_evidence_predictions.py |
import json
from tqdm import tqdm
from collections import defaultdict
prompts = json.load(open("p3_prompts.json"))
cluster_data = json.load(open("./t0_cluster_data.json"))
raw_instances = defaultdict(list)
print("Grouping instances..")
prompt_issues_with_datasets = defaultdict(list)
for cluster_id, data in tqdm(cluster_data.items()):
for instance in data:
dataset = instance['dataset']
prefix = prompts[dataset]['prefix']
mids = prompts[dataset]['mid']
suffix = prompts[dataset]['suffix']
input_with_prompt = instance['input']
original_input = instance['input'][len(prefix):]
if len(suffix) != 0:
original_input = original_input[:-len(suffix)]
input_parts = []
input_to_split = original_input
for mid_part in mids:
if mid_part == "":
continue
if mid_part not in input_to_split:
prompt_issues_with_datasets[dataset].append(original_input)
continue
parts = input_to_split.split(mid_part, 1)
split_input, rest = parts
input_parts.append(split_input)
input_to_split = rest
input_parts.append(input_to_split)
raw_instances[" ||| ".join(input_parts)].append({"cluster_id": cluster_id, "input": instance, "parts": input_parts})
with open('p3_prompt_grouping_errors.json', 'w') as outfile:
json.dump(prompt_issues_with_datasets, outfile, indent=2)
print(f"Splitting issues in datasets: {prompt_issues_with_datasets.keys()}")
with open("p3_prompt_grouped_data.json", "w") as outfile:
json.dump(raw_instances, outfile, indent=2)
| data-efficient-finetuning-main | scripts/group_instances_without_prompts.py |
import sys
# assumed format (from pyserini)
# 0 Q0 41840491 10 51.878502 Anserini
def parse_example(line):
example = line.split(" ")
# get the rank and the index
rank = int(example[3])
index = example[2]
return rank, index
# filter the examples we keep
def filter_examples(examples, max_rank):
return [(r, i) for (r, i) in examples if r < max_rank]
def construct_set_examples(examples):
return list(set(i for _, i in examples))
output_file = open(sys.argv[1], 'r')
limit = int(sys.argv[3])
examples = [parse_example(l.strip()) for l in output_file]
max_rank = 500
num_samples = len(construct_set_examples(examples))
while num_samples > limit:
max_rank -= 1
nu_examples = filter_examples(examples, max_rank)
index_list = construct_set_examples(nu_examples)
nu_num_samples = len(index_list)
# we will err to letting bm25 having more examples to be fair :)
if nu_num_samples <= limit:
print(f'\n\nWe have filtered down to size {num_samples} at max rank {max_rank+1} and finish\n')
outfile = open(sys.argv[2], 'w')
for idx in index_list:
outfile.write(f'{idx}\n')
outfile.close()
break
else:
examples = nu_examples
num_samples = nu_num_samples
print(f'\rWe have filtered down to size {nu_num_samples} at max rank {max_rank}', end='')
| data-efficient-finetuning-main | scripts/parse_pyserini_output.py |
import json
import sys
stuff = {}
file = open(sys.argv[1], 'r')
for line in file:
sample = json.loads(line)
stuff[sample['query_id'][0]] = sample['answer'][0]
with open('drop_preds.json', 'w') as f:
json.dump(stuff, f)
| data-efficient-finetuning-main | scripts/evaluate_unfair_tos_preds.py |
import pickle
from datasets import load_dataset
import os
import json
from collections import defaultdict
from tqdm import tqdm
import sys
cluster_indices_directory = "" # location of p3 cluster indices
clusters_text = defaultdict(list)
errors = []
for filename in tqdm(os.listdir(cluster_indices_directory)):
fullpath = os.path.join(cluster_indices_directory, filename)
cluster_id = filename.replace("cluster_", "").replace("_indices.pkl", "")
if "test" in cluster_id:
split = "validation"
else:
split = "train"
cluster_data = pickle.load(open(fullpath, "rb"))
for dataset_id, indices in cluster_data.items():
try:
dataset = load_dataset("bigscience/P3", dataset_id, split=split)
except ValueError:
errors.append({"cluster_id": cluster_id, "dataset_id": dataset_id, "split": split})
continue
indices_set = set(indices)
for index, datum in enumerate(dataset):
if index > max(indices):
break
if index in indices_set:
datum = {
"input": dataset[index]["inputs_pretokenized"],
"target": dataset[index]["targets_pretokenized"],
"dataset": dataset_id,
"index": index
}
if "is_correct" in dataset[index]:
datum["is_correct"] = dataset[index]["is_correct"]
clusters_text[cluster_id].append(datum)
with open("t0_cluster_data.json", "w") as outfile:
json.dump(clusters_text, outfile, indent=2)
with open("t0_cluster_errors.json", "w") as outfile:
json.dump(errors, outfile, indent=2)
print("Sizes:")
for cluster_id, cluster_data in clusters_text.items():
print(f"{cluster_id}: {len(cluster_data)}")
| data-efficient-finetuning-main | scripts/show_clusters.py |
import argparse
import json
import string
from rouge_score import rouge_scorer
from transformers import AutoTokenizer
class GPTTokenizer:
gpt_tokenizer = AutoTokenizer.from_pretrained("gpt2", max_length=1e5)
def tokenize(self, s):
tokens = self.gpt_tokenizer.tokenize(s)
# GPT2 uses Byte-level BPE, which will include space as part of the word.
# But for the first word of a sentence, there is no space before it.
# So, we remove all the added spaces ("Ġ").
tokens = [t.lstrip("Ġ") for t in tokens]
return tokens
default_rouge_scorer = rouge_scorer.RougeScorer(["rougeL"], use_stemmer=True)
xlingual_tokenizer = GPTTokenizer()
xlingual_rouge_scorer = rouge_scorer.RougeScorer(["rougeL"], tokenizer=xlingual_tokenizer)
# adapted the flowing from Squad v1.1 evaluation, without removing the articles.
def normalize_answer(s):
"""Lower text and remove punctuation, and extra whitespace."""
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_punc(lower(s)))
def exact_match(prediction, ground_truth, xlingual=False):
return normalize_answer(prediction) == normalize_answer(ground_truth)
def rouge(prediction, ground_truth, xlingual=False):
if xlingual:
scorer = xlingual_rouge_scorer
else:
scorer = default_rouge_scorer
scores = scorer.score(prediction=prediction, target=ground_truth)
return scores["rougeL"].fmeasure
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths, xlingual=False):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth, xlingual=xlingual)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def compute_metrics(predictions, references, xlingual=False):
assert len(predictions) == len(
references
), f"# of predictions {len(predictions)} doesn't match # of references {len(references)}."
em, rougeL = 0, 0
for pred, gold in zip(predictions, references):
assert isinstance(gold, list)
em += metric_max_over_ground_truths(
exact_match, prediction=pred, ground_truths=gold, xlingual=xlingual
)
rougeL += metric_max_over_ground_truths(
rouge, prediction=pred, ground_truths=gold, xlingual=xlingual
)
em = 100.0 * em / len(references)
rougeL = 100.0 * rougeL / len(references)
metrics = {"exact_match": em, "rougeL": rougeL}
metrics = {k: round(v, 4) for k, v in metrics.items()}
return metrics
def compute_grouped_metrics(predictions, references, groups, xlingual=False):
assert len(predictions) == len(references) == len(groups)
examples_by_group = {}
for pred, gold, group in zip(predictions, references, groups):
if group not in examples_by_group:
examples_by_group[group] = []
examples_by_group[group].append((pred, gold))
results = {}
for group, group_examples in examples_by_group.items():
task_predictions, task_references = zip(*group_examples)
group_metrics = compute_metrics(task_predictions, task_references, xlingual=xlingual)
for metric, value in group_metrics.items():
results[f"{metric}_for_{group}"] = value
return results
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--prediction_file",
required=True,
help="Jsonl file with each line corresponding to a prediction. "
"Each json object should have an `id` and a `prediction` key.",
)
parser.add_argument(
"--reference_file",
required=True,
help="Jsonl file with each line corresponding to a reference. "
"Each json object should have an `id` and a `references` key. "
"`task_id`, `task_category` and `task_track` are optional, which will be used to "
"compute the per-task performance, per-category performance and the "
"performance for default (english) / xlingual Tracks.",
)
parser.add_argument("--output_file", help="Jsonl file to write the results to.")
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
eval_instances = {}
with open(args.reference_file) as fin:
for line in fin:
instance = json.loads(line)
# if track is not provided in the refernce file, we use set the track
# to `default` and use the default tokenizer in rouge-score.
if "track" not in instance:
instance["track"] = "default"
eval_instances[instance["id"]] = instance
all_predictions = {}
with open(args.prediction_file) as fin:
for line in fin:
prediction = json.loads(line)
all_predictions[prediction["id"]] = prediction["prediction"]
all_results = {}
for track in ["default", "xlingual"]:
print("Evaluating track:", track)
instance_ids = [id for id, instance in eval_instances.items() if instance["track"] == track]
references = [eval_instances[id]["references"] for id in instance_ids]
predictions = []
missing_predictions = []
for id in instance_ids:
if id in all_predictions:
predictions.append(all_predictions[id])
else:
missing_predictions.append(id)
predictions.append("")
if missing_predictions:
print(
f"No prediction for {len(missing_predictions)} instances. Use empty string as prediction."
)
results = compute_metrics(predictions, references, xlingual=(track == "xlingual"))
print("======== Overall Metrics ========")
for metric, value in results.items():
print(f"{metric}: {value}")
all_results[f"{metric}_{track}_track"] = value
if "task_category" in eval_instances[instance_ids[0]]:
categories = [
"_".join(eval_instances[id]["task_category"].lower().split()) for id in instance_ids
]
results_per_category = compute_grouped_metrics(
predictions, references, categories, xlingual=(track == "xlingual")
)
print("======== Metrics per Category ========")
for metric, value in results_per_category.items():
print(f"{metric}: {value}")
all_results[f"{metric}_{track}_track"] = value
if "task_id" in eval_instances[instance_ids[0]]:
tasks = [eval_instances[id]["task_id"] for id in instance_ids]
results_per_task = compute_grouped_metrics(
predictions, references, tasks, xlingual=(track == "xlingual")
)
print("======== Metrics per Task ========")
for metric, value in results_per_task.items():
print(f"{metric}: {value}")
all_results[f"{metric}_{track}_track"] = value
if args.output_file:
with open(args.output_file, "w") as fout:
json.dump(all_results, fout, indent=2)
| data-efficient-finetuning-main | natural_instructions/ni_evaluation.py |
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Natural Instruction V2 Dataset."""
import json
import os
import random
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """
@article{wang2022benchmarking,
title={Benchmarking Generalization via In-Context Instructions on 1,600+ Language Tasks},
author={Wang, Yizhong and Mishra, Swaroop and Alipoormolabashi, Pegah and Kordi, Yeganeh and others},
journal={arXiv preprint arXiv:2204.07705},
year={2022}
}
"""
_DESCRIPTION = """
Natural-Instructions v2 is a benchmark of 1,600+ diverse language tasks and their expert-written instructions.
It covers 70+ distinct task types, such as tagging, in-filling, and rewriting.
These tasks are collected with contributions of NLP practitioners in the community and
through an iterative peer review process to ensure their quality.
"""
_URL = "https://instructions.apps.allenai.org/"
_VERSION = "2.7"
_RELEASE_URL = f"https://api.github.com/repos/allenai/natural-instructions/zipball/v{_VERSION}"
class NIConfig(datasets.BuilderConfig):
def __init__(
self,
split_subdir="splits/default/",
task_subdir="tasks/",
max_num_instances_per_task=None,
max_num_instances_per_eval_task=None,
seed=42,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.split_subdir: str = split_subdir
self.task_subdir: str = task_subdir
self.seed: int = seed
self.max_num_instances_per_task: int = max_num_instances_per_task
self.max_num_instances_per_eval_task: int = (
max_num_instances_per_eval_task or max_num_instances_per_task
)
class NaturalInstructions(datasets.GeneratorBasedBuilder):
"""NaturalInstructions Dataset."""
VERSION = datasets.Version(_VERSION + ".0")
BUILDER_CONFIG_CLASS = NIConfig
BUILDER_CONFIGS = [
NIConfig(name="default", description="Default config for NaturalInstructions V2")
]
DEFAULT_CONFIG_NAME = "default"
def _info(self):
return datasets.DatasetInfo(
version=self.VERSION,
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"), # instance_id
"Task": datasets.Value("string"),
"Contributors": datasets.Value("string"),
"Source": [datasets.Value("string")],
"URL": [datasets.Value("string")],
"Categories": [datasets.Value("string")],
"Reasoning": [datasets.Value("string")],
"Definition": [datasets.Value("string")],
"Positive Examples": [
{
"input": datasets.Value("string"),
"output": datasets.Value("string"),
"explanation": datasets.Value("string"),
}
],
"Negative Examples": [
{
"input": datasets.Value("string"),
"output": datasets.Value("string"),
"explanation": datasets.Value("string"),
}
],
"Input_language": [datasets.Value("string")],
"Output_language": [datasets.Value("string")],
"Instruction_language": [datasets.Value("string")],
"Domains": [datasets.Value("string")],
"Instance": {
"id": datasets.Value("string"),
"input": datasets.Value("string"),
"output": [datasets.Value("string")],
},
}
),
homepage=_URL,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
if self.config.data_dir is None:
dl_path = dl_manager.download_and_extract(_RELEASE_URL)
self.config.data_dir = os.path.join(
dl_path, os.listdir(dl_path)[0]
) # get the extracted directory
split_dir = os.path.join(self.config.data_dir, self.config.split_subdir)
task_dir = os.path.join(self.config.data_dir, self.config.task_subdir)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"path": os.path.join(split_dir, "train_tasks.txt"),
"task_dir": task_dir,
"max_num_instances_per_task": self.config.max_num_instances_per_task,
"split": datasets.Split.TRAIN,
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"path": os.path.join(split_dir, "test_tasks.txt"),
"task_dir": task_dir,
"max_num_instances_per_task": self.config.max_num_instances_per_eval_task,
"split": datasets.Split.TEST,
},
),
]
def _generate_examples(
self, path=None, task_dir=None, max_num_instances_per_task=None, split=None
):
"""Yields examples."""
logger.info(f"Reading {split} tasks from {path}")
with open(path, encoding="utf-8") as split_f:
for line in split_f:
task_name = line.strip()
task_path = os.path.join(task_dir, task_name + ".json")
with open(task_path, encoding="utf-8") as task_f:
s = task_f.read()
task_data = json.loads(s)
# rename task name to task_num + source + category
task_name = (
task_name.split("_")[0]
+ "_"
+ "_".join(task_data["Source"]).lower()
+ "_"
+ "_".join(task_data["Categories"][0].lower().split())
)
task_data["Task"] = task_name
if "Instruction Source" in task_data:
task_data.pop("Instruction Source")
all_instances = task_data.pop("Instances")
if split == datasets.Split.TEST:
# for testing tasks, 100 instances are selected for efficient
# evaluation and they are label-balanced.
# we put them in the first for reproducibility.
# so, we use them here
instances = all_instances[:100]
else:
instances = all_instances
if max_num_instances_per_task is not None and max_num_instances_per_task >= 0:
random.Random(self.config.seed).shuffle(instances)
instances = instances[:max_num_instances_per_task]
for idx, instance in enumerate(instances):
example = task_data.copy()
example["id"] = instance["id"]
example["Instance"] = instance
yield f"{task_name}_{idx}", example
| data-efficient-finetuning-main | natural_instructions/ni_dataset.py |
'''
Splits of the evak tasks, as laid out in the NIV2 paper
'''
ENTAILMENT = [
"task937",
# "task202",
# "task936",
# "task641",
# "task1344",
# "task1615",
# "task1385",
# "task935",
# "task199",
# "task1388",
# "task1554",
# "task640",
# "task534",
# "task201",
# "task1386",
# "task463",
# "task1387",
# "task738",
# "task1529",
# "task190",
# "task200",
# "task1612",
# "task970",
# "task890",
# "task464",
# "task1516",
# "task642",
]
CAUSE_EFFECT_CLASSIFICATION = [
#"task1178",
"task391",
# "task939",
# "task392",
# "task938",
# "task1168",
# "task828",
# "task1628",
# "task943",
# "task1182",
# "task1171",
# "task968",
# "task942",
# "task1181",
# "task1172",
# "task1393",
# "task1174",
# "task1627",
# "task1177",
# "task1184",
# "task1185",
# "task1176",
# "task614",
# "task1629",
# "task1175",
# "task827",
# "task1173",
# "task1180",
# "task1170",
# "task1183",
# "task969",
# "task941",
# "task1626",
# "task940",
# "task393",
# "task1169",
# "task1179",
]
COREFERENCE = [
"task1391",
# "task1664",
# "task304",
# "task892",
# "task891",
# "task330",
# "task401",
# "task033",
# "task133",
# "task329",
# "task249",
# "task648",
# "task1390",
# "task893",
]
DIALOGUE_ACT_RECOGNITION = [
"task879",
# "task362",
# "task1533",
# "task1534",
# "task880",
# "task1531",
# "task1394",
]
ANSWERABILITY = [
"task020",
# "task050",
# "task1439",
# "task233",
# "task226",
# "task396",
# "task1640",
# "task232",
# "task1442",
# "task242",
# "task1624",
# "task520",
# "task290",
# "task349",
]
WORD_ANALOGY = [
"task1155",
# "task1152",
# "task1158",
# "task1156",
# "task1157",
# "task1159",
# "task1153",
# "task1154",
]
OVERLAP = [
"task039",
# "task281",
]
KEYWORD_TAGGING = [
"task613",
# "task645",
# "task620",
# "task036",
# "task623",
]
QUESTION_REWRITING = [
"task670",
# "task121",
# "task1195",
# "task442",
# "task1345",
# "task035",
# "task671",
# "task1562",
# "task1622",
# "task034",
# "task402",
]
TITLE_GENERATION = [
"task1356",
# "task1540",
# "task1659",
# "task569",
# "task1342",
# "task220",
# "task1561",
# "task418",
# "task1358",
# "task769",
# "task219",
# "task602",
# "task1586",
# "task743",
# "task500",
# "task619",
# "task510",
# "task288",
# "task1161",
]
DATA_TO_TEXT = [
"task957",
# "task1631",
# "task1598",
# "task1728",
# "task102",
# "task677",
# "task1407",
# "task1409",
# "task760",
]
GRAMMAR_ERROR_CORRECTION = [
"task1557"
]
ALL_EVAL_SPLITS = {
"entailment": ENTAILMENT,
"cause_effect_classification": CAUSE_EFFECT_CLASSIFICATION,
"coreference": COREFERENCE,
"dialogue_act_recognition": DIALOGUE_ACT_RECOGNITION,
"answerability": ANSWERABILITY,
"word_analogy": WORD_ANALOGY,
"overlap": OVERLAP,
"keyword_tagging": KEYWORD_TAGGING,
"question_rewriting": QUESTION_REWRITING,
"title_generation": TITLE_GENERATION,
"data_to_text": DATA_TO_TEXT,
"grammar_error_correction": GRAMMAR_ERROR_CORRECTION
}
for k in ALL_EVAL_SPLITS:
print(k, len(ALL_EVAL_SPLITS[k])) | data-efficient-finetuning-main | natural_instructions/task_eval_splits.py |
import logging
import random
import string
from dataclasses import dataclass
from typing import Any, Optional, Union
from transformers import PreTrainedTokenizerBase
from transformers.utils import PaddingStrategy
logger = logging.getLogger(__name__)
@dataclass
class DataCollatorForNI:
tokenizer: PreTrainedTokenizerBase
model: Optional[Any] = None
padding: Union[bool, str, PaddingStrategy] = True
max_source_length: Optional[int] = None
max_target_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
label_pad_token_id: int = -100
return_tensors: str = "pt"
add_task_name: bool = False
add_task_definition: bool = True
num_pos_examples: int = 0
num_neg_examples: int = 0
add_explanation: bool = False
tk_instruct: bool = False
text_only: bool = False
random_gen: random.Random = random.Random(42)
def __call__(self, batch, return_tensors=None):
if return_tensors is None:
return_tensors = self.return_tensors
sources = []
for instance in batch:
if self.tk_instruct:
all_valid_encodings = [
# instruction only
{
"add_task_name": False,
"add_task_definition": True,
"num_pos_examples": 0,
"num_neg_examples": 0,
"add_explanation": False,
},
# example only
{
"add_task_name": False,
"add_task_definition": False,
"num_pos_examples": 2,
"num_neg_examples": 0,
"add_explanation": False,
},
# instruction + pos examples
{
"add_task_name": False,
"add_task_definition": True,
"num_pos_examples": 2,
"num_neg_examples": 0,
"add_explanation": False,
},
# instruction + pos examples + neg examples
{
"add_task_name": False,
"add_task_definition": True,
"num_pos_examples": 2,
"num_neg_examples": 2,
"add_explanation": False,
},
# instruction + pos (w. explanation)
{
"add_task_name": False,
"add_task_definition": True,
"num_pos_examples": 2,
"num_neg_examples": 0,
"add_explanation": True,
},
]
encoding_schema = self.random_gen.choice(all_valid_encodings)
add_task_name = encoding_schema["add_task_name"]
add_task_definition = encoding_schema["add_task_definition"]
num_pos_examples = encoding_schema["num_pos_examples"]
num_neg_examples = encoding_schema["num_neg_examples"]
add_explanation = encoding_schema["add_explanation"]
else:
add_task_name = self.add_task_name
add_task_definition = self.add_task_definition
num_pos_examples = self.num_pos_examples
num_neg_examples = self.num_neg_examples
add_explanation = self.add_explanation
task_input = ""
# add the input first.
task_input += "Now complete the following example -\n"
task_input += f"Input: {instance['Instance']['input'].strip()}"
if not task_input[-1] in string.punctuation:
task_input += "."
task_input += "\n"
task_input += "Output: "
task_name = ""
if add_task_name:
task_name += instance["Task"] + ". "
definition = ""
if add_task_definition:
if isinstance(instance["Definition"], list):
definition = (
"Definition: " + instance["Definition"][0].strip()
) # TODO: should we use <Definition>?
else:
definition = "Definition: " + instance["Definition"].strip()
if not definition[-1] in string.punctuation:
definition += "."
definition += "\n\n"
# try to add positive examples.
pos_examples = []
for idx, pos_example in enumerate(instance["Positive Examples"][:num_pos_examples]):
pos_example_str = f" Positive Example {idx+1} -\n"
pos_example_str += f"Input: {pos_example['input'].strip()}"
if not pos_example_str[-1] in string.punctuation:
pos_example_str += "."
pos_example_str += "\n"
pos_example_str += f" Output: {pos_example['output'].strip()}"
if not pos_example_str[-1] in string.punctuation:
pos_example_str += "."
pos_example_str += "\n"
if add_explanation and "explanation" in pos_example:
pos_example_str += f" Explanation: {pos_example['explanation'].strip()}"
if not pos_example_str[-1] in string.punctuation:
pos_example_str += "."
pos_example_str += "\n"
pos_example_str += "\n"
if (
len(
self.tokenizer(
definition + " ".join(pos_examples) + pos_example_str + task_input
)["input_ids"]
)
<= self.max_source_length
):
pos_examples.append(pos_example_str)
else:
break
# try to add negative examples.
neg_examples = []
for idx, neg_example in enumerate(instance["Negative Examples"][:num_neg_examples]):
neg_example_str = f" Negative Example {idx+1} -\n"
neg_example_str += f"Input: {neg_example['input'].strip()}"
if not neg_example_str[-1] in string.punctuation:
neg_example_str += "."
neg_example_str += "\n"
neg_example_str += f" Output: {neg_example['output'].strip()}"
if not neg_example_str[-1] in string.punctuation:
neg_example_str += "."
neg_example_str += "\n"
if add_explanation and "explanation" in neg_example:
neg_example_str += f" Explanation: {neg_example['explanation'].strip()}"
if not neg_example_str[-1] in string.punctuation:
neg_example_str += "."
neg_example_str += "\n"
neg_example_str += "\n"
if (
len(
self.tokenizer(
definition
+ " ".join(pos_examples)
+ " ".join(neg_examples)
+ neg_example_str
+ task_input
)["input_ids"]
)
<= self.max_source_length
):
neg_examples.append(neg_example_str)
else:
break
source = (
task_name + definition + "".join(pos_examples) + "".join(neg_examples) + task_input
)
tokenized_source = self.tokenizer(source)["input_ids"]
if len(tokenized_source) <= self.max_source_length:
sources.append(source)
else:
sources.append(
self.tokenizer.decode(
tokenized_source[: self.max_source_length], skip_special_tokens=True
)
)
if self.text_only:
model_inputs = {"inputs": sources}
else:
model_inputs = self.tokenizer(
sources,
max_length=self.max_source_length,
padding=self.padding,
return_tensors=self.return_tensors,
truncation=True,
pad_to_multiple_of=self.pad_to_multiple_of,
)
if "output" in batch[0]["Instance"] and batch[0]["Instance"]["output"]:
# Randomly select one reference if multiple are provided.
labels = [self.random_gen.choice(ex["Instance"]["output"]) for ex in batch]
if self.text_only:
model_inputs["labels"] = labels
else:
with self.tokenizer.as_target_tokenizer():
labels = self.tokenizer(
labels,
max_length=self.max_target_length,
padding=self.padding,
return_tensors=self.return_tensors,
truncation=True,
pad_to_multiple_of=self.pad_to_multiple_of,
)
label_mask = labels["attention_mask"].bool()
model_inputs["labels"] = labels["input_ids"].masked_fill(
~label_mask, self.label_pad_token_id
)
else:
model_inputs["labels"] = None
# prepare decoder_input_ids
if (
self.model is not None
and hasattr(self.model, "prepare_decoder_input_ids_from_labels")
and not self.text_only
):
decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(
labels=model_inputs["labels"]
)
model_inputs["decoder_input_ids"] = decoder_input_ids
return model_inputs
| data-efficient-finetuning-main | natural_instructions/ni_collator.py |
import logging
import uuid
from pathlib import Path
from typing import Generator
import petname
import pytest
from beaker import exceptions
from beaker.client import Beaker
from beaker.data_model import *
logger = logging.getLogger(__name__)
def unique_name() -> str:
return petname.generate() + "-" + str(uuid.uuid4())[:8] # type: ignore
def beaker_object_fixture(client: Beaker, service: str):
name = unique_name()
service_client = getattr(client, service)
not_found_exception = getattr(exceptions, f"{service.title()}NotFound")
yield name
try:
logger.info("Attempting to remove %s '%s' from Beaker", service, name)
service_client.delete(name)
logger.info("Successfully deleted %s '%s' from Beaker", service, name)
except not_found_exception:
logger.info("%s '%s' not found on Beaker", service.title(), name)
@pytest.fixture()
def workspace_name() -> str:
name = "ai2/beaker-py-testing"
return name
@pytest.fixture()
def alternate_workspace_name() -> str:
name = "ai2/beaker-py-testing-alternative"
return name
@pytest.fixture()
def client(workspace_name):
beaker_client = Beaker.from_env(
session=True, default_workspace=workspace_name, default_org="ai2"
)
return beaker_client
@pytest.fixture()
def alternate_workspace(client: Beaker, alternate_workspace_name: str) -> Workspace:
return client.workspace.get(alternate_workspace_name)
@pytest.fixture
def beaker_org_name() -> str:
return "ai2"
@pytest.fixture()
def beaker_org(client: Beaker, beaker_org_name: str) -> Organization:
return client.organization.get(beaker_org_name)
@pytest.fixture()
def docker_image_name(client: Beaker):
image = "hello-world"
client.docker.images.pull(image)
return image
@pytest.fixture()
def beaker_image_name(client: Beaker) -> Generator[str, None, None]:
yield from beaker_object_fixture(client, "image")
@pytest.fixture()
def beaker_python_image_name() -> str:
return "petew/python-3-10-alpine"
@pytest.fixture()
def alternate_beaker_image_name(client: Beaker) -> Generator[str, None, None]:
yield from beaker_object_fixture(client, "image")
@pytest.fixture()
def beaker_cluster_name() -> str:
return "ai2/canary"
@pytest.fixture()
def beaker_on_prem_cluster_name() -> str:
return "ai2/allennlp-cirrascale"
@pytest.fixture()
def experiment_name(client: Beaker) -> Generator[str, None, None]:
yield from beaker_object_fixture(client, "experiment")
@pytest.fixture()
def alternate_experiment_name(client: Beaker) -> Generator[str, None, None]:
yield from beaker_object_fixture(client, "experiment")
@pytest.fixture()
def dataset_name(client: Beaker) -> Generator[str, None, None]:
yield from beaker_object_fixture(client, "dataset")
@pytest.fixture()
def alternate_dataset_name(client: Beaker) -> Generator[str, None, None]:
yield from beaker_object_fixture(client, "dataset")
@pytest.fixture()
def download_path(dataset_name, tmp_path) -> Path:
path = tmp_path / dataset_name
return path
@pytest.fixture()
def hello_world_experiment_name() -> str:
return "hello-world-1"
@pytest.fixture()
def hello_world_experiment_id() -> str:
return "01GRYY998GG0VP97MKRE574GKA"
@pytest.fixture()
def hello_world_image_name() -> str:
return "petew/hello-world"
@pytest.fixture()
def hello_world_job_id() -> str:
return "01GRYY9P9G5ZJ0F66NV3AHN9AN"
@pytest.fixture()
def beaker_node_id(client: Beaker, beaker_on_prem_cluster_name: str) -> str:
return client.cluster.nodes(beaker_on_prem_cluster_name)[0].id
@pytest.fixture()
def secret_name(client: Beaker) -> Generator[str, None, None]:
yield from beaker_object_fixture(client, "secret")
@pytest.fixture()
def archived_workspace_name() -> str:
return "ai2/beaker-py-testing-archived"
@pytest.fixture()
def archived_workspace(client: Beaker, archived_workspace_name: str) -> Workspace:
workspace = client.workspace.ensure(archived_workspace_name)
if not workspace.archived:
return client.workspace.archive(archived_workspace_name)
else:
return workspace
@pytest.fixture()
def squad_dataset_name() -> str:
return "petew/squad-train"
@pytest.fixture()
def squad_dataset_file_name() -> str:
return "squad-train.arrow"
@pytest.fixture()
def alternate_user(client: Beaker) -> Account:
return client.account.get("epwalsh10")
@pytest.fixture()
def group_name(client: Beaker) -> Generator[str, None, None]:
yield from beaker_object_fixture(client, "group")
@pytest.fixture()
def alternate_group_name(client: Beaker) -> Generator[str, None, None]:
yield from beaker_object_fixture(client, "group")
@pytest.fixture()
def experiment_id_with_metrics() -> str:
return "01G371J03VGJGK720TMZWFQNV3"
@pytest.fixture()
def experiment_id_with_results() -> str:
return "01G371J03VGJGK720TMZWFQNV3"
| beaker-py-main | conftest.py |
"""
Tests creating, pushing, and pulling images to/from Beaker.
This requires building a test image called "beaker-py-test" using the Dockerfile
at "test_fixtures/docker/Dockerfile".
"""
from beaker import Beaker
LOCAL_IMAGE_TAG = "beaker-py-test"
def test_image_create_workflow(
client: Beaker, beaker_image_name: str, alternate_beaker_image_name: str
):
# Create and push the image.
print(f"Creating image '{beaker_image_name}'")
image = client.image.create(beaker_image_name, LOCAL_IMAGE_TAG)
assert image.name == beaker_image_name
assert image.original_tag == LOCAL_IMAGE_TAG
# Rename the image.
print(f"Renaming image to '{alternate_beaker_image_name}'")
image = client.image.rename(image, alternate_beaker_image_name)
assert image.name == alternate_beaker_image_name
def test_image_pull_workflow(client: Beaker, beaker_python_image_name: str):
print(f"Pulling image '{beaker_python_image_name}' from Beaker")
local_image = client.image.pull(beaker_python_image_name)
print(f"Pull complete: {local_image}")
| beaker-py-main | integration_tests/images_test.py |
import pytest
from beaker import (
Beaker,
ExperimentConflict,
ExperimentNotFound,
ExperimentSpec,
ImageSource,
ResultSpec,
TaskContext,
TaskSpec,
)
def test_experiment_workflow(
client: Beaker,
experiment_name: str,
alternate_experiment_name: str,
beaker_cluster_name: str,
hello_world_experiment_name: str,
):
spec = ExperimentSpec(
tasks=[
TaskSpec(
name="main",
image=ImageSource(docker="hello-world"),
context=TaskContext(cluster=beaker_cluster_name),
result=ResultSpec(path="/unused"), # required even if the task produces no output.
),
],
)
# Create the experiment.
experiment = client.experiment.create(experiment_name, spec)
# Wait for it to finish.
experiment = client.experiment.wait_for(experiment, timeout=60 * 5)[0]
# Get the logs.
logs = "".join([line.decode() for line in client.experiment.logs(experiment)])
assert logs
# Test experiment conflict error with rename.
with pytest.raises(ExperimentConflict):
client.experiment.rename(experiment, hello_world_experiment_name)
# Rename the experiment.
experiment = client.experiment.rename(experiment, alternate_experiment_name)
assert experiment.name == alternate_experiment_name
# Test experiment not found error.
with pytest.raises(ExperimentNotFound):
client.experiment.get(experiment_name)
| beaker-py-main | integration_tests/experiments_test.py |
import time
from beaker import Beaker, ExperimentSpec, TaskSpec
def test_job_stop_and_finalize(client: Beaker, experiment_name: str, beaker_cluster_name: str):
start = time.time()
spec = ExperimentSpec().with_task(
TaskSpec.new(
"main",
beaker_cluster_name,
docker_image="hello-world",
),
)
print(f"Creating experiment {experiment_name}")
experiment = client.experiment.create(experiment_name, spec)
print("Waiting for jobs to register", end="")
while not experiment.jobs:
if time.time() - start > (60 * 5):
raise TimeoutError
time.sleep(2)
print(".", end="")
experiment = client.experiment.get(experiment.id)
print("\nStopping job")
client.job.stop(experiment.jobs[0])
print("Finalizing job")
job = client.job.finalize(experiment.jobs[0])
assert job.is_finalized
| beaker-py-main | integration_tests/jobs_test.py |
import os
import tempfile
from pathlib import Path
from typing import Optional
import pytest
from beaker.client import Beaker, DatasetClient
from beaker.exceptions import DatasetWriteError
class TestDataset:
def setup_method(self):
self.file_a = tempfile.NamedTemporaryFile(delete=False)
self.file_a_contents = b"a" * 10
self.file_a.write(self.file_a_contents)
self.file_a.seek(0)
self.file_a.close()
self.file_b = tempfile.NamedTemporaryFile(delete=False)
self.file_b_contents = b"b" * 10
self.file_b.write(self.file_b_contents)
self.file_b.seek(0)
self.file_b.close()
def teardown_method(self):
os.remove(self.file_a.name)
os.remove(self.file_b.name)
def test_dataset_write_error(self, client: Beaker, dataset_name: str):
dataset = client.dataset.create(dataset_name, self.file_a.name, commit=True)
with pytest.raises(DatasetWriteError):
client.dataset.sync(dataset, self.file_b.name)
def test_dataset_basics(self, client: Beaker, dataset_name: str, alternate_dataset_name: str):
dataset = client.dataset.create(
dataset_name,
self.file_a.name,
self.file_b.name,
commit=True,
description="Testing dataset",
)
assert dataset.name == dataset_name
# Stream the whole thing at once.
contents = b"".join(list(client.dataset.stream_file(dataset, Path(self.file_a.name).name)))
assert contents == self.file_a_contents
# Stream just part of the file.
contents = b"".join(
list(client.dataset.stream_file(dataset, Path(self.file_a.name).name, offset=5))
)
assert contents == self.file_a_contents[5:]
# Calculate the size.
assert client.dataset.size(dataset) == 20
# Rename it.
dataset = client.dataset.rename(dataset, alternate_dataset_name)
assert dataset.name == alternate_dataset_name
class TestLargeFileDataset:
def setup_method(self):
self.original_size_limit = DatasetClient.REQUEST_SIZE_LIMIT
DatasetClient.REQUEST_SIZE_LIMIT = 1024
self.large_file = tempfile.NamedTemporaryFile(delete=False)
self.large_file_contents = b"a" * 1024 * 2
self.large_file.write(self.large_file_contents)
self.large_file.close()
def teardown_method(self):
DatasetClient.REQUEST_SIZE_LIMIT = self.original_size_limit
os.remove(self.large_file.name)
@pytest.mark.parametrize(
"commit_right_away",
(pytest.param(True, id="commit now"), pytest.param(False, id="commit later")),
)
def test_large_file_dataset(
self, client: Beaker, dataset_name: str, tmp_path: Path, commit_right_away: bool
):
# Create the dataset.
dataset = client.dataset.create(
dataset_name, self.large_file.name, commit=commit_right_away
)
if not commit_right_away:
dataset = client.dataset.commit(dataset)
# Verify fields.
assert dataset.name == dataset_name
assert dataset.committed is not None
# Fetch the dataset.
client.dataset.fetch(dataset, target=tmp_path)
large_file_path = tmp_path / self.large_file.name
assert large_file_path.is_file(), f"{list(tmp_path.iterdir())}"
with open(large_file_path, "rb") as large_file:
contents = large_file.read()
assert contents == self.large_file_contents
class TestManyFilesDataset:
@pytest.mark.parametrize(
"target",
(pytest.param("target_dir", id="target dir"), pytest.param(None, id="no target dir")),
)
def test_many_files_dataset(
self, client: Beaker, dataset_name: str, tmp_path: Path, target: Optional[str]
):
# Create the local sources.
dir_to_upload = tmp_path / "dataset_dir"
dir_to_upload.mkdir()
for i in range(100):
(dir_to_upload / f"file{i}.txt").open("w").write(str(i))
file_to_upload = tmp_path / "dataset_file.txt"
file_to_upload.open("w").write("Hello, World!")
# Create the dataset.
dataset = client.dataset.create(dataset_name, dir_to_upload, file_to_upload, target=target)
# List files in the dataset.
files = list(client.dataset.ls(dataset))
assert len(files) == 101
for file_info in files:
if target is not None:
assert file_info.path.startswith(target)
assert file_info.path.endswith(".txt")
# Download the dataset.
download_dir = tmp_path / "download"
client.dataset.fetch(dataset, target=download_dir)
base_dir = (download_dir / target) if target is not None else download_dir
for i in range(100):
downloaded = base_dir / f"file{i}.txt"
assert downloaded.is_file()
assert downloaded.open("r").read() == str(i)
assert (base_dir / "dataset_file.txt").is_file()
assert (base_dir / "dataset_file.txt").open("r").read() == "Hello, World!"
| beaker-py-main | integration_tests/datasets_test.py |
from beaker import Beaker, Organization
def test_cluster_get_on_prem(client: Beaker, beaker_on_prem_cluster_name: str):
cluster = client.cluster.get(beaker_on_prem_cluster_name)
assert cluster.autoscale is False
assert cluster.is_cloud is False
assert cluster.is_active is True
assert cluster.node_spec is None
assert cluster.node_shape is None
def test_cluster_utilization(client: Beaker, beaker_on_prem_cluster_name: str):
client.cluster.utilization(beaker_on_prem_cluster_name)
def test_cluster_list(client: Beaker, beaker_org: Organization):
client.cluster.list(beaker_org)
def test_cluster_nodes(client: Beaker, beaker_on_prem_cluster_name: str):
client.cluster.nodes(beaker_on_prem_cluster_name)
def test_cluster_url(client: Beaker):
assert (
client.cluster.url("ai2/allennlp-cirrascale")
== "https://beaker.org/cl/ai2/allennlp-cirrascale/details"
)
| beaker-py-main | tests/cluster_test.py |
import pytest
from beaker import Beaker, GroupConflict, GroupNotFound
def test_group_methods(
client: Beaker, group_name: str, alternate_group_name: str, hello_world_experiment_id: str
):
# Create a new group.
group = client.group.create(group_name)
assert group.name == group_name
# Add an experiment to the group.
client.group.add_experiments(group, hello_world_experiment_id)
assert len(client.group.list_experiments(group)) == 1
# Export the experiments from the group
# (expect a three line CSV: the header, one experiment, and a trailing newline)
export = list(client.group.export_experiments(group))
assert len(export) == 1
assert len(export[0].decode().split("\n")) == 3
# Remove the experiment from the group.
client.group.remove_experiments(group, hello_world_experiment_id)
assert len(client.group.list_experiments(group)) == 0
# Rename the group.
group = client.group.rename(group, alternate_group_name)
assert group.name == alternate_group_name
# Test group not found error.
with pytest.raises(GroupNotFound):
client.group.get(group_name)
# Test group conflict error.
with pytest.raises(GroupConflict):
client.group.create(alternate_group_name)
# List groups in the workspace.
group_names = [group.name for group in client.workspace.groups()]
assert alternate_group_name in group_names
| beaker-py-main | tests/group_test.py |
import pytest
from beaker import (
Beaker,
ClusterNotFound,
CurrentJobStatus,
DataMount,
DatasetNotFound,
DataSource,
ExperimentSpec,
ImageNotFound,
ImageSource,
ResultSpec,
SecretNotFound,
TaskContext,
TaskNotFound,
TaskSpec,
)
def test_experiment_get(client: Beaker, hello_world_experiment_id: str):
exp = client.experiment.get(hello_world_experiment_id)
assert exp.id == hello_world_experiment_id
assert exp.jobs
assert exp.jobs[0].status.current == CurrentJobStatus.finalized
# Get with name.
assert exp.name is not None
client.experiment.get(exp.name)
# Get with full name.
assert exp.full_name is not None
client.experiment.get(exp.full_name)
def test_experiment_tasks(client: Beaker, hello_world_experiment_id: str):
tasks = client.experiment.tasks(hello_world_experiment_id)
assert len(tasks) == 1
def test_experiment_metrics_none(client: Beaker, hello_world_experiment_id: str):
metrics = client.experiment.metrics(hello_world_experiment_id)
assert metrics is None
def test_experiment_metrics(client: Beaker, experiment_id_with_metrics: str):
metrics = client.experiment.metrics(experiment_id_with_metrics)
assert metrics is not None
def test_experiment_results(client, experiment_id_with_results: str):
results = client.experiment.results(experiment_id_with_results)
assert results is not None
assert client.dataset.size(results) > 0
def test_experiment_empty_results(client: Beaker, hello_world_experiment_id: str):
results = client.experiment.results(hello_world_experiment_id)
assert results is None or (client.dataset.size(results) == 0)
def test_experiment_spec(client: Beaker, hello_world_experiment_id: str):
spec = client.experiment.spec(hello_world_experiment_id)
assert isinstance(spec, ExperimentSpec)
def test_create_experiment_image_not_found(
client: Beaker,
experiment_name: str,
beaker_cluster_name: str,
):
spec = ExperimentSpec(
tasks=[
TaskSpec(
name="main",
image=ImageSource(beaker="does-not-exist"),
context=TaskContext(cluster=beaker_cluster_name),
result=ResultSpec(path="/unused"),
),
],
)
with pytest.raises(ImageNotFound):
client.experiment.create(experiment_name, spec)
def test_create_experiment_dataset_not_found(
client: Beaker,
experiment_name: str,
beaker_cluster_name: str,
):
spec = ExperimentSpec(
tasks=[
TaskSpec(
name="main",
image=ImageSource(docker="hello-world"),
context=TaskContext(cluster=beaker_cluster_name),
result=ResultSpec(path="/unused"),
datasets=[
DataMount(source=DataSource(beaker="does-not-exist"), mount_path="/data")
],
),
],
)
with pytest.raises(DatasetNotFound):
client.experiment.create(experiment_name, spec)
def test_create_experiment_secret_not_found(
client: Beaker,
experiment_name: str,
beaker_cluster_name: str,
):
spec = ExperimentSpec(
tasks=[
TaskSpec(
name="main",
image=ImageSource(docker="hello-world"),
context=TaskContext(cluster=beaker_cluster_name),
result=ResultSpec(path="/unused"),
datasets=[
DataMount(source=DataSource(secret="does-not-exist"), mount_path="/data")
],
),
],
)
with pytest.raises(SecretNotFound):
client.experiment.create(experiment_name, spec)
def test_create_experiment_result_not_found(
client: Beaker,
experiment_name: str,
beaker_cluster_name: str,
):
spec = ExperimentSpec(
tasks=[
TaskSpec(
name="main",
image=ImageSource(docker="hello-world"),
context=TaskContext(cluster=beaker_cluster_name),
result=ResultSpec(path="/unused"),
datasets=[
DataMount(source=DataSource(result="does-not-exist"), mount_path="/data")
],
),
],
)
with pytest.raises(ValueError, match="does-not-exist"):
client.experiment.create(experiment_name, spec)
def test_create_experiment_cluster_not_found(
client: Beaker,
experiment_name: str,
):
spec = ExperimentSpec(
tasks=[
TaskSpec(
name="main",
image=ImageSource(docker="hello-world"),
context=TaskContext(cluster="does-not-exist"),
result=ResultSpec(path="/unused"),
),
],
)
with pytest.raises(ClusterNotFound):
client.experiment.create(experiment_name, spec)
def test_experiment_url(client: Beaker, hello_world_experiment_id: str):
assert (
client.experiment.url(hello_world_experiment_id)
== f"https://beaker.org/ex/{hello_world_experiment_id}"
)
assert (
client.experiment.url(hello_world_experiment_id, "main")
== f"https://beaker.org/ex/{hello_world_experiment_id}/tasks/01GRYY999VAT2QY75G89A826YS"
)
with pytest.raises(TaskNotFound, match="No task"):
client.experiment.url(hello_world_experiment_id, "foo")
| beaker-py-main | tests/experiment_test.py |
from typing import Optional, Union
import pytest
from beaker import (
Account,
Beaker,
Permission,
Workspace,
WorkspaceNotFound,
WorkspaceWriteError,
)
def test_ensure_workspace_invalid_name(client: Beaker):
with pytest.raises(ValueError, match="Invalid name"):
client.workspace.ensure("blah&&")
def test_workspace_get(client: Beaker, workspace_name: str):
workspace = client.workspace.get(workspace_name)
# Now get by ID.
client.workspace.get(workspace.id)
# Now get by name without the org prefix.
client.workspace.get(workspace.name)
@pytest.mark.parametrize("match", [pytest.param(v, id=f"match={v}") for v in (None, "squad")])
@pytest.mark.parametrize(
"results", [pytest.param(v, id=f"results={v}") for v in (None, True, False)]
)
@pytest.mark.parametrize(
"uncommitted", [pytest.param(v, id=f"uncommitted={v}") for v in (None, True, False)]
)
def test_workspace_datasets(
client: Beaker, match: Optional[str], results: Optional[bool], uncommitted: Optional[bool]
):
client.workspace.datasets(match=match, results=results, uncommitted=uncommitted, limit=50)
def test_workspace_experiments(client: Beaker, hello_world_experiment_name: str):
experiments = client.workspace.experiments(match=hello_world_experiment_name)
assert experiments
def test_workspace_images(client: Beaker):
images = client.workspace.images(match="hello-world")
assert images
def test_workspace_list(client: Beaker, workspace_name: str):
workspaces = client.workspace.list("ai2", match=workspace_name.split("/")[1])
assert workspaces
def test_archived_workspace_write_error(client: Beaker, archived_workspace: Workspace):
with pytest.raises(WorkspaceWriteError):
client.workspace.archive(archived_workspace)
with pytest.raises(WorkspaceWriteError):
client.secret.write("foo", "bar", workspace=archived_workspace)
def test_archived_workspace_read_ok(client: Beaker, archived_workspace: Workspace):
client.workspace.secrets(archived_workspace)
def test_organization_not_set(client: Beaker, archived_workspace: Workspace):
client.config.default_org = None
with pytest.raises(WorkspaceNotFound):
client.workspace.secrets(archived_workspace.name)
def test_workspace_move(
client: Beaker, workspace_name: str, alternate_workspace_name: str, dataset_name: str
):
dataset = client.dataset.create(dataset_name, workspace=alternate_workspace_name)
assert dataset.workspace_ref.full_name == alternate_workspace_name
client.workspace.move(dataset)
assert client.dataset.get(dataset.id).workspace_ref.full_name == workspace_name
def list_objects(client: Beaker, workspace: Optional[Union[str, Workspace]]):
client.workspace.secrets(workspace=workspace)
client.workspace.datasets(workspace=workspace, limit=2, results=False)
client.workspace.experiments(workspace=workspace, limit=2, match="hello-world")
client.workspace.images(workspace=workspace, limit=2, match="hello-world")
def test_default_workspace_list_objects(client: Beaker):
list_objects(client, None)
def test_workspace_list_objects_with_id(client: Beaker, alternate_workspace: Workspace):
list_objects(client, alternate_workspace.id)
def test_workspace_list_objects_with_short_name(client: Beaker, alternate_workspace: Workspace):
list_objects(client, alternate_workspace.name)
def test_workspace_list_objects_with_full_name(client: Beaker, alternate_workspace: Workspace):
list_objects(client, alternate_workspace.full_name)
def test_workspace_list_objects_with_object(client: Beaker, alternate_workspace: Workspace):
list_objects(client, alternate_workspace)
def test_workspace_get_permissions(client: Beaker):
client.workspace.get_permissions()
def test_workspace_grant_and_revoke_permissions(client: Beaker, alternate_user: Account):
client.workspace.grant_permissions(Permission.read, alternate_user)
client.workspace.revoke_permissions(alternate_user)
def test_workspace_set_visibility(client: Beaker):
client.workspace.set_visibility(public=False)
def test_workspace_set_visibility_archived(client: Beaker, archived_workspace_name: str):
client.workspace.set_visibility(public=False, workspace=archived_workspace_name)
def test_workspace_url(client: Beaker):
assert (
client.workspace.url("ai2/beaker-py-testing")
== "https://beaker.org/ws/ai2/beaker-py-testing"
)
| beaker-py-main | tests/workspace_test.py |
from pathlib import Path
import pytest
from beaker.data_model import *
from beaker.data_model.base import MappedSequence
from beaker.exceptions import ValidationError
def test_data_source_validation():
with pytest.raises(ValidationError, match="Exactly one"):
DataSource()
with pytest.raises(ValidationError, match="Exactly one"):
DataSource(beaker="foo", host_path="bar")
with pytest.raises(ValidationError, match="Exactly one"):
DataSource(beaker="foo", hostPath="bar") # type: ignore
assert DataSource(host_path="bar").host_path == "bar"
def test_experiment_spec_from_and_to_json_and_file(beaker_cluster_name: str, tmp_path: Path):
json_spec = {
"version": "v2",
"tasks": [
{
"name": "main",
"image": {"docker": "hello-world"},
"context": {"cluster": beaker_cluster_name},
"result": {"path": "/unused"},
"resources": {"memory": "512m", "sharedMemory": "512m"},
"hostNetworking": False,
"leaderSelection": False,
},
],
}
spec = ExperimentSpec.from_json(json_spec)
assert spec.to_json() == json_spec
spec_path = tmp_path / "spec.yml"
spec.to_file(spec_path)
assert ExperimentSpec.from_file(spec_path) == spec
def test_experiment_spec_validation():
with pytest.raises(ValidationError, match="Duplicate task name"):
ExperimentSpec.from_json(
{
"tasks": [
{
"name": "main",
"image": {"docker": "hello-world"},
"context": {"cluster": "foo"},
"result": {"path": "/unused"},
},
{
"name": "main",
"image": {"docker": "hello-world"},
"context": {"cluster": "bar"},
"result": {"path": "/unused"},
},
]
}
)
with pytest.raises(ValidationError, match="Duplicate task name"):
ExperimentSpec(
tasks=[
TaskSpec(
name="main",
image={"docker": "hello-world"}, # type: ignore
context={"cluster": "foo"}, # type: ignore
result={"path": "/unused"}, # type: ignore
),
TaskSpec(
name="main",
image={"docker": "hello-world"}, # type: ignore
context={"cluster": "bar"}, # type: ignore
result={"path": "/unused"}, # type: ignore
),
]
)
spec = ExperimentSpec().with_task(TaskSpec.new("main", "foo", docker_image="hello-world"))
with pytest.raises(ValueError, match="A task with the name"):
spec.with_task(TaskSpec.new("main", "bar", docker_image="hello-world"))
def test_snake_case_vs_lower_camel_case():
for x in (DataSource(host_path="/tmp/foo"), DataSource(hostPath="/tmp/foo")): # type: ignore
assert str(x) == "DataSource(beaker=None, host_path='/tmp/foo', result=None, secret=None)"
assert x.host_path == "/tmp/foo"
x.host_path = "/tmp/bar"
assert str(x) == "DataSource(beaker=None, host_path='/tmp/bar', result=None, secret=None)"
assert x.to_json() == {"hostPath": "/tmp/bar"}
def test_digest_init():
# All of these are equivalent:
for digest in (
# String form.
Digest("SHA256 iA02Sx8UNLYvMi49fDwdGjyy5ssU+ttuN1L4L3/JvZA="),
# Hex-encoded string.
Digest(
"880d364b1f1434b62f322e3d7c3c1d1a3cb2e6cb14fadb6e3752f82f7fc9bd90", algorithm="SHA256"
),
# Raw bytes.
Digest(
b"\x88\r6K\x1f\x144\xb6/2.=|<\x1d\x1a<\xb2\xe6\xcb\x14\xfa\xdbn7R\xf8/\x7f\xc9\xbd\x90",
algorithm="SHA256",
),
):
assert digest.value == "880d364b1f1434b62f322e3d7c3c1d1a3cb2e6cb14fadb6e3752f82f7fc9bd90"
def test_digest_hashable():
digest = Digest.from_encoded("SHA256 0Q/XIPetp+QFDce6EIYNVcNTCZSlPqmEfVs1eFEMK0Y=")
d = {digest: 1}
assert digest in d
def test_mapped_sequence():
ms = MappedSequence([1, 2, 3], {"a": 1, "b": 2, "c": 3})
assert ms["a"] == 1
assert ms[0] == 1
assert len(ms) == 3
assert "a" in ms
assert 1 in ms
assert list(ms) == [1, 2, 3]
assert set(ms.keys()) == {"a", "b", "c"}
assert ms.get("a") == 1
assert "z" not in ms
@pytest.mark.parametrize(
"cluster", [["ai2/general-cirrascale", "ai2/allennlp-cirrascale"], "ai2/general-cirrascale"]
)
def test_experiment_spec_new_with_cluster(cluster):
spec = ExperimentSpec.new(cluster=cluster)
assert spec.tasks[0].context.cluster is None
assert spec.tasks[0].constraints is not None
assert isinstance(spec.tasks[0].constraints.cluster, list)
def test_task_spec_with_constraint():
task_spec = TaskSpec.new("main", constraints=Constraints(cluster=["ai2/general-cirrascale"]))
new_task_spec = task_spec.with_constraint(cluster=["ai2/allennlp-cirrascale"])
assert new_task_spec.constraints is not None
assert new_task_spec.constraints.cluster == ["ai2/allennlp-cirrascale"]
# Shouldn't modify the original.
assert task_spec.constraints is not None
assert task_spec.constraints.cluster == ["ai2/general-cirrascale"]
# These methods should all be equivalent.
for task_spec in (
TaskSpec.new("main", constraints={"cluster": ["ai2/general-cirrascale"]}),
TaskSpec.new("main", cluster="ai2/general-cirrascale"),
TaskSpec.new("main", cluster=["ai2/general-cirrascale"]),
):
assert task_spec.constraints is not None
assert task_spec.constraints.cluster == ["ai2/general-cirrascale"]
def test_constraints_behave_like_dictionaries():
c = Constraints()
c["cluster"] = ["ai2/general-cirrascale"]
assert c.cluster == ["ai2/general-cirrascale"]
def test_constraints_extra_fields():
c = Constraints(cluster=["ai2/general-cirrascale"], gpus=["A100"]) # type: ignore
assert hasattr(c, "gpus")
| beaker-py-main | tests/data_model_test.py |
beaker-py-main | tests/__init__.py |
|
import base64
import time
import pytest
from beaker.client import Beaker
from beaker.services.service_client import ServiceClient
from beaker.util import *
@pytest.mark.parametrize(
"camel_case, snake_case",
[
("hostPath", "host_path"),
("fooBarBaz", "foo_bar_baz"),
("docker", "docker"),
],
)
def test_to_lower_camel_and_back(camel_case: str, snake_case: str):
assert to_lower_camel(snake_case) == camel_case
assert to_snake_case(camel_case) == snake_case
def test_cached_property(client: Beaker, alternate_workspace_name):
class FakeService(ServiceClient):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._x = 0
@cached_property(ttl=0.5)
def x(self) -> int:
self._x += 1
return self._x
service_client = FakeService(client)
assert service_client.x == 1
assert service_client.x == 1
time.sleep(1.0)
assert service_client.x == 2
client.config.default_workspace = alternate_workspace_name
assert service_client.x == 3
def test_format_cursor():
cursor = 100
formatted = format_cursor(100)
assert int.from_bytes(base64.urlsafe_b64decode(formatted), "little") == cursor
| beaker-py-main | tests/util_test.py |
from beaker.client import Beaker
def test_dataset_get(client: Beaker, squad_dataset_name: str):
dataset = client.dataset.get(squad_dataset_name)
assert dataset.name is not None
# Try with ID.
client.dataset.get(dataset.id)
# Try with just name (without account prefix).
client.dataset.get(dataset.name)
def test_dataset_ls(client: Beaker, squad_dataset_name: str):
client.dataset.ls(squad_dataset_name)
def test_file_info(client: Beaker, squad_dataset_name: str, squad_dataset_file_name: str):
client.dataset.file_info(squad_dataset_name, squad_dataset_file_name)
def test_upload(client: Beaker, dataset_name: str):
ds = client.dataset.create(dataset_name, commit=False)
client.dataset.upload(ds, b"foo-bar", "foo-bar")
| beaker-py-main | tests/dataset_test.py |
from beaker import Beaker
def test_secrets(client: Beaker, secret_name: str):
secret = client.secret.write(secret_name, "foo")
assert secret.name == secret_name
assert client.secret.get(secret_name) == secret
assert client.secret.read(secret) == "foo"
| beaker-py-main | tests/secret_test.py |
from beaker import Beaker
def test_organization_get(client: Beaker, beaker_org_name: str):
org = client.organization.get(beaker_org_name)
assert org.name == beaker_org_name
# Now get by ID.
client.organization.get(org.id)
def test_organization_list_members(client: Beaker, beaker_org_name: str):
client.organization.list_members(beaker_org_name)
def test_organization_get_member(client: Beaker):
client.organization.get_member(client.account.name)
| beaker-py-main | tests/organization_test.py |
from beaker import Beaker
def test_image_get(client: Beaker, hello_world_image_name: str):
# Get by full name.
image = client.image.get(hello_world_image_name)
# Get by ID.
client.image.get(image.id)
# Get by name.
assert image.name is not None
client.image.get(image.name)
def test_image_url(client: Beaker, hello_world_image_name: str):
assert (
client.image.url(hello_world_image_name)
== "https://beaker.org/im/01FPB7XCX3GHKW5PS9J4623EBN"
)
| beaker-py-main | tests/image_test.py |
from beaker import Beaker, CurrentJobStatus, JobKind
def test_job_get(client: Beaker, hello_world_job_id: str):
job = client.job.get(hello_world_job_id)
assert job.id == hello_world_job_id
assert job.status.current == CurrentJobStatus.finalized
assert job.kind == JobKind.execution
assert job.to_json()["kind"] == "execution"
def test_job_results(client: Beaker, hello_world_job_id: str):
results = client.job.results(hello_world_job_id)
assert results is not None
def test_job_logs(client: Beaker, hello_world_job_id: str):
logs = "\n".join(
[
line.strip()
for line in b"".join(list(client.job.logs(hello_world_job_id, quiet=True)))
.decode()
.split("\n")
]
)
assert "Hello from Docker!" in logs
def test_job_logs_since(client: Beaker, hello_world_job_id: str):
logs = "\n".join(
[
line.strip()
for line in b"".join(
list(
client.job.logs(
hello_world_job_id, quiet=True, since="2023-02-11T00:34:19.938308862Z"
)
)
)
.decode()
.split("\n")
]
)
assert "Hello from Docker!" not in logs
| beaker-py-main | tests/job_test.py |
from beaker.client import Beaker
def test_whoami(client: Beaker):
client.account.whoami()
def test_name(client: Beaker):
assert isinstance(client.account.name, str)
def test_list_organizations(client: Beaker):
client.account.list_organizations()
| beaker-py-main | tests/account_test.py |
import pytest
import yaml
from beaker import Beaker
from beaker.config import Config
def test_str_method(client: Beaker):
assert "user_token=***" in str(client.config)
assert client.config.user_token not in str(client.config)
def test_config_from_path_unknown_field(tmp_path):
path = tmp_path / "config.yml"
with open(path, "w") as f:
yaml.dump({"user_token": "foo-bar", "baz": 1}, f)
with pytest.warns(RuntimeWarning, match="Unknown field 'baz' found in config"):
Config.from_path(path)
| beaker-py-main | tests/config_test.py |
import pytest
from flaky import flaky
from beaker import Beaker
@flaky # this can fail if the request to GitHub fails
def test_warn_for_newer_version(monkeypatch):
import beaker.client
import beaker.version
monkeypatch.setattr(Beaker, "CLIENT_VERSION", "0.1.0")
monkeypatch.setattr(beaker.client, "_LATEST_VERSION_CHECKED", False)
with pytest.warns(UserWarning, match="Please upgrade with"):
Beaker.from_env()
# Shouldn't warn a second time.
Beaker.from_env()
def test_str_method(client: Beaker):
str(client)
| beaker-py-main | tests/client_test.py |
from beaker import Beaker
def test_node_get(client: Beaker, beaker_node_id: str):
gpu_count = client.node.get(beaker_node_id).limits.gpu_count
assert gpu_count is not None
assert gpu_count > 0
| beaker-py-main | tests/node_test.py |
import pytest
@pytest.fixture(autouse=True)
def doctest_fixtures(
doctest_namespace,
client,
workspace_name,
docker_image_name,
beaker_image_name,
beaker_cluster_name,
beaker_on_prem_cluster_name,
experiment_name,
dataset_name,
download_path,
beaker_org_name,
beaker_node_id,
secret_name,
group_name,
hello_world_experiment_name,
squad_dataset_name,
tmp_path,
):
doctest_namespace["beaker"] = client
doctest_namespace["workspace_name"] = workspace_name
doctest_namespace["docker_image_name"] = docker_image_name
doctest_namespace["beaker_image_name"] = beaker_image_name
doctest_namespace["beaker_cluster_name"] = beaker_cluster_name
doctest_namespace["beaker_on_prem_cluster_name"] = beaker_on_prem_cluster_name
doctest_namespace["experiment_name"] = experiment_name
doctest_namespace["dataset_name"] = dataset_name
doctest_namespace["download_path"] = download_path
doctest_namespace["beaker_org_name"] = beaker_org_name
doctest_namespace["beaker_node_id"] = beaker_node_id
doctest_namespace["secret_name"] = secret_name
doctest_namespace["group_name"] = group_name
doctest_namespace["hello_world_experiment_name"] = hello_world_experiment_name
doctest_namespace["squad_dataset_name"] = squad_dataset_name
doctest_namespace["tmp_path"] = tmp_path
| beaker-py-main | beaker/conftest.py |
import logging
import os
import warnings
from dataclasses import asdict, dataclass, fields
from pathlib import Path
from typing import ClassVar, Optional, Set
import yaml
from .exceptions import ConfigurationError
DEFAULT_CONFIG_LOCATION: Optional[Path] = None
try:
DEFAULT_CONFIG_LOCATION = Path.home() / ".beaker" / "config.yml"
except RuntimeError:
# Can't locate home directory.
pass
__all__ = ["Config"]
logger = logging.getLogger(__name__)
@dataclass
class Config:
user_token: str
"""
Beaker user token that can be obtained from
`beaker.org <https://beaker.org/user>`_.
"""
agent_address: str = "https://beaker.org"
"""
The address of the Beaker server.
"""
default_org: Optional[str] = "ai2"
"""
Default Beaker organization to use.
"""
default_workspace: Optional[str] = None
"""
Default Beaker workspace to use.
"""
default_image: Optional[str] = None
"""
The default image used for interactive sessions.
"""
ADDRESS_KEY: ClassVar[str] = "BEAKER_ADDR"
CONFIG_PATH_KEY: ClassVar[str] = "BEAKER_CONFIG"
TOKEN_KEY: ClassVar[str] = "BEAKER_TOKEN"
IGNORE_FIELDS: ClassVar[Set[str]] = {"updater_timestamp", "updater_message"}
def __str__(self) -> str:
fields_str = "user_token=***, " + ", ".join(
[f"{f.name}={getattr(self, f.name)}" for f in fields(self) if f.name != "user_token"]
)
return f"{self.__class__.__name__}({fields_str})"
@classmethod
def from_env(cls, **overrides) -> "Config":
"""
Initialize a config from environment variables or a local config file if one
can be found.
.. note::
Environment variables take precedence over values in the config file.
"""
config: Config
path = cls.find_config()
if path is not None:
config = cls.from_path(path)
if cls.TOKEN_KEY in os.environ:
config.user_token = os.environ[cls.TOKEN_KEY]
elif cls.TOKEN_KEY in os.environ:
config = cls(
user_token=os.environ[cls.TOKEN_KEY],
)
elif "user_token" in overrides:
config = cls(user_token=overrides["user_token"])
else:
raise ConfigurationError(
f"Failed to find config file or environment variable '{cls.TOKEN_KEY}'"
)
# Override with environment variables.
if cls.ADDRESS_KEY in os.environ:
config.agent_address = os.environ[cls.ADDRESS_KEY]
# Override with any arguments passed to this method.
for name, value in overrides.items():
if hasattr(config, name):
setattr(config, name, value)
else:
raise ConfigurationError(f"Beaker config has no attribute '{name}'")
if not config.user_token:
raise ConfigurationError("Invalid Beaker user token, token is empty")
return config
@classmethod
def from_path(cls, path: Path) -> "Config":
"""
Initialize a config from a local config file.
"""
with open(path) as config_file:
logger.debug("Loading beaker config from '%s'", path)
field_names = {f.name for f in fields(cls)}
data = yaml.load(config_file, Loader=yaml.SafeLoader)
for key in list(data.keys()):
if key in cls.IGNORE_FIELDS:
data.pop(key)
continue
value = data[key]
if key not in field_names:
del data[key]
warnings.warn(
f"Unknown field '{key}' found in config '{path}'. "
f"If this is a bug, please report it at https://github.com/allenai/beaker-py/issues/new/",
RuntimeWarning,
)
elif isinstance(value, str) and value == "":
# Replace empty strings with `None`
data[key] = None
return cls(**data)
def save(self, path: Optional[Path] = None):
"""
Save the config to the given path.
"""
if path is None:
if self.CONFIG_PATH_KEY in os.environ:
path = Path(os.environ[self.CONFIG_PATH_KEY])
elif DEFAULT_CONFIG_LOCATION is not None:
path = DEFAULT_CONFIG_LOCATION
if path is None:
raise ValueError("param 'path' is required")
path.parent.mkdir(parents=True, exist_ok=True)
with open(path, "w") as config_file:
yaml.dump(asdict(self), config_file)
@classmethod
def find_config(cls) -> Optional[Path]:
if cls.CONFIG_PATH_KEY in os.environ:
path = Path(os.environ[cls.CONFIG_PATH_KEY])
if path.is_file():
return path
elif DEFAULT_CONFIG_LOCATION is not None and DEFAULT_CONFIG_LOCATION.is_file():
return DEFAULT_CONFIG_LOCATION
return None
| beaker-py-main | beaker/config.py |
_MAJOR = "1"
_MINOR = "21"
_PATCH = "0"
_SUFFIX = ""
VERSION_SHORT = "{0}.{1}".format(_MAJOR, _MINOR)
VERSION = "{0}.{1}.{2}{3}".format(_MAJOR, _MINOR, _PATCH, _SUFFIX)
| beaker-py-main | beaker/version.py |
import base64
import re
import time
import warnings
from collections import OrderedDict
from datetime import datetime, timedelta
from functools import wraps
from pathlib import Path
from typing import Any, Callable, Optional, Set, Tuple, Type, TypeVar, Union
from .aliases import PathOrStr
from .exceptions import RequestException
BUG_REPORT_URL = (
"https://github.com/allenai/beaker-py/issues/new?assignees=&labels=bug&template=bug_report.yml"
)
_VALIDATION_WARNINGS_ISSUED: Set[Tuple[str, str]] = set()
def issue_data_model_warning(cls: Type, key: str, value: Any):
warn_about = (cls.__name__, key)
if warn_about not in _VALIDATION_WARNINGS_ISSUED:
_VALIDATION_WARNINGS_ISSUED.add(warn_about)
warnings.warn(
f"Found unknown field '{key}: {value}' for data model '{cls.__name__}'. "
"This may be a newly added field that hasn't been defined in beaker-py yet. "
"Please submit an issue report about this here:\n"
f"{BUG_REPORT_URL}",
RuntimeWarning,
)
def to_lower_camel(s: str) -> str:
"""
Convert a snake-case string into lower camel case.
"""
parts = s.split("_")
return parts[0] + "".join([p.title() for p in parts[1:]])
def to_snake_case(s: str) -> str:
"""
Convert a lower camel case strings into snake case.
"""
if s.islower():
return s
parts = []
for c in s:
if c.isupper():
parts.append("_")
parts.append(c.lower())
return "".join(parts)
def path_is_relative_to(path: Path, other: PathOrStr) -> bool:
"""
This is copied from :meth:`pathlib.PurePath.is_relative_to` to support older Python
versions (before 3.9, when this method was introduced).
"""
try:
path.relative_to(other)
return True
except ValueError:
return False
T = TypeVar("T")
_property_cache: "OrderedDict[Tuple[str, str], Tuple[float, Any]]" = OrderedDict()
_property_cache_max_size = 50
def cached_property(ttl: float = 60):
"""
This is used to create a cached property on a :class:`~beaker.services.service_client.ServiceClient`
subclass.
:param ttl: The time-to-live in seconds. The cached value will be evicted from the cache
after this many seconds to ensure it stays fresh.
See :meth:`~beaker.services.account.AccountClient.name`, for example.
"""
def ttl_cached_property(prop) -> property:
@property # type: ignore[misc]
def prop_with_cache(self):
key = (prop.__qualname__, repr(self.config))
cached = _property_cache.get(key)
if cached is not None:
time_cached, value = cached
if time.monotonic() - time_cached <= ttl:
return value
value = prop(self)
_property_cache[key] = (time.monotonic(), value)
while len(_property_cache) > _property_cache_max_size:
_property_cache.popitem(last=False)
return value
return prop_with_cache # type: ignore[return-value]
return ttl_cached_property
def format_since(since: Union[datetime, timedelta, str]) -> str:
if isinstance(since, datetime):
if since.tzinfo is None:
return since.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
else:
return since.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
elif isinstance(since, timedelta):
return f"{since.total_seconds()}s"
else:
return since
TIMESTAMP_RE = re.compile(rb"^([0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]+Z)(.*)$")
def split_timestamp(s: bytes) -> Optional[str]:
match = TIMESTAMP_RE.match(s)
if match is not None:
return match.group(1).decode()
else:
return None
def log_and_wait(retries_so_far: int, err: Exception) -> None:
from .client import Beaker
retry_in = min(Beaker.BACKOFF_FACTOR * (2**retries_so_far), Beaker.BACKOFF_MAX)
Beaker.logger.debug("Request failed with: %s\nRetrying in %d seconds...", err, retry_in)
time.sleep(retry_in)
def retriable(
on_failure: Optional[Callable[..., None]] = None,
recoverable_errors: Tuple[Type[Exception], ...] = (RequestException,),
):
"""
Use to make a service client method more robust by allowing retries.
"""
def parametrize_decorator(func: Callable[..., T]) -> Callable[..., T]:
@wraps(func)
def retriable_method(*args, **kwargs) -> T:
from .client import Beaker
retries = 0
while True:
try:
return func(*args, **kwargs)
except recoverable_errors as err:
if retries < Beaker.MAX_RETRIES:
if on_failure is not None:
on_failure()
log_and_wait(retries, err)
retries += 1
else:
raise
return retriable_method
return parametrize_decorator
def format_cursor(cursor: int) -> str:
if cursor < 0:
raise ValueError("cursor must be >= 0")
return base64.urlsafe_b64encode(cursor.to_bytes(8, "little")).decode()
| beaker-py-main | beaker/util.py |
import logging
import os
from contextlib import contextmanager
from typing import Generator, Optional, Tuple, Union
import docker
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from .config import Config
from .data_model import *
from .exceptions import *
from .services import *
from .version import VERSION
__all__ = ["Beaker"]
_LATEST_VERSION_CHECKED = False
class Beaker:
"""
A client for interacting with `Beaker <https://beaker.org>`_.
:param config: The Beaker :class:`Config`.
:param check_for_upgrades: Automatically check that beaker-py is up-to-date. You'll see
a warning if it isn't.
:param timeout: How many seconds to wait for the Beaker server to send data before giving up,
as a float, or a (connect timeout, read timeout) tuple.
:param session: Set to ``True`` or a :class:`requests.Session` instance to
force the Beaker client to use a single :class:`~requests.Session`
for all HTTP requests to the Beaker server for the life of the client.
.. seealso::
The :meth:`session()` context manager.
.. warning::
You should only set this argument for short-lived clients.
If you're initializing a :class:`Beaker` client with this that's supposed to stick
around indefinitely, consider using the :meth:`session()` context manager
intermittently instead.
:param pool_maxsize: The maximum size of the connection pool to use.
If not specified, a large default value will be used based on a multiple of the number
of CPUs available.
:param user_agent: Override the "User-Agent" header used in requests to the Beaker server.
The easiest way to initialize a Beaker client is with :meth:`.from_env()`:
>>> beaker = Beaker.from_env()
You can then interact with the various Beaker services through the corresponding
property. For example, to manage workspaces, use :data:`Beaker.workspace`:
>>> beaker.workspace.get(workspace_name).full_name
'ai2/beaker-py-testing'
.. tip::
Use the right side nav to browse through the API docs for all of the different services.
"""
RECOVERABLE_SERVER_ERROR_CODES = (429, 500, 502, 503, 504)
MAX_RETRIES = 5
BACKOFF_FACTOR = 1
BACKOFF_MAX = 120
API_VERSION = "v3"
CLIENT_VERSION = VERSION
logger = logging.getLogger("beaker")
def __init__(
self,
config: Config,
check_for_upgrades: bool = True,
timeout: Optional[Union[float, Tuple[float, float]]] = 5.0,
session: Optional[Union[bool, requests.Session]] = None,
pool_maxsize: Optional[int] = None,
user_agent: str = f"beaker-py v{VERSION}",
):
self._config = config
self._docker: Optional[docker.DockerClient] = None
self._pool_maxsize = pool_maxsize or min(100, (os.cpu_count() or 16) * 6)
self.user_agent = user_agent
self._session: Optional[requests.Session] = (
None
if not session
else (session if isinstance(session, requests.Session) else self._make_session())
)
self._timeout = timeout
# Initialize service clients:
self._account = AccountClient(self)
self._organization = OrganizationClient(self)
self._workspace = WorkspaceClient(self)
self._cluster = ClusterClient(self)
self._node = NodeClient(self)
self._dataset = DatasetClient(self)
self._image = ImageClient(self)
self._job = JobClient(self)
self._experiment = ExperimentClient(self)
self._secret = SecretClient(self)
self._group = GroupClient(self)
# Ensure default workspace exists.
if self._config.default_workspace is not None:
if self._config.default_workspace == "":
raise ValueError("'default_workspace' cannot be an empty string")
self.workspace.ensure(self._config.default_workspace)
# Validate default org.
if self._config.default_org is not None:
if self._config.default_org == "":
raise ValueError("'default_org' cannot be an empty string")
self.organization.get(self._config.default_org)
# See if there's a newer version, and if so, suggest that the user upgrades.
if check_for_upgrades:
self._check_for_upgrades()
def __str__(self) -> str:
return (
f"Beaker("
f"user='{self.account.name}', "
f"default_workspace='{self.config.default_workspace}', "
f"default_org='{self.config.default_org}', "
f"agent_address='{self.config.agent_address}'"
f")"
)
def _check_for_upgrades(self):
global _LATEST_VERSION_CHECKED
if _LATEST_VERSION_CHECKED:
return
import warnings
import packaging.version
import requests
try:
response = requests.get(
"https://api.github.com/repos/allenai/beaker-py/releases/latest", timeout=1
)
if response.ok:
latest_version = packaging.version.parse(response.json()["tag_name"])
_LATEST_VERSION_CHECKED = True
if latest_version > packaging.version.parse(self.CLIENT_VERSION):
warnings.warn(
f"You're using beaker-py v{self.CLIENT_VERSION}, "
f"but a newer version (v{latest_version}) is available.\n\n"
f"Please upgrade with `pip install --upgrade beaker-py`.\n\n"
f"You can find the release notes for v{latest_version} at "
f"https://github.com/allenai/beaker-py/releases/tag/v{latest_version}\n",
UserWarning,
)
except Exception:
pass
@classmethod
def from_env(
cls,
check_for_upgrades: bool = True,
timeout: Optional[Union[float, Tuple[float, float]]] = 5.0,
session: Optional[Union[bool, requests.Session]] = None,
pool_maxsize: Optional[int] = None,
user_agent: str = f"beaker-py v{VERSION}",
**overrides,
) -> "Beaker":
"""
Initialize client from a config file and/or environment variables.
:param check_for_upgrades: Automatically check that beaker-py is up-to-date. You'll see
a warning if it isn't.
:param timeout: How many seconds to wait for the Beaker server to send data before giving up,
as a float, or a (connect timeout, read timeout) tuple.
:param session: Set to ``True`` or a :class:`requests.Session` instance to
force the Beaker client to use a single :class:`~requests.Session`
for all HTTP requests to the Beaker server.
.. seealso::
The :meth:`session()` context manager.
.. warning::
You should only set this argument for short-lived clients.
If you're initializing a :class:`Beaker` client with this that's supposed to stick
around indefinitely, consider using the :meth:`session()` context manager
intermittently instead.
:param pool_maxsize: The maximum size of the connection pool to use.
If not specified, a large default value will be used based on a multiple of the number
of CPUs available.
:param user_agent: Override the "User-Agent" header used in requests to the Beaker server.
:param overrides: Fields in the :class:`Config` to override.
.. note::
This will use the same config file that the `Beaker command-line client
<https://github.com/allenai/beaker/>`_
creates and uses, which is usually located at ``$HOME/.beaker/config.yml``.
If you haven't configured the command-line client, then you can alternately just
set the environment variable ``BEAKER_TOKEN`` to your Beaker `user token <https://beaker.org/user>`_.
"""
return cls(
Config.from_env(**overrides),
check_for_upgrades=check_for_upgrades,
timeout=timeout,
session=session,
pool_maxsize=pool_maxsize,
user_agent=user_agent,
)
def _make_session(self) -> requests.Session:
session = requests.Session()
retries = Retry(
total=self.MAX_RETRIES * 2,
connect=self.MAX_RETRIES,
status=self.MAX_RETRIES,
backoff_factor=self.BACKOFF_FACTOR,
status_forcelist=self.RECOVERABLE_SERVER_ERROR_CODES,
)
session.mount("https://", HTTPAdapter(max_retries=retries, pool_maxsize=self._pool_maxsize))
return session
@contextmanager
def session(self, session: Optional[requests.Session] = None) -> Generator[None, None, None]:
"""
A context manager that forces the Beaker client to reuse a single :class:`requests.Session`
for all HTTP requests to the Beaker server.
This can improve performance when calling a series of a client methods in a row.
:examples:
>>> with beaker.session():
... n_images = len(beaker.workspace.images())
... n_datasets = len(beaker.workspace.datasets())
:param session: The session to use. If not provided a default will be used.
.. warning::
Only set the ``session`` argument if you really know what you're doing! Otherwise
just leave this as ``None``.
"""
current = self._session
session = session or self._make_session()
try:
self._session = session
yield None
finally:
self._session = current
session.close()
@property
def config(self) -> Config:
"""
The client's :class:`Config`.
"""
return self._config
@property
def account(self) -> AccountClient:
"""
Manage accounts.
:examples:
>>> beaker.account.name
'petew'
.. tip::
See the `Accounts Overview <overview.html#accounts>`_ for a walk-through of the
main methods, or check out the `Account API Docs <#account>`_
to see all of the available methods.
"""
return self._account
@property
def organization(self) -> OrganizationClient:
"""
Manage organizations.
:examples:
>>> beaker.organization.get("ai2").display_name
'AI2'
.. tip::
See the `Organizations Overview <overview.html#organizations>`_ for a walk-through of the
main methods, or check out the `Organization API Docs <#organization>`_
to see all of the available methods.
"""
return self._organization
@property
def workspace(self) -> WorkspaceClient:
"""
Manage workspaces.
:examples:
>>> beaker.workspace.datasets(
... match="squad",
... uncommitted=False,
... results=False,
... )[0].full_name
'petew/squad-train'
.. tip::
See the `Workspaces Overview <overview.html#workspaces>`_ for a walk-through of the
main methods, or check out the `Workspace API Docs <#workspace>`_
to see all of the available methods.
"""
return self._workspace
@property
def cluster(self) -> ClusterClient:
"""
Manage clusters.
:examples:
>>> beaker.cluster.get(beaker_cluster_name).name
'ai2/canary'
.. tip::
See the `Clusters Overview <overview.html#clusters>`_ for a walk-through of the
main methods, or check out the `Cluster API Docs <#cluster>`_
to see all of the available methods.
"""
return self._cluster
@property
def node(self) -> NodeClient:
"""
Manage nodes.
:examples:
>>> beaker.node.get(beaker_node_id).limits.gpu_count
8
.. tip::
See the `Nodes Overview <overview.html#nodes>`_ for a walk-through of the
main methods, or check out the `Node API Docs <#node>`_
to see all of the available methods.
"""
return self._node
@property
def dataset(self) -> DatasetClient:
"""
Manage datasets.
:examples:
>>> [file_info.path for file_info in beaker.dataset.ls("petew/squad-train")]
['squad-train.arrow']
.. tip::
See the `Datasets Overview <overview.html#datasets>`_ for a walk-through of the
main methods, or check out the `Dataset API Docs <#dataset>`_
to see all of the available methods.
"""
return self._dataset
@property
def image(self) -> ImageClient:
"""
Manage images.
:examples:
>>> beaker.image.get("petew/hello-world").original_tag
'hello-world'
.. tip::
See the `Images Overview <overview.html#images>`_ for a walk-through of the
main methods, or check out the `Image API Docs <#image>`_
to see all of the available methods.
"""
return self._image
@property
def job(self) -> JobClient:
"""
Manage jobs.
:examples:
>>> running_jobs = beaker.job.list(
... beaker_on_prem_cluster_name,
... finalized=False,
... )
.. tip::
See the `Jobs Overview <overview.html#jobs>`_ for a walk-through of the
main methods, or check out the `Job API Docs <#job>`_
to see all of the available methods.
"""
return self._job
@property
def experiment(self) -> ExperimentClient:
"""
Manage experiments.
:examples:
>>> logs = "".join([
... line.decode() for line in
... beaker.experiment.logs("petew/hello-world", quiet=True)
... ])
.. tip::
See the `Experiments Overview <overview.html#experiments>`_ for a walk-through of the
main methods, or check out the `Experiment API Docs <#experiment>`_
to see all of the available methods.
"""
return self._experiment
@property
def secret(self) -> SecretClient:
"""
Manage secrets.
:examples:
>>> secret = beaker.secret.write(secret_name, "foo")
.. tip::
See the `Secrets Overview <overview.html#secrets>`_ for a walk-through of the
main methods, or check out the `Secret API Docs <#secret>`_
to see all of the available methods.
"""
return self._secret
@property
def group(self) -> GroupClient:
"""
Manage groups.
:examples:
>>> group = beaker.group.create(group_name)
.. tip::
See the `Groups Overview <overview.html#groups>`_ for a walk-through of the
main methods, or check out the `Group API Docs <#group>`_
to see all of the available methods.
"""
return self._group
@property
def docker(self) -> docker.DockerClient:
if self._docker is None:
self._docker = docker.from_env()
assert self._docker is not None
return self._docker
| beaker-py-main | beaker/client.py |
"""
Initialize a :class:`Beaker client <beaker.Beaker>` with :meth:`Beaker.from_env()`:
>>> from beaker import *
>>> beaker = Beaker.from_env(default_workspace=workspace_name)
Accounts
--------
Manage your Beaker account with :data:`Beaker.account`.
For example, you can check who you are logged in as with
:meth:`Beaker.account.whoami() <services.AccountClient.whoami>`:
>>> username = beaker.account.whoami().name
.. important::
In this example - and all other examples - ``beaker`` is an instance of the
:class:`Beaker` client class, not the :mod:`beaker` module.
See `Quick start <./quickstart.html>`_ to learn how to instantiate the client.
Organizations
-------------
Manage Beaker organizations with :data:`Beaker.organization`.
For example, you can get information about an organization with
:meth:`Beaker.organization.get() <services.OrganizationClient.get>`:
>>> beaker.organization.get(beaker_org_name).display_name
'AI2'
You can also add, get, list, or remove members with
:meth:`Beaker.organization.add_member() <services.OrganizationClient.add_member>`,
:meth:`.get_member() <services.OrganizationClient.get_member>`,
:meth:`.list_members() <services.OrganizationClient.list_members>`, or
:meth:`.remove_member() <services.OrganizationClient.remove_member>`, respectively.
Workspaces
----------
Manage Beaker workspaces with :data:`Beaker.workspace`.
For example, you can create a workspace with :meth:`Beaker.workspace.ensure() <services.WorkspaceClient.ensure>`:
>>> workspace = beaker.workspace.ensure(workspace_name)
You can retreive metadata about a workspace with :meth:`Beaker.workspace.get() <services.WorkspaceClient.get>`:
>>> beaker.workspace.get(workspace_name).id
'01G370GVHJQZYF50XYXM7VB53N'
You can list datasets in a workspace with
:meth:`Beaker.workspace.datasets() <services.WorkspaceClient.datasets>`:
>>> datasets = beaker.workspace.datasets(workspace_name, results=False)
Similarly, you can list experiments or images with
:meth:`Beaker.workspace.experiments() <services.WorkspaceClient.experiments>`
or
:meth:`Beaker.workspace.images() <services.WorkspaceClient.images>`,
respectively.
Clusters
--------
Manage Beaker clusters with :data:`Beaker.cluster`.
For example, you can get information about a cluster with
:meth:`Beaker.cluster.get() <services.ClusterClient.get>`:
>>> beaker.cluster.get(beaker_cluster_name).name
'ai2/canary'
Or you could check how many GPUs are free on an on-premise cluster with
:meth:`Beaker.cluster.utilization() <services.ClusterClient.utilization>`:
>>> free_gpus = 0
>>> for node_util in beaker.cluster.utilization(beaker_on_prem_cluster_name).nodes:
... free_gpus += node_util.free.gpu_count
Nodes
-----
Manage Beaker nodes with :data:`Beaker.node`.
For example, you can get information about a node with
:meth:`Beaker.node.get() <services.NodeClient.get>`:
>>> beaker.node.get(beaker_node_id).limits.gpu_count
8
Images
------
Manage Beaker images with :data:`Beaker.image`.
For example, upload a local Docker image to Beaker with
:meth:`Beaker.image.create() <services.ImageClient.create>`:
>>> image = beaker.image.create(beaker_image_name, docker_image_name, quiet=True)
The object returned is the same :class:`~data_model.image.Image` object you get from
:meth:`Beaker.image.get() <services.ImageClient.get>`.
It contains some metadata about the image:
>>> image = beaker.image.get(f"{username}/{beaker_image_name}")
>>> image.original_tag
'hello-world'
Experiments
-----------
Manage Beaker experiments with :data:`Beaker.experiment`.
For example, create an experiment with :meth:`Beaker.experiment.create() <services.ExperimentClient.create>`:
>>> spec = ExperimentSpec(
... tasks=[
... TaskSpec(
... name="main",
... image=ImageSource(beaker=image.id),
... context=TaskContext(cluster=beaker_cluster_name),
... result=ResultSpec(
... path="/unused" # required even if the task produces no output.
... ),
... ),
... ],
... )
>>> experiment = beaker.experiment.create(
... experiment_name,
... spec,
... workspace=workspace_name,
... )
Wait for the experiment to complete with
:meth:`Beaker.experiment.wait_for() <services.ExperimentClient.wait_for>`:
>>> experiment = beaker.experiment.wait_for(
... experiment,
... timeout=60 * 5,
... quiet=True,
... )[0]
Get the logs from the execution of a task in an experiment with
:meth:`Beaker.experiment.logs() <services.ExperimentClient.logs>`:
>>> logs = "".join([
... line.decode() for line in
... beaker.experiment.logs(experiment, quiet=True)
... ])
Get the results from a task in an experiment with
:meth:`Beaker.experiment.results <services.ExperimentClient.results>`:
>>> results = beaker.experiment.results(experiment)
Jobs
----
Manage Beaker jobs with :data:`Beaker.job`.
For example, get the logs from a job with :meth:`Beaker.job.logs() <services.JobClient.logs>`
(equivalent to :meth:`Beaker.experiment.logs() <services.ExperimentClient.logs>` when there is
only one task in the experiment):
>>> job = experiment.jobs[0]
>>> logs = "".join([
... line.decode() for line in
... beaker.job.logs(job, quiet=True)
... ])
Datasets
--------
Manage Beaker datasets with :data:`Beaker.dataset`.
For example, create a dataset from a local file with
:meth:`Beaker.dataset.create() <services.DatasetClient.create>`:
>>> dataset = beaker.dataset.create(dataset_name, "README.md", quiet=True)
Or create a dataset from a local directory:
>>> dataset = beaker.dataset.create(dataset_name, "docs/source/", force=True, quiet=True)
.. tip::
The ``force=True`` flag is used to overwrite any existing dataset with the same name.
And download a dataset with :meth:`Beaker.dataset.fetch() <services.DatasetClient.fetch>`:
>>> beaker.dataset.fetch(dataset, target=download_path, quiet=True)
You can also download a single file using
:meth:`Beaker.dataset.stream_file() <services.DatasetClient.stream_file>`:
>>> contents = b"".join(beaker.dataset.stream_file(dataset, "docs/source/conf.py", quiet=True))
Secrets
-------
Manage Beaker secrets with :data:`Beaker.secret`.
For example, you can read, write, or delete secrets with
:meth:`Beaker.secret.read() <services.SecretClient.read>`,
:meth:`Beaker.secret.write() <services.SecretClient.write>`, and
:meth:`Beaker.secret.delete() <services.SecretClient.delete>`, respectively.
Groups
------
Manage Beaker groups with :data:`Beaker.group`.
For example, create a group with :meth:`Beaker.group.create() <services.GroupClient.create>`:
>>> group = beaker.group.create(group_name, experiment)
"""
from .client import *
from .config import *
from .data_model import *
from .exceptions import *
| beaker-py-main | beaker/__init__.py |
import os
from typing import Union
PathOrStr = Union[os.PathLike, str]
| beaker-py-main | beaker/aliases.py |
"""
Exceptions that can be raised by the :class:`~beaker.Beaker` client.
.. tip::
All exceptions inherit from :class:`BeakerError` other than :exc:`HTTPError`,
which is re-exported from :exc:`requests.exceptions.HTTPError`,
and :exc:`ValidationError`, which is re-exported from `pydantic <https://pydantic-docs.helpmanual.io/>`_.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from pydantic import ValidationError # noqa: F401, re-imported here for convenience
from requests.exceptions import ( # noqa: F401, re-imported here for convenience
HTTPError,
RequestException,
)
if TYPE_CHECKING:
from .data_model.experiment import Task
from .data_model.job import Job
ValidationError.__doc__ = """
Raised when data passed into a :mod:`DataModel <beaker.data_model>` is invalid.
"""
__all__ = [
"BeakerError",
"ValidationError",
"HTTPError",
"RequestException",
"NotFoundError",
"AccountNotFound",
"OrganizationNotFound",
"OrganizationNotSet",
"ConfigurationError",
"ImageNotFound",
"ImageConflict",
"WorkspaceNotFound",
"WorkspaceWriteError",
"WorkspaceConflict",
"ClusterNotFound",
"ClusterConflict",
"ExperimentNotFound",
"ExperimentConflict",
"DatasetConflict",
"DatasetNotFound",
"UnexpectedEOFError",
"JobNotFound",
"WorkspaceNotSet",
"NodeNotFound",
"DatasetWriteError",
"DatasetReadError",
"SecretNotFound",
"GroupConflict",
"GroupNotFound",
"DuplicateJobError",
"DuplicateExperimentError",
"TaskNotFound",
"ChecksumFailedError",
"TaskStoppedError",
"JobFailedError",
"JobTimeoutError",
"ExperimentSpecError",
"ThreadCanceledError",
]
class BeakerError(Exception):
"""
Base class for all Beaker errors other than :exc:`HTTPError`, which is re-exported
from :exc:`requests.exceptions.HTTPError`, and :exc:`ValidationError`, which is
re-exported from `pydantic <https://pydantic-docs.helpmanual.io/>`_.
"""
class NotFoundError(BeakerError):
"""
Base class for all "not found" error types.
"""
class AccountNotFound(NotFoundError):
pass
class OrganizationNotFound(NotFoundError):
"""
Raised when a specified organization doesn't exist.
"""
class OrganizationNotSet(BeakerError):
"""
Raised when an identifying doesn't start with an organization name and
:data:`Config.default_org <beaker.Config.default_org>` is not set.
"""
class ConfigurationError(BeakerError):
"""
Raised when the :class:`~beaker.Config` fails to instantiate.
"""
class ImageNotFound(NotFoundError):
pass
class ImageConflict(BeakerError):
"""
Raised when attempting to create/rename an image if an image by that name already exists.
"""
class WorkspaceNotFound(NotFoundError):
pass
class WorkspaceWriteError(BeakerError):
"""
Raised when attempting to modify or add to a workspace that's been archived.
"""
class WorkspaceConflict(BeakerError):
"""
Raised when attempting to create/rename a workspace if a workspace by that name already exists.
"""
class ClusterNotFound(NotFoundError):
pass
class ClusterConflict(BeakerError):
"""
Raised when attempting to create a cluster if a cluster by that name already exists.
"""
class ExperimentNotFound(NotFoundError):
pass
class ExperimentConflict(BeakerError):
"""
Raised when attempting to create/rename an experiment if an experiment by that name already exists.
"""
class DatasetConflict(BeakerError):
"""
Raised when attempting to create/rename a dataset if a dataset by that name already exists.
"""
class DatasetNotFound(NotFoundError):
pass
class UnexpectedEOFError(BeakerError):
"""
Raised when creating a dataset when an empty source file is encountered.
"""
class JobNotFound(NotFoundError):
pass
class WorkspaceNotSet(BeakerError):
"""
Raised when workspace argument is not provided and there is no default workspace set.
"""
class NodeNotFound(NotFoundError):
pass
class DatasetWriteError(BeakerError):
"""
Raised when a write operation on a dataset fails because the dataset has already been committed.
"""
class DatasetReadError(BeakerError):
"""
Raised when a read operation on a dataset fails because the dataset hasn't been committed yet,
or the :data:`~beaker.data_model.Dataset.storage` hasn't been set for some other reason.
"""
class SecretNotFound(NotFoundError):
pass
class GroupConflict(BeakerError):
"""
Raised when attempting to create/rename a group if a group by that name already exists.
"""
class GroupNotFound(NotFoundError):
pass
class DuplicateJobError(BeakerError):
"""
Raised when duplicate jobs are passed into a method that expects unique jobs.
"""
class DuplicateExperimentError(BeakerError):
"""
Raised when duplicate experiments are passed into a method that expects unique experiments.
"""
class TaskNotFound(NotFoundError):
pass
class ChecksumFailedError(BeakerError):
"""
Raised when a downloaded file from a Beaker dataset is corrupted.
"""
class TaskStoppedError(BeakerError):
def __init__(self, msg: Optional[str] = None, task: Optional[Task] = None):
super().__init__(msg)
self.task = task
class JobFailedError(BeakerError):
def __init__(self, msg: Optional[str] = None, job: Optional[Job] = None):
super().__init__(msg)
self.job = job
class JobTimeoutError(BeakerError, TimeoutError):
pass
class ExperimentSpecError(BeakerError):
pass
class ThreadCanceledError(BeakerError):
pass
| beaker-py-main | beaker/exceptions.py |
import io
import time
from typing import List, Optional, Tuple, Union
from rich.console import Console
from rich.live import Live
from rich.panel import Panel
from rich.progress import (
BarColumn,
DownloadColumn,
FileSizeColumn,
MofNCompleteColumn,
Progress,
ProgressColumn,
SpinnerColumn,
Task,
TaskID,
TimeElapsedColumn,
TransferSpeedColumn,
)
from rich.table import Table
from rich.text import Text
class QuietProgress:
"""
A mock `Progress` class that does absolutely nothing.
We use this when users pass `quiet=True` since rich's `Progress` still
prints empty lines with `quiet=True`.
"""
def update(self, *args, **kwargs):
del args, kwargs
def add_task(self, *args, **kwargs):
del args, kwargs
def advance(self, *args, **kwargs):
del args, kwargs
def stop_task(self, *args, **kwargs):
del args, kwargs
def __enter__(self):
return self
def __exit__(self, *args, **kwargs): # type: ignore
del args, kwargs
class QuietLive:
"""
Quiet version of rich's `Live`.
"""
def __enter__(self):
return self
def __exit__(self, *args, **kwargs): # type: ignore
del args, kwargs
class ImageDownloadUploadColumn(DownloadColumn):
def render(self, task: Task) -> Text:
if task.total is None or int(task.total) == 1:
return Text("")
else:
return super().render(task)
class TaskStatusColumn(ProgressColumn):
def __init__(self):
super().__init__()
self.dots = 0
self.max_dots = 4
self.update_interval = 1.0
self.last_updated = time.time()
def render(self, task: Task) -> Text:
total = max(0, task.total or 0)
completed = max(0, task.completed)
if completed < total:
now = time.time()
if now - self.last_updated > self.update_interval:
self.last_updated = now
self.dots += 1
if self.dots > self.max_dots:
self.dots = 0
return Text("waiting" + ("." * self.dots) + (" " * (self.max_dots - self.dots)))
else:
return Text("\N{check mark} finalized")
class BufferedReaderWithProgress(io.BufferedReader):
def __init__(
self,
handle: Union[io.BufferedReader, io.BytesIO],
progress: Progress,
task_id: TaskID,
close_handle: bool = True,
):
self.handle = handle
self.progress = progress
self.task_id = task_id
self.total_read = 0
self.close_handle = close_handle
@property
def mode(self) -> str:
return "rb"
def __enter__(self) -> "BufferedReaderWithProgress":
self.handle.__enter__()
return self
def __exit__(self, *_):
self.close()
@property
def closed(self) -> bool:
return self.handle.closed
def close(self):
if self.close_handle:
self.handle.close()
def fileno(self):
return self.handle.fileno()
def flush(self):
self.handle.flush()
def isatty(self) -> bool:
return self.handle.isatty()
def readable(self) -> bool:
return self.handle.readable()
def seekable(self) -> bool:
return self.handle.seekable()
def writable(self) -> bool:
return False
def peek(self, size: int = 0) -> bytes:
if isinstance(self.handle, io.BytesIO):
return self.handle.getvalue()[:size]
else:
return self.handle.peek(size)
def read(self, size: Optional[int] = None) -> bytes:
out = self.handle.read(size)
self.progress.advance(self.task_id, len(out))
self.total_read += len(out)
return out
def read1(self, size: int = -1) -> bytes:
out = self.handle.read1(size)
self.progress.advance(self.task_id, len(out))
self.total_read += len(out)
return out
def readinto(self, b):
n = self.handle.readinto(b)
self.progress.advance(self.task_id, n)
self.total_read += n
return n
def readinto1(self, b):
n = self.handle.readinto1(b)
self.progress.advance(self.task_id, n)
self.total_read += n
return n
def readline(self, size: Optional[int] = -1) -> bytes:
out = self.handle.readline(size)
self.progress.advance(self.task_id, len(out))
self.total_read += len(out)
return out
def readlines(self, hint: int = -1) -> List[bytes]:
lines = self.handle.readlines(hint)
for line in lines:
self.progress.advance(self.task_id, len(line))
self.total_read += len(line)
return lines
def seek(self, offset: int, whence: int = 0) -> int:
pos = self.handle.seek(offset, whence)
self.progress.update(self.task_id, completed=pos)
return pos
def tell(self) -> int:
return self.handle.tell()
@property
def raw(self):
return self.handle.raw
def detach(self):
return self.handle.detach()
def write(self, _) -> int:
raise io.UnsupportedOperation("write")
def writelines(self, _):
raise io.UnsupportedOperation("write")
def get_experiments_progress(quiet: bool = False) -> Progress:
if quiet:
return QuietProgress() # type: ignore
else:
return Progress(
"[progress.description]{task.description}",
BarColumn(),
MofNCompleteColumn(),
"[progress.percentage]{task.percentage:>3.0f}%",
# disable=quiet,
)
def get_jobs_progress(quiet: bool = False) -> Progress:
if quiet:
return QuietProgress() # type: ignore
else:
return Progress(
"[progress.description]{task.description}",
TaskStatusColumn(),
TimeElapsedColumn(),
# disable=quiet,
)
def get_logs_progress(quiet: bool = False) -> Progress:
if quiet:
return QuietProgress() # type: ignore
else:
return Progress(
"[progress.description]{task.description}",
SpinnerColumn(),
FileSizeColumn(),
TimeElapsedColumn(),
# disable=quiet,
)
def get_group_experiments_progress(quiet: bool = False) -> Progress:
if quiet:
return QuietProgress() # type: ignore
else:
return Progress(
"[progress.description]{task.description}",
SpinnerColumn(),
FileSizeColumn(),
TimeElapsedColumn(),
# disable=quiet,
)
def get_exps_and_jobs_progress(quiet: bool = False) -> Tuple[Live, Progress, Progress]:
if quiet:
return QuietLive(), QuietProgress(), QuietProgress() # type: ignore
else:
experiments_progress = get_experiments_progress(quiet)
jobs_progress = get_jobs_progress(quiet)
progress_table = Table.grid()
progress_table.add_row(
Panel.fit(experiments_progress, title="Overall progress", padding=(1, 2)),
Panel.fit(jobs_progress, title="Task progress", padding=(1, 2)),
)
return (
Live(progress_table, console=None if not quiet else Console(quiet=True)),
experiments_progress,
jobs_progress,
)
def get_dataset_sync_progress(quiet: bool = False) -> Progress:
if quiet:
return QuietProgress() # type: ignore
else:
return Progress(
"[progress.description]{task.description}",
BarColumn(),
"[progress.percentage]{task.percentage:>3.0f}%",
TimeElapsedColumn(),
TransferSpeedColumn(),
DownloadColumn(),
# disable=quiet,
)
def get_sized_dataset_fetch_progress(quiet: bool = False) -> Progress:
if quiet:
return QuietProgress() # type: ignore
else:
return Progress(
"[progress.description]{task.description}",
BarColumn(),
"[progress.percentage]{task.percentage:>3.0f}%",
TimeElapsedColumn(),
TransferSpeedColumn(),
DownloadColumn(),
# disable=quiet,
)
def get_unsized_dataset_fetch_progress(quiet: bool = False) -> Progress:
if quiet:
return QuietProgress() # type: ignore
else:
return Progress(
"[progress.description]{task.description}",
SpinnerColumn(),
TimeElapsedColumn(),
TransferSpeedColumn(),
FileSizeColumn(),
# disable=quiet,
)
def get_image_upload_progress(quiet: bool = False) -> Progress:
if quiet:
return QuietProgress() # type: ignore
else:
return Progress(
"[progress.description]{task.description}",
BarColumn(),
"[progress.percentage]{task.percentage:>3.0f}%",
ImageDownloadUploadColumn(),
# disable=quiet,
)
def get_image_download_progress(quiet: bool = False) -> Progress:
return get_image_upload_progress(quiet)
| beaker-py-main | beaker/progress.py |
from datetime import datetime
from typing import Optional
from .account import Account
from .base import BaseModel, StrEnum
__all__ = ["Organization", "OrganizationRole", "OrganizationMember"]
class Organization(BaseModel):
id: str
name: str
description: str
created: datetime
display_name: str
pronouns: Optional[str] = None
class OrganizationRole(StrEnum):
admin = "admin"
member = "member"
class OrganizationMember(BaseModel):
role: OrganizationRole
organization: Organization
user: Account
| beaker-py-main | beaker/data_model/organization.py |
from datetime import datetime
from typing import Dict, List, Optional, Tuple
from pydantic import Field
from .account import Account
from .base import BaseModel, IntEnum, StrEnum, field_validator
from .experiment_spec import (
DataMount,
EnvVar,
ImageSource,
Priority,
ResultSpec,
TaskSpec,
)
__all__ = [
"CurrentJobStatus",
"CanceledCode",
"JobStatus",
"ExecutionResult",
"JobRequests",
"JobLimits",
"JobExecution",
"JobKind",
"Job",
"Jobs",
"JobStatusUpdate",
"JobPatch",
"Session",
]
class CurrentJobStatus(StrEnum):
"""
The status of a job.
"""
created = "created"
scheduled = "scheduled"
running = "running"
idle = "idle"
exited = "exited"
failed = "failed"
finalized = "finalized"
canceled = "canceled"
preempted = "preempted"
class CanceledCode(IntEnum):
not_set = 0
system_preemption = 1
user_preemption = 2
idle = 3
manual_cancellation = 4
class JobStatus(BaseModel):
created: datetime
scheduled: Optional[datetime] = None
started: Optional[datetime] = None
exited: Optional[datetime] = None
failed: Optional[datetime] = None
finalized: Optional[datetime] = None
canceled: Optional[datetime] = None
canceled_for: Optional[str] = None
canceled_code: Optional[CanceledCode] = None
idle_since: Optional[datetime] = None
exit_code: Optional[int] = None
message: Optional[str] = None
@field_validator(
"created", "scheduled", "started", "exited", "failed", "finalized", "canceled", "idle_since"
)
def _validate_datetime(cls, v: Optional[datetime]) -> Optional[datetime]:
if v is not None and v.year == 1:
return None
return v
@property
def current(self) -> CurrentJobStatus:
"""
Get the :class:`CurrentJobStatus`.
:raises ValueError: If status can't be determined.
"""
if self.finalized is not None:
return CurrentJobStatus.finalized
elif self.failed is not None:
return CurrentJobStatus.failed
elif self.exited is not None:
return CurrentJobStatus.exited
elif self.canceled is not None:
if self.canceled_code in {CanceledCode.system_preemption, CanceledCode.user_preemption}:
return CurrentJobStatus.preempted
else:
return CurrentJobStatus.canceled
elif self.idle_since is not None:
return CurrentJobStatus.idle
elif self.started is not None:
return CurrentJobStatus.running
elif self.scheduled is not None:
return CurrentJobStatus.scheduled
elif self.created is not None:
return CurrentJobStatus.created
else:
raise ValueError(f"Invalid status {self}")
class ExecutionResult(BaseModel):
beaker: Optional[str] = None
class JobRequests(BaseModel):
gpu_count: Optional[int] = None
cpu_count: Optional[float] = None
memory: Optional[str] = None
shared_memory: Optional[str] = None
class JobLimits(BaseModel):
cpu_count: Optional[float] = None
memory: Optional[str] = None
gpus: Tuple[str, ...] = Field(default_factory=tuple)
class JobExecution(BaseModel):
task: str
experiment: str
spec: TaskSpec
result: ExecutionResult
workspace: Optional[str] = None
class JobKind(StrEnum):
"""
The kind of job.
"""
execution = "execution"
session = "session"
class Session(BaseModel):
command: Optional[Tuple[str, ...]] = None
env_vars: Optional[Tuple[EnvVar, ...]] = None
datasets: Optional[Tuple[DataMount, ...]] = None
image: Optional[ImageSource] = None
save_image: bool = False
ports: Optional[Tuple[int, ...]] = None
ports_v2: Optional[Tuple[Tuple[int, int], ...]] = None
priority: Optional[Priority] = None
work_dir: Optional[str] = None
identity: Optional[str] = None
constraints: Optional[Dict[str, List[str]]] = None
result: Optional[ResultSpec] = None
class Job(BaseModel):
"""
A :class:`Job` is an execution of a :class:`Task`.
.. tip::
You can check a job's exit code with :data:`job.status.exit_code <JobStatus.exit_code>`.
"""
id: str
kind: JobKind
author: Account
workspace: str
status: JobStatus
name: Optional[str] = None
cluster: Optional[str] = None
execution: Optional[JobExecution] = None
node: Optional[str] = None
requests: Optional[JobRequests] = None
limits: Optional[JobLimits] = None
session: Optional[Session] = None
host_networking: bool = False
port_mappings: Optional[Dict[str, int]] = None
result: Optional[ExecutionResult] = None
@property
def display_name(self) -> str:
return self.name if self.name is not None else self.id
@property
def is_finalized(self) -> bool:
return self.status.current == CurrentJobStatus.finalized
@property
def is_done(self) -> bool:
"""
Same as :meth:`is_finalized()`, kept for backwards compatibility.
"""
return self.status.current == CurrentJobStatus.finalized
@property
def was_preempted(self) -> bool:
return self.status.canceled is not None and self.status.canceled_code in {
CanceledCode.system_preemption,
CanceledCode.user_preemption,
}
@property
def priority(self) -> Optional[Priority]:
"""
Get the priority of the job.
"""
if self.session is not None:
return self.session.priority
elif self.execution is not None:
return self.execution.spec.context.priority
else:
return None
def check(self):
"""
:raises JobFailedError: If the job failed or was canceled.
"""
from ..exceptions import JobFailedError
if self.status.exit_code is not None and self.status.exit_code > 0:
raise JobFailedError(
f"Job '{self.id}' exited with non-zero exit code ({self.status.exit_code})",
job=self,
)
elif self.status.canceled is not None:
raise JobFailedError(f"Job '{self.id}' was canceled", job=self)
elif self.status.failed is not None:
raise JobFailedError(f"Job '{self.id}' failed", job=self)
class Jobs(BaseModel):
data: Optional[Tuple[Job, ...]] = None
next: Optional[str] = None
next_cursor: Optional[str] = None
class JobStatusUpdate(BaseModel):
scheduled: Optional[bool] = None
started: Optional[bool] = None
exit_code: Optional[int] = None
failed: Optional[bool] = None
finalized: Optional[bool] = None
canceled: Optional[bool] = None
canceled_for: Optional[str] = None
canceled_code: Optional[CanceledCode] = None
idle: Optional[bool] = None
message: Optional[str] = None
class JobPatch(BaseModel):
status: Optional[JobStatusUpdate] = None
limits: Optional[JobLimits] = None
priority: Optional[Priority] = None
| beaker-py-main | beaker/data_model/job.py |
from typing import Any, Dict, List, Optional, Union
from pydantic import Field
from ..aliases import PathOrStr
from ..exceptions import *
from .base import BaseModel, StrEnum, field_validator, model_validator
__all__ = [
"ImageSource",
"EnvVar",
"DataSource",
"DataMount",
"ResultSpec",
"TaskResources",
"Priority",
"TaskContext",
"TaskSpec",
"SpecVersion",
"ExperimentSpec",
"Constraints",
]
class ImageSource(BaseModel, frozen=False):
"""
ImageSource describes where Beaker can find a task's image.
Beaker will automatically pull, or download, this image immediately before running the task.
.. attention::
One of either 'beaker' or 'docker' must be set, but not both.
"""
beaker: Optional[str] = None
"""
The full name or ID of a Beaker image.
"""
docker: Optional[str] = None
"""
The tag of a Docker image hosted on the Docker Hub or a private registry.
.. note::
If the tag is from a private registry, the cluster on which the task will run must
be pre-configured to enable access.
"""
class EnvVar(BaseModel, frozen=False):
"""
An :class:`EnvVar` defines an environment variable within a task's container.
.. tip::
If neither 'source' nor 'secret' are set, the value of the environment variable
with default to "".
"""
name: str
"""
Name of the environment variable following Unix rules.
Environment variable names are case sensitive and must be unique.
"""
value: Optional[str] = None
"""
Literal value which can include spaces and special characters.
"""
secret: Optional[str] = None
"""
Source the enviroment variable from a secret in the experiment's workspace.
"""
class DataSource(BaseModel, frozen=False):
"""
.. attention::
Exactly one source field must be set.
"""
beaker: Optional[str] = None
"""
The full name or ID of a Beaker dataset.
.. tip::
Beaker datasets provide the best download performance and are preferred for
frequently used datasets.
"""
host_path: Optional[str] = None
"""
Path to a file or directory on the host.
The executing host must be configured to allow access to this path or one of its parent directories.
Currently the following host paths are allowed on every on-premise machine managed
by the Beaker team:
- ``/net`` for access to NFS.
- ``/raid`` for access to RAID.
- ``/var/beaker/share`` as a shared local scratch space.
"""
result: Optional[str] = None
"""
Name of a previous task whose result will be mounted.
.. important::
A result source implies a dependency, meaning this task will not run until its parent
completes successfully.
"""
secret: Optional[str] = None
"""
Name of a secret within the experiment's workspace which will be mounted as a plain-text file.
"""
@model_validator(mode="before")
def _check_exactly_one_field_set(cls, values: Dict[str, Any]) -> Dict[str, Any]:
if len([v for v in values.values() if v is not None]) != 1:
raise ValueError("Exactly one data source field must be set.")
return values
class DataMount(BaseModel, frozen=False):
"""
Describes how to mount a dataset into a task. All datasets are mounted read-only.
.. seealso::
This is used in the :data:`TaskSpec.datasets` property in :class:`TaskSpec`.
"""
source: DataSource
"""
Location from which Beaker will download the dataset.
"""
mount_path: str
"""
The mount path is where Beaker will place the dataset within the task container.
Mount paths must be absolute and may not overlap with other mounts.
.. error::
Because some environments use case-insensitive file systems, mount paths
differing only in capitalization are disallowed.
"""
sub_path: Optional[str] = None
"""
Sub-path to a file or directory within the mounted dataset.
Sub-paths may be used to mount only a portion of a dataset; files outside of the
mounted path are not downloaded.
For example, given a dataset containing a file ``/path/to/file.csv``,
setting the sub-path to ``path/to`` will result in the task seeing ``{mount_path}/file.csv``.
"""
@classmethod
def new(
cls,
mount_path: str,
sub_path: Optional[str] = None,
beaker: Optional[str] = None,
host_path: Optional[str] = None,
result: Optional[str] = None,
secret: Optional[str] = None,
) -> "DataMount":
"""
A convenience method for quickly creating a new :class:`DataMount`.
:param mount_path: The :data:`mount_path`.
:param sub_path: The :data:`sub_path`.
:param beaker: The :data:`beaker <DataSource.beaker>` argument to :class:`DataSource`.
:param host_path: The :data:`host_path <DataSource.host_path>` argument to :class:`DataSource`.
:param result: The :data:`result <DataSource.result>` argument to :class:`DataSource`.
:param url: The :data:`url <DataSource.url>` argument to :class:`DataSource`.
:param secret: The :data:`secret <DataSource.secret>` argument to :class:`DataSource`.
"""
return cls(
mount_path=mount_path,
sub_path=sub_path,
source=DataSource(
beaker=beaker,
host_path=host_path,
result=result,
secret=secret,
),
)
class ResultSpec(BaseModel, frozen=False):
"""
Describes how to capture a task's results.
Results are captured as datasets from the given location. Beaker monitors this location for
changes and periodically uploads files as they change in near-real-time.
"""
path: str
"""
Directory to which the task will write output files.
"""
class TaskResources(BaseModel, frozen=False):
"""
TaskResources describe minimum external hardware requirements which must be available for a
task to run. Generally, only a GPU request is necessary.
"""
cpu_count: Optional[float] = None
"""
Minimum number of logical CPU cores. It may be fractional.
Examples: ``4``, ``0.5``.
.. tip::
Since CPU is only limited during periods of contention, it's generally not necessary
to specify this field.
"""
gpu_count: Optional[int] = None
"""
Minimum number of GPUs. It must be non-negative.
"""
memory: Optional[str] = None
"""
Minimum available system memory as a number with unit suffix.
Examples: ``2.5GiB``, ``1024m``.
"""
shared_memory: Optional[str] = None
"""
Size of ``/dev/shm`` as a number with unit suffix. Defaults to ``5GiB``.
Examples: ``2.5GiB``, ``1024m``.
"""
class Priority(StrEnum):
"""
Defines the urgency with which a task will run.
"""
urgent = "urgent"
high = "high"
normal = "normal"
low = "low"
preemptible = "preemptible"
class TaskContext(BaseModel, frozen=False):
"""
Describes an execution environment, or how a task should be run.
.. important::
Because contexts depend on external configuration, a given context may be invalid or unavailable
if a task is re-run at a future date.
"""
cluster: Optional[str] = None
"""
The full name or ID of a Beaker cluster on which the task should run.
.. attention::
This field is deprecated. See :data:`TaskSpec.constraints` instead.
"""
priority: Optional[Priority] = None
"""
Set priority to change the urgency with which a task will run.
Tasks with higher priority are placed ahead of tasks with lower priority in the queue.
"""
@field_validator("priority")
def _validate_priority(cls, v: str) -> str:
if v is not None and v not in set(Priority):
raise ValueError(
f"Invalided 'priority'. Value must be one of {[p.value for p in Priority]} (got '{v}')."
)
return v
class Constraints(BaseModel, frozen=False, extra="allow"):
"""
Constraints are specified via the :data:`~TaskSpec.constraints` field in :class:`TaskSpec`.
This type also allows other fields that are not listed here.
"""
cluster: Optional[List[str]] = None
"""
A list of cluster names or IDs on which the task is allowed to be executed.
You are allowed to omit this field for tasks that have preemptible priority,
in which case the task will run on any cluster where you have permissions.
"""
hostname: Optional[List[str]] = None
"""
Hostname constraints.
"""
def __setitem__(self, key: str, val: List[Any]) -> None:
setattr(self, key, val)
class TaskSpec(BaseModel, frozen=False):
"""
A :class:`TaskSpec` defines a :class:`~beaker.data_model.experiment.Task` within an :class:`ExperimentSpec`.
Tasks are Beaker's fundamental unit of work.
A Beaker experiment may contain multiple tasks.
A task may also depend on the results of another task in its experiment,
creating an execution graph.
"""
image: ImageSource
"""
A base image to run, usually built with Docker.
"""
result: ResultSpec
"""
Where the task will place output files.
"""
context: TaskContext
"""
Context describes how and where this task should run.
"""
constraints: Optional[Constraints] = None
"""
Each task can have many constraints. And each constraint can have many values.
Constraints are rules that change where a task is executed,
by influencing the scheduler's placement of the workload.
.. important::
Because constraints depend on external configuration, a given constraints may be invalid or unavailable
if a task is re-run at a future date.
"""
name: Optional[str] = None
"""
Name is used for display and to refer to the task throughout the spec.
It must be unique among all tasks within its experiment.
"""
command: Optional[List[str]] = None
"""
Command is the full shell command to run as a sequence of separate arguments.
If omitted, the image's default command is used, for example Docker's ``ENTRYPOINT`` directive.
If set, default commands such as Docker's ``ENTRYPOINT`` and ``CMD`` directives are ignored.
Example: ``["python", "-u", "main.py"]``
"""
arguments: Optional[List[str]] = None
"""
Arguments are appended to the command and replace default arguments such as Docker's ``CMD`` directive.
If ``command`` is omitted, arguments are appended to the default command, Docker's ``ENTRYPOINT`` directive.
Example: If ``command`` is ``["python", "-u", "main.py"]``, specifying arguments
``["--quiet", "some-arg"]`` will run the command ``python -u main.py --quiet some-arg``.
"""
env_vars: Optional[List[EnvVar]] = None
"""
Sequence of environment variables passed to the container.
"""
datasets: Optional[List[DataMount]] = None
"""
External data sources mounted into the task as files.
"""
resources: Optional[TaskResources] = None
"""
External hardware requirements, such as memory or GPU devices.
"""
host_networking: bool = False
"""
Enables the task to use the host's network.
"""
replicas: Optional[int] = None
"""
The number of replica tasks to create based on this template.
"""
leader_selection: bool = False
"""
Enables leader selection for the replicas and passes the leader's hostname to the replicas.
"""
@classmethod
def new(
cls,
name: str,
cluster: Optional[Union[str, List[str]]] = None,
beaker_image: Optional[str] = None,
docker_image: Optional[str] = None,
result_path: str = "/unused",
priority: Optional[Union[str, Priority]] = None,
**kwargs,
) -> "TaskSpec":
"""
A convenience method for quickly creating a new :class:`TaskSpec`.
:param name: The :data:`name` of the task.
:param cluster: The cluster or clusters where the experiment can run.
.. tip::
Omitting the cluster will allow your experiment to run on *any* on-premise
cluster, but you can only do this with preemptible jobs.
:param beaker_image: The :data:`beaker <ImageSource.beaker>` image name in the
:data:`image` source.
.. important::
Mutually exclusive with ``docker_image``.
:param docker_image: The :data:`docker <ImageSource.docker>` image name in the
:data:`image` source.
.. important::
Mutually exclusive with ``beaker_image``.
:param priority: The :data:`priority <TaskContext.priority>` of the :data:`context`.
:param kwargs: Additional kwargs are passed as-is to :class:`TaskSpec`.
:examples:
>>> task_spec = TaskSpec.new(
... "hello-world",
... cluster="ai2/cpu-cluster",
... docker_image="hello-world",
... )
"""
constraints = kwargs.pop("constraints", None)
if constraints is not None and not isinstance(constraints, Constraints):
constraints = Constraints(**constraints)
if cluster is not None:
if constraints is not None and constraints.cluster:
raise ValueError("'cluster' can only be specified one way")
if isinstance(cluster, list):
if constraints is not None:
constraints.cluster = cluster
else:
constraints = Constraints(cluster=cluster)
elif isinstance(cluster, str):
if constraints is not None:
constraints.cluster = [cluster]
else:
constraints = Constraints(cluster=[cluster])
return TaskSpec(
name=name,
image=ImageSource(beaker=beaker_image, docker=docker_image),
result=ResultSpec(path=result_path),
context=TaskContext(priority=None if priority is None else Priority(priority)),
constraints=constraints,
**kwargs,
)
def with_image(self, **kwargs) -> "TaskSpec":
"""
Return a new :class:`TaskSpec` with the given :data:`image`.
:param kwargs: Key-word arguments that are passed directly to :class:`ImageSource`.
:examples:
>>> task_spec = TaskSpec.new(
... "hello-world",
... docker_image="hello-world",
... ).with_image(beaker="hello-world")
>>> assert task_spec.image.beaker == "hello-world"
"""
return self.model_copy(deep=True, update={"image": ImageSource(**kwargs)})
def with_result(self, **kwargs) -> "TaskSpec":
"""
Return a new :class:`TaskSpec` with the given :data:`result`.
:param kwargs: Key-word arguments that are passed directly to :class:`ResultSpec`.
:examples:
>>> task_spec = TaskSpec.new(
... "hello-world",
... docker_image="hello-world",
... ).with_result(path="/output")
>>> assert task_spec.result.path == "/output"
"""
return self.model_copy(deep=True, update={"result": ResultSpec(**kwargs)})
def with_context(self, **kwargs) -> "TaskSpec":
"""
Return a new :class:`TaskSpec` with the given :data:`context`.
:param kwargs: Key-word arguments that are passed directly to :class:`TaskContext`.
:examples:
>>> task_spec = TaskSpec.new(
... "hello-world",
... docker_image="hello-world",
... ).with_context(cluster="ai2/general-cirrascale")
>>> assert task_spec.context.cluster == "ai2/general-cirrascale"
"""
return self.model_copy(deep=True, update={"context": TaskContext(**kwargs)})
def with_name(self, name: str) -> "TaskSpec":
"""
Return a new :class:`TaskSpec` with the given :data:`name`.
:param name: The new name.
:examples:
>>> task_spec = TaskSpec.new(
... "hello-world",
... docker_image="hello-world",
... ).with_name("Hi there!")
>>> assert task_spec.name == "Hi there!"
"""
return self.model_copy(deep=True, update={"name": name})
def with_command(self, command: List[str]) -> "TaskSpec":
"""
Return a new :class:`TaskSpec` with the given :data:`command`.
:param command: The new command.
:examples:
>>> task_spec = TaskSpec.new(
... "hello-world",
... docker_image="hello-world",
... ).with_command(["echo"])
>>> assert task_spec.command == ["echo"]
"""
return self.model_copy(deep=True, update={"command": command})
def with_arguments(self, arguments: List[str]) -> "TaskSpec":
"""
Return a new :class:`TaskSpec` with the given :data:`arguments`.
:param arguments: The new arguments.
:examples:
>>> task_spec = TaskSpec.new(
... "hello-world",
... docker_image="hello-world",
... ).with_arguments(["Hello", "World!"])
>>> assert task_spec.arguments == ["Hello", "World!"]
"""
return self.model_copy(deep=True, update={"arguments": arguments})
def with_resources(self, **kwargs) -> "TaskSpec":
"""
Return a new :class:`TaskSpec` with the given :data:`resources`.
:param kwargs: Key-word arguments are passed directly to :class:`TaskResources`.
:examples:
>>> task_spec = TaskSpec.new(
... "hello-world",
... docker_image="hello-world",
... ).with_resources(gpu_count=2)
>>> assert task_spec.resources.gpu_count == 2
"""
return self.model_copy(deep=True, update={"resources": TaskResources(**kwargs)})
def with_dataset(self, mount_path: str, **kwargs) -> "TaskSpec":
"""
Return a new :class:`TaskSpec` with an additional input :data:`dataset <datasets>`.
:param mount_path: The :data:`mount_path <DataMount>` of the :class:`DataMount`.
:param kwargs: Additional kwargs are passed as-is to :meth:`DataMount.new()`.
:examples:
>>> task_spec = TaskSpec.new(
... "hello-world",
... docker_image="hello-world",
... ).with_dataset("/data/foo", beaker="foo")
>>> assert task_spec.datasets
"""
return self.model_copy(
deep=True,
update={
"datasets": [d.model_copy(deep=True) for d in self.datasets or []]
+ [DataMount.new(mount_path, **kwargs)]
},
)
def with_env_var(
self, name: str, value: Optional[str] = None, secret: Optional[str] = None
) -> "TaskSpec":
"""
Return a new :class:`TaskSpec` with an additional input :data:`env_var <env_vars>`.
:param name: The :data:`name <EnvVar.name>` of the :class:`EnvVar`.
:param value: The :data:`value <EnvVar.value>` of the :class:`EnvVar`.
:param secret: The :data:`secret <EnvVar.secret>` of the :class:`EnvVar`.
:examples:
>>> task_spec = TaskSpec.new(
... "hello-world",
... docker_image="hello-world",
... env_vars=[EnvVar(name="bar", value="secret!")],
... ).with_env_var("baz", value="top, top secret")
>>> assert len(task_spec.env_vars) == 2
"""
return self.model_copy(
deep=True,
update={
"env_vars": [d.model_copy(deep=True) for d in self.env_vars or []]
+ [EnvVar(name=name, value=value, secret=secret)]
},
)
def with_constraint(self, **kwargs: List[str]) -> "TaskSpec":
"""
Return a new :class:`TaskSpec` with the given :data:`constraints`.
:param kwargs: Constraint name, constraint values.
:examples:
>>> task_spec = TaskSpec.new(
... "hello-world",
... docker_image="hello-world",
... ).with_constraint(cluster=['ai2/cpu-cluster'])
>>> assert task_spec.constraints['cluster'] == ['ai2/cpu-cluster']
"""
constraints = (
Constraints(**kwargs)
if self.constraints is None
else self.constraints.model_copy(deep=True, update=kwargs)
)
return self.model_copy(
deep=True,
update={
"constraints": constraints,
},
)
class SpecVersion(StrEnum):
v2 = "v2"
v2_alpha = "v2-alpha"
class ExperimentSpec(BaseModel, frozen=False):
"""
Experiments are the main unit of execution in Beaker.
An :class:`ExperimentSpec` defines an :class:`~beaker.data_model.experiment.Experiment`.
:examples:
>>> spec = ExperimentSpec(
... tasks=[
... TaskSpec(
... name="hello",
... image=ImageSource(docker="hello-world"),
... context=TaskContext(cluster="ai2/cpu-only"),
... result=ResultSpec(
... path="/unused" # required even if the task produces no output.
... ),
... ),
... ],
... )
"""
tasks: List[TaskSpec] = Field(default_factory=tuple)
"""
Specifications for each process to run.
"""
version: SpecVersion = SpecVersion.v2
"""
Must be 'v2' for now.
"""
description: Optional[str] = None
"""
Long-form explanation for an experiment.
"""
@field_validator("tasks")
def _validate_tasks(cls, v: List[TaskSpec]) -> List[TaskSpec]:
task_names = set()
for task in v:
if task.name is None:
continue
if task.name in task_names:
raise ValueError(f"Duplicate task name '{task.name}'")
else:
task_names.add(task.name)
return v
@classmethod
def from_file(cls, path: PathOrStr) -> "ExperimentSpec":
"""
Load an :class:`ExperimentSpec` from a YAML file.
"""
import yaml
with open(path) as spec_file:
raw_spec = yaml.load(spec_file, Loader=yaml.SafeLoader)
return cls.from_json(raw_spec)
@classmethod
def new(
cls,
task_name: str = "main",
description: Optional[str] = None,
cluster: Optional[Union[str, List[str]]] = None,
beaker_image: Optional[str] = None,
docker_image: Optional[str] = None,
result_path: str = "/unused",
priority: Optional[Union[str, Priority]] = None,
**kwargs,
) -> "ExperimentSpec":
"""
A convenience method for creating a new :class:`ExperimentSpec` with a single task.
:param task_name: The name of the task.
:param description: A description of the experiment.
:param cluster: The cluster or clusters where the experiment can run.
.. tip::
Omitting the cluster will allow your experiment to run on *any* on-premise
cluster, but you can only do this with preemptible jobs.
:param beaker_image: The :data:`beaker <ImageSource.beaker>` image name in the
:data:`image` source.
.. important::
Mutually exclusive with ``docker_image``.
:param docker_image: The :data:`docker <ImageSource.docker>` image name in the
:data:`image` source.
.. important::
Mutually exclusive with ``beaker_image``.
:param priority: The :data:`priority <TaskContext.priority>` of the :data:`context`.
:param kwargs: Additional kwargs are passed as-is to :class:`TaskSpec`.
:examples:
Create a preemptible experiment that can run an any on-premise cluster:
>>> spec = ExperimentSpec.new(
... docker_image="hello-world",
... priority=Priority.preemptible,
... )
"""
return cls(
description=description,
tasks=[
TaskSpec.new(
task_name,
cluster=cluster,
beaker_image=beaker_image,
docker_image=docker_image,
result_path=result_path,
priority=priority,
**kwargs,
)
],
)
def to_file(self, path: PathOrStr) -> None:
"""
Write the experiment spec to a YAML file.
"""
import yaml
raw_spec = self.to_json()
with open(path, "wt") as spec_file:
yaml.dump(raw_spec, spec_file, Dumper=yaml.SafeDumper)
def with_task(self, task: TaskSpec) -> "ExperimentSpec":
"""
Return a new :class:`ExperimentSpec` with an additional task.
:param task: The task to add.
:examples:
>>> spec = ExperimentSpec().with_task(
... TaskSpec.new(
... "hello-world",
... docker_image="hello-world",
... )
... )
"""
if task.name is not None:
for other_task in self.tasks:
if task.name == other_task.name:
raise ValueError(f"A task with the name '{task.name}' already exists")
return self.model_copy(
deep=True,
update={"tasks": [d.model_copy(deep=True) for d in self.tasks or []] + [task]},
)
def with_description(self, description: str) -> "ExperimentSpec":
"""
Return a new :class:`ExperimentSpec` with a different description.
:param description: The new description.
:examples:
>>> ExperimentSpec(description="Hello, World!").with_description(
... "Hello, Mars!"
... ).description
'Hello, Mars!'
"""
return self.model_copy(deep=True, update={"description": description})
def validate(self):
for task in self.tasks:
if (task.image.beaker is None) == (task.image.docker is None):
raise ExperimentSpecError(
"Exactly one of 'beaker' or 'docker' must be specified for image source"
)
| beaker-py-main | beaker/data_model/experiment_spec.py |
from .account import *
from .cluster import *
from .dataset import *
from .experiment import *
from .experiment_spec import *
from .group import *
from .image import *
from .job import *
from .node import *
from .organization import *
from .secret import *
from .workspace import *
| beaker-py-main | beaker/data_model/__init__.py |
from typing import Any, ClassVar, Dict, Set, Type
from pydantic import BaseModel as _BaseModel
from pydantic import ConfigDict, model_validator
from ..util import issue_data_model_warning, to_snake_case
class BaseModelV2(_BaseModel):
"""
The Pydantic v2 base class for a Beaker data models.
"""
model_config = ConfigDict(
validate_assignment=True, use_enum_values=True, frozen=True, extra="ignore"
)
IGNORE_FIELDS: ClassVar[Set[str]] = set()
@model_validator(mode="before")
def _validate_and_rename_to_snake_case( # type: ignore
cls: Type["BaseModelV2"], values: Dict[str, Any] # type: ignore
) -> Dict[str, Any]:
"""
Raw data from the Beaker server will use lower camel case.
"""
# In some cases we get an instance instead of a dict.
# We'll just punt there and hope for the best.
if not isinstance(values, dict):
return values
as_snake_case = {to_snake_case(k): v for k, v in values.items()}
for key, value in as_snake_case.items():
if (
cls.model_config["extra"] != "allow" # type: ignore
and key not in cls.model_fields
and key not in cls.IGNORE_FIELDS
):
issue_data_model_warning(cls, key, value)
return as_snake_case
| beaker-py-main | beaker/data_model/_base_v2.py |
from datetime import datetime
from typing import Optional, Tuple, Union
from urllib.parse import urlparse
from .account import Account
from .base import BaseModel, BasePage, StrEnum, field_validator
from .workspace import WorkspaceRef
__all__ = [
"DatasetStorage",
"DatasetSize",
"Dataset",
"DatasetInfo",
"DatasetInfoPage",
"Digest",
"DigestHashAlgorithm",
"FileInfo",
"DatasetsPage",
"DatasetSpec",
"DatasetPatch",
"DatasetSort",
]
class DatasetStorage(BaseModel):
id: str
token: str
token_expires: datetime
address: Optional[str] = None
url: Optional[str] = None
urlv2: Optional[str] = None
total_size: Optional[int] = None
num_files: Optional[int] = None
@field_validator("address")
def _validate_address(cls, v: Optional[str]) -> Optional[str]:
if v is not None and v.startswith("fh://"):
# HACK: fix prior to https://github.com/allenai/beaker/pull/2962
return v.replace("fh://", "https://", 1)
else:
return v
@property
def scheme(self) -> Optional[str]:
return "fh" if self.urlv2 is None else urlparse(self.urlv2).scheme
@property
def base_url(self) -> str:
if self.address is not None:
return self.address
elif self.urlv2 is not None:
return f"https://{urlparse(self.urlv2).netloc}"
else:
raise ValueError("Missing field 'urlv2' or 'address'")
class DatasetSize(BaseModel):
files: int
bytes: int
final: Optional[bool] = None
bytes_human: Optional[str] = None
class Dataset(BaseModel):
id: str
name: Optional[str] = None
full_name: Optional[str] = None
description: Optional[str] = None
author: Account
created: datetime
committed: Optional[datetime] = None
workspace_ref: WorkspaceRef
source_execution: Optional[str] = None
storage: Optional[DatasetStorage] = None
@property
def display_name(self) -> str:
return self.name if self.name is not None else self.id
@property
def workspace(self) -> WorkspaceRef:
return self.workspace_ref
@field_validator("committed")
def _validate_datetime(cls, v: Optional[datetime]) -> Optional[datetime]:
if v is not None and v.year == 1:
return None
return v
class DigestHashAlgorithm(StrEnum):
"""
Supported hash algorithms for file :class:`Digest`.
"""
SHA256 = "SHA256"
SHA512 = "SHA512"
MD5 = "MD5"
def hasher(self):
"""
Get a :mod:`hasher <hashlib>` object for the given algorithm.
.. seealso::
:meth:`Digest.new_hasher()`.
"""
import hashlib
if self == DigestHashAlgorithm.SHA256:
return hashlib.sha256()
elif self == DigestHashAlgorithm.SHA512:
return hashlib.sha512()
elif self == DigestHashAlgorithm.MD5:
return hashlib.md5()
else:
raise NotImplementedError(f"hasher() not yet implemented for {str(self)}")
class Digest(BaseModel):
"""
A digest is a checksum / hash of a files contents. These are used to verify
the integrity of files downloaded from Beaker datasets.
"""
value: str
"""
The hex-encoded value of the digest.
"""
algorithm: DigestHashAlgorithm
"""
The algorithm used to create and verify the digest.
"""
def __init__(self, *args, **kwargs):
if len(args) == 1 and "value" not in kwargs:
value = args[0]
if isinstance(value, str) and "algorithm" not in kwargs:
# Assume 'value' is the string-encoded form of a digest.
digest = Digest.from_encoded(value)
kwargs = digest.model_dump()
elif isinstance(value, str):
# Assume 'value' is the hex-encoded hash.
kwargs["value"] = value
elif isinstance(value, bytes):
# Assume 'value' is raw bytes of the hash.
digest = Digest.from_decoded(value, **kwargs)
kwargs = digest.model_dump()
super().__init__(**kwargs)
@field_validator("algorithm")
def _validate_algorithm(cls, v: Union[str, DigestHashAlgorithm]) -> DigestHashAlgorithm:
return DigestHashAlgorithm(v)
def __str__(self) -> str:
return self.encode()
def __hash__(self):
return hash(self.encode())
@classmethod
def from_encoded(cls, encoded: str) -> "Digest":
"""
Initialize a digest from a string encoding of the form ``{ALGORITHM} {ENCODED_STRING}``,
e.g. ``SHA256 iA02Sx8UNLYvMi49fDwdGjyy5ssU+ttuN1L4L3/JvZA=``.
:param encoded: The string encoding of the digest.
"""
import base64
import binascii
algorithm, value_b64 = encoded.split(" ", 1)
value_bytes = base64.standard_b64decode(value_b64)
value = binascii.hexlify(value_bytes).decode()
return cls(value=value, algorithm=DigestHashAlgorithm(algorithm))
@classmethod
def from_decoded(cls, decoded: bytes, algorithm: Union[str, DigestHashAlgorithm]) -> "Digest":
"""
Initialize a digest from raw bytes.
:param decoded: The raw bytes of the digest.
:param algorithm: The algorithm used to produce the bytes of the digest
from the contents of the corresponding file.
"""
import binascii
value = binascii.hexlify(decoded).decode()
return Digest(value=value, algorithm=DigestHashAlgorithm(algorithm))
def encode(self) -> str:
"""
Encode the digest into its string form.
This is the inverse of :meth:`.from_encoded()`.
"""
import base64
import binascii
value_bytes = binascii.unhexlify(self.value)
value_b64 = base64.standard_b64encode(value_bytes).decode()
return f"{str(self.algorithm)} {value_b64}"
def decode(self) -> bytes:
"""
Decode a digest into its raw bytes form.
This is the inverse of :meth:`.from_decoded()`.
"""
import binascii
return binascii.unhexlify(self.value)
def new_hasher(self):
"""
Get a fresh :mod:`hasher <hashlib>` object for the given algorithm.
.. seealso::
:meth:`DigestHashAlgorithm.hasher()`.
"""
return DigestHashAlgorithm(self.algorithm).hasher()
class FileInfo(BaseModel, arbitrary_types_allowed=True):
path: str
"""
The path of the file within the dataset.
"""
updated: datetime
"""
The time that the file was last updated.
"""
digest: Optional[Digest] = None
"""
The digest of the contents of the file.
"""
size: Optional[int] = None
"""
The size of the file in bytes, if known.
"""
IGNORE_FIELDS = {"url"}
@field_validator("digest", mode="before")
def _validate_digest(cls, v: Union[str, Digest, None]) -> Optional[Digest]:
if isinstance(v, Digest):
return v
elif isinstance(v, str):
return Digest.from_encoded(v)
elif isinstance(v, dict):
return Digest(**v)
else:
raise ValueError(f"Unexpected value for 'digest': {v}")
class DatasetsPage(BasePage[Dataset]):
data: Tuple[Dataset, ...]
class DatasetInfoPage(BasePage[FileInfo]):
data: Tuple[FileInfo, ...]
class DatasetInfo(BaseModel):
page: DatasetInfoPage
size: DatasetSize
class DatasetSpec(BaseModel):
workspace: Optional[str] = None
description: Optional[str] = None
class DatasetPatch(BaseModel):
name: Optional[str] = None
description: Optional[str] = None
commit: Optional[bool] = None
class DatasetSort(StrEnum):
created = "created"
author = "author"
dataset_name = "name"
dataset_name_or_description = "nameOrDescription"
| beaker-py-main | beaker/data_model/dataset.py |
from typing import Any, ClassVar, Dict, Optional, Set, Type
from pydantic import BaseModel as _BaseModel
from pydantic import root_validator, validator
from ..util import issue_data_model_warning, to_snake_case
def field_validator(*fields: str, mode: str = "after"):
return validator(*fields, pre=mode == "before")
def model_validator(mode: str = "after"):
return root_validator(pre=mode == "before") # type: ignore
class BaseModelV1(_BaseModel):
"""
The Pydantic v1 base class for all Beaker data models.
"""
class Config:
validate_assignment = True
use_enum_values = True
frozen = True
extra = "ignore"
IGNORE_FIELDS: ClassVar[Set[str]] = set()
@root_validator(pre=True)
def _validate_and_rename_to_snake_case( # type: ignore
cls: Type["BaseModelV1"], values: Dict[str, Any] # type: ignore
) -> Dict[str, Any]:
"""
Raw data from the Beaker server will use lower camel case.
"""
# In some cases we get an instance instead of a dict.
# We'll just punt there and hope for the best.
if not isinstance(values, dict):
return values
as_snake_case = {to_snake_case(k): v for k, v in values.items()}
for key, value in as_snake_case.items():
if (
cls.__config__.extra != "allow" # type: ignore
and key not in cls.__fields__ # type: ignore
and key not in cls.IGNORE_FIELDS
):
issue_data_model_warning(cls, key, value)
return as_snake_case
def model_copy(self, update: Optional[Dict[str, Any]] = None, deep: bool = False):
return self.copy(update=update, deep=deep)
def model_dump(self, *args, **kwargs):
return self.dict(*args, **kwargs)
| beaker-py-main | beaker/data_model/_base_v1.py |
from datetime import datetime
from typing import List, Optional, Tuple
from pydantic import Field
from .account import Account
from .base import BaseModel, BasePage, MappedSequence, StrEnum
from .job import Job
from .workspace import WorkspaceRef
__all__ = ["Experiment", "Task", "Tasks", "ExperimentsPage", "ExperimentPatch", "ExperimentSort"]
class Experiment(BaseModel):
id: str
name: Optional[str] = None
full_name: Optional[str] = None
description: Optional[str] = None
author: Account
created: datetime
workspace_ref: WorkspaceRef
jobs: Tuple[Job, ...] = Field(default_factory=tuple)
@property
def display_name(self) -> str:
return self.name if self.name is not None else self.id
@property
def workspace(self) -> WorkspaceRef:
return self.workspace_ref
class Task(BaseModel):
id: str
name: Optional[str] = None
experiment_id: str
author: Account
created: datetime
schedulable: bool = False
jobs: Tuple[Job, ...] = Field(default_factory=tuple)
owner: Optional[Account] = None
replica_rank: Optional[int] = None
@property
def display_name(self) -> str:
return self.name if self.name is not None else self.id
@property
def latest_job(self) -> Optional[Job]:
if not self.jobs:
return None
return sorted(self.jobs, key=lambda job: job.status.created)[-1]
class Tasks(MappedSequence[Task]):
"""
A sequence of :class:`Task` that also behaves like a mapping of task names to tasks,
i.e. you can use ``get()`` or ``__getitem__()`` with the name of the task.
"""
def __init__(self, tasks: List[Task]):
super().__init__(tasks, {task.name: task for task in tasks if task.name is not None})
class ExperimentsPage(BasePage[Experiment]):
data: Tuple[Experiment, ...]
class ExperimentPatch(BaseModel):
name: Optional[str] = None
description: Optional[str] = None
class ExperimentSort(StrEnum):
created = "created"
author = "author"
experiment_name = "name"
experiment_name_or_description = "nameOrDescription"
| beaker-py-main | beaker/data_model/experiment.py |
from datetime import datetime
from .base import BaseModel
__all__ = ["Secret"]
class Secret(BaseModel):
name: str
created: datetime
updated: datetime
| beaker-py-main | beaker/data_model/secret.py |
from datetime import datetime
from typing import Optional, Tuple
from .base import BaseModel, StrEnum, field_validator
from .node import NodeResources, NodeUtilization
__all__ = ["ClusterStatus", "Cluster", "ClusterUtilization", "ClusterSpec", "ClusterPatch"]
class ClusterStatus(StrEnum):
"""
Current status of a cluster.
"""
pending = "pending"
active = "active"
terminated = "terminated"
failed = "failed"
class Cluster(BaseModel):
id: str
name: str
full_name: str
created: datetime
autoscale: bool
capacity: int
preemptible: bool
status: ClusterStatus
status_message: Optional[str] = None
node_spec: Optional[NodeResources] = None
"""
The requested node configuration.
"""
node_shape: Optional[NodeResources] = None
"""
The actual node configuration.
"""
node_cost: Optional[str] = None
validated: Optional[datetime] = None
user_restrictions: Optional[Tuple[str, ...]] = None
allow_preemptible_restriction_exceptions: Optional[bool] = None
compute_source: Optional[str] = None
@field_validator("validated")
def _validate_datetime(cls, v: Optional[datetime]) -> Optional[datetime]:
if v is not None and v.year == 1:
return None
return v
@field_validator("node_spec")
def _validate_node_spec(cls, v: Optional[NodeResources]) -> Optional[NodeResources]:
if v is not None and not v.to_json():
return None
return v
@property
def is_cloud(self) -> bool:
"""
Returns ``True`` is the cluster is a cloud cluster, otherwise ``False``.
"""
return self.node_shape is not None and self.node_spec is not None
@property
def is_active(self) -> bool:
"""
Returns ``True`` if the cluster is ready to be used.
"""
return not self.is_cloud or self.status == ClusterStatus.active
class ClusterUtilization(BaseModel):
cluster: Cluster
running_jobs: int
queued_jobs: int
running_preemptible_jobs: int
nodes: Tuple[NodeUtilization, ...]
@property
def id(self) -> str:
return self.cluster.id
class ClusterSpec(BaseModel):
name: str
capacity: int
preemptible: bool
spec: NodeResources
class ClusterPatch(BaseModel):
capacity: int
| beaker-py-main | beaker/data_model/cluster.py |
from datetime import datetime
from typing import List, Optional, Tuple
from .account import Account
from .base import BaseModel, BasePage, StrEnum
from .workspace import WorkspaceRef
__all__ = [
"Group",
"GroupSpec",
"GroupParameterType",
"GroupParameter",
"GroupPatch",
"GroupsPage",
"GroupSort",
]
class Group(BaseModel):
id: str
name: Optional[str] = None
full_name: Optional[str] = None
owner: Account
author: Account
created: datetime
modified: datetime
workspace_ref: Optional[WorkspaceRef] = None
description: Optional[str] = None
@property
def workspace(self) -> Optional[WorkspaceRef]:
return self.workspace_ref
class GroupSpec(BaseModel):
workspace: Optional[str] = None
name: Optional[str] = None
description: Optional[str] = None
experiments: Optional[List[str]] = None
class GroupParameterType(StrEnum):
metric = "metric"
env = "env"
class GroupParameter(BaseModel):
type: GroupParameterType
name: str
class GroupPatch(BaseModel):
name: Optional[str] = None
description: Optional[str] = None
add_experiments: Optional[List[str]] = None
remove_experiments: Optional[List[str]] = None
parameters: Optional[List[GroupParameter]] = None
class GroupsPage(BasePage[Group]):
data: Tuple[Group, ...]
class GroupSort(StrEnum):
created = "created"
modified = "modified"
author = "author"
group_name = "name"
group_name_or_description = "nameOrDescription"
| beaker-py-main | beaker/data_model/group.py |
from datetime import datetime
from typing import Optional
from .base import BaseModel
__all__ = ["NodeResources", "Node", "NodeUtilization"]
class NodeResources(BaseModel):
cpu_count: Optional[float] = None
memory: Optional[str] = None
gpu_count: Optional[int] = None
gpu_type: Optional[str] = None
class Node(BaseModel):
id: str
hostname: str
created: datetime
limits: NodeResources
expiry: Optional[datetime] = None
cordoned: Optional[datetime] = None
cordon_reason: Optional[str] = None
cordon_agent_id: Optional[str] = None
cluster_id: Optional[str] = None
account_id: Optional[str] = None
class NodeUtilization(BaseModel):
id: str
hostname: str
limits: NodeResources
running_jobs: int
running_preemptible_jobs: int
used: NodeResources
free: NodeResources
cordoned: bool = False
| beaker-py-main | beaker/data_model/node.py |
from datetime import datetime
from typing import Dict, List, Optional, Tuple
from .account import Account
from .base import BaseModel, BasePage, StrEnum
__all__ = [
"WorkspaceSize",
"Workspace",
"WorkspaceRef",
"WorkspacePage",
"WorkspaceSpec",
"WorkspaceTransferSpec",
"Permission",
"WorkspacePermissions",
"WorkspacePatch",
"WorkspacePermissionsPatch",
"WorkspaceClearResult",
"WorkspaceSort",
]
class WorkspaceSize(BaseModel):
datasets: int
experiments: int
groups: int
images: int
class Workspace(BaseModel):
id: str
name: str
full_name: str
description: Optional[str] = None
size: WorkspaceSize
owner: Account
author: Account
created: datetime
modified: datetime
archived: bool = False
class WorkspaceRef(BaseModel):
id: str
name: str
full_name: str
class WorkspacePage(BasePage[Workspace]):
data: Tuple[Workspace, ...]
org: Optional[str] = None
class WorkspaceSpec(BaseModel):
name: Optional[str] = None
description: Optional[str] = None
public: bool = False
org: Optional[str] = None
class WorkspaceTransferSpec(BaseModel):
ids: List[str]
class Permission(StrEnum):
"""
Workspace permission levels.
"""
no_permission = "none"
read = "read"
write = "write"
full_control = "all"
class WorkspacePermissions(BaseModel):
requester_auth: str
public: bool
authorizations: Optional[Dict[str, Permission]] = None
"""
A dictionary of account IDs to authorizations.
"""
class WorkspacePatch(BaseModel):
name: Optional[str] = None
description: Optional[str] = None
archive: Optional[bool] = None
class WorkspacePermissionsPatch(BaseModel):
public: Optional[bool] = None
authorizations: Optional[Dict[str, Permission]] = None
class WorkspaceClearResult(BaseModel):
groups_deleted: int = 0
experiments_deleted: int = 0
images_deleted: int = 0
datasets_deleted: int = 0
secrets_deleted: int = 0
class WorkspaceSort(StrEnum):
created = "created"
modified = "modified"
workspace_name = "name"
| beaker-py-main | beaker/data_model/workspace.py |
from datetime import datetime
from typing import Optional, Tuple
from .account import Account
from .base import BaseModel, BasePage, StrEnum, field_validator
from .workspace import WorkspaceRef
__all__ = [
"Image",
"ImagesPage",
"ImageRepoAuth",
"ImageRepo",
"DockerLayerProgress",
"DockerLayerUploadStatus",
"DockerLayerDownloadStatus",
"DockerLayerUploadState",
"DockerLayerDownloadState",
"ImageSpec",
"ImagePatch",
"ImageSort",
]
class Image(BaseModel):
id: str
owner: Account
author: Account
created: datetime
workspace_ref: WorkspaceRef
original_tag: Optional[str] = None
docker_tag: Optional[str] = None
name: Optional[str] = None
full_name: Optional[str] = None
description: Optional[str] = None
committed: Optional[datetime] = None
size: Optional[int] = None
@property
def display_name(self) -> str:
return self.name if self.name is not None else self.id
@property
def workspace(self) -> WorkspaceRef:
return self.workspace_ref
@field_validator("committed")
def _validate_datetime(cls, v: Optional[datetime]) -> Optional[datetime]:
if v is not None and v.year == 1:
return None
return v
class ImagesPage(BasePage[Image]):
data: Tuple[Image, ...]
class ImageRepoAuth(BaseModel):
user: str
password: str
server_address: str
class ImageRepo(BaseModel):
image_tag: str
auth: ImageRepoAuth
class DockerLayerProgress(BaseModel):
current: Optional[int] = None
total: Optional[int] = None
class DockerLayerUploadStatus(StrEnum):
preparing = "preparing"
waiting = "waiting"
pushing = "pushing"
pushed = "pushed"
already_exists = "layer already exists"
class DockerLayerDownloadStatus(StrEnum):
waiting = "waiting"
downloading = "downloading"
download_complete = "download complete"
verifying_checksum = "verifying checksum"
extracting = "extracting"
pull_complete = "pull complete"
already_exists = "already exists"
class DockerLayerUploadState(BaseModel):
id: str
status: DockerLayerUploadStatus
progress_detail: DockerLayerProgress
progress: Optional[str] = None
@field_validator("status", mode="before")
def _validate_status(cls, v: str) -> str:
return v.lower()
class DockerLayerDownloadState(BaseModel):
id: str
status: DockerLayerDownloadStatus
progress_detail: DockerLayerProgress
progress: Optional[str] = None
@field_validator("status", mode="before")
def _validate_status(cls, v: str) -> str:
return v.lower()
class ImageSpec(BaseModel):
workspace: Optional[str] = None
image_id: Optional[str] = None
image_tag: Optional[str] = None
description: Optional[str] = None
class ImagePatch(BaseModel):
name: Optional[str] = None
description: Optional[str] = None
commit: Optional[bool] = None
class ImageSort(StrEnum):
created = "created"
author = "author"
image_name = "name"
| beaker-py-main | beaker/data_model/image.py |
import logging
from enum import Enum
from typing import (
Any,
Dict,
Generic,
Iterator,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
from pydantic import ValidationError
from ..util import to_lower_camel, to_snake_case
try:
from pydantic import field_validator, model_validator
from ._base_v2 import BaseModelV2 as _BaseModel
except ImportError:
from ._base_v1 import BaseModelV1 as _BaseModel # type: ignore
from ._base_v1 import field_validator, model_validator # type: ignore
T = TypeVar("T")
logger = logging.getLogger("beaker")
__all__ = [
"BaseModel",
"MappedSequence",
"StrEnum",
"IntEnum",
"BasePage",
"field_validator",
"model_validator",
]
class BaseModel(_BaseModel): # type: ignore
"""
The base class for all Beaker data models.
"""
def __str__(self) -> str:
return self.__repr__()
def __getitem__(self, key):
try:
return self.model_dump()[key] # type: ignore
except KeyError:
if not key.islower():
snake_case_key = to_snake_case(key)
try:
return self.model_dump()[snake_case_key] # type: ignore
except KeyError:
pass
raise
@classmethod
def from_json(cls: Type[T], json_data: Dict[str, Any]) -> T:
try:
return cls(**json_data)
except ValidationError:
logger.error("Error validating raw JSON data for %s: %s", cls.__name__, json_data)
raise
def to_json(self) -> Dict[str, Any]:
return self.jsonify(self)
@classmethod
def jsonify(cls, x: Any) -> Any:
if isinstance(x, BaseModel):
return {
to_lower_camel(key): cls.jsonify(value) for key, value in x if value is not None # type: ignore
}
elif isinstance(x, Enum):
return cls.jsonify(x.value)
elif isinstance(x, (str, float, int, bool)):
return x
elif isinstance(x, dict):
return {key: cls.jsonify(value) for key, value in x.items()}
elif isinstance(x, (list, tuple, set)):
return [cls.jsonify(x_i) for x_i in x]
else:
return x
class MappedSequence(Sequence[T], Mapping[str, T]):
def __init__(self, sequence: Sequence[T], mapping: Mapping[str, T]):
self._sequence = sequence
self._mapping = mapping
def __getitem__(self, k) -> Union[T, Sequence[T]]: # type: ignore[override]
if isinstance(k, (int, slice)):
return self._sequence[k]
elif isinstance(k, str):
return self._mapping[k]
else:
raise TypeError("keys must be integers, slices, or strings")
def __contains__(self, k) -> bool:
if isinstance(k, str):
return k in self._mapping
else:
return k in self._sequence
def __iter__(self) -> Iterator[T]:
return iter(self._sequence)
def __len__(self) -> int:
return len(self._sequence)
def keys(self):
return self._mapping.keys()
def values(self):
return self._mapping.values()
class StrEnum(str, Enum):
def __str__(self) -> str:
return self.value
class IntEnum(int, Enum):
def __str__(self) -> str:
return str(self.value)
class BasePage(BaseModel, Generic[T]):
data: Tuple[T, ...]
next_cursor: Optional[str] = None
next: Optional[str] = None
| beaker-py-main | beaker/data_model/base.py |
from typing import Optional
from .base import BaseModel
__all__ = ["Account"]
class Account(BaseModel):
id: str
name: str
display_name: str
institution: Optional[str] = None
pronouns: Optional[str] = None
email: Optional[str] = None
| beaker-py-main | beaker/data_model/account.py |
from typing import List, Optional, Union
from ..data_model import *
from ..exceptions import *
from .service_client import ServiceClient
class OrganizationClient(ServiceClient):
"""
Accessed via :data:`Beaker.organization <beaker.Beaker.organization>`.
"""
def get(self, org: Optional[str] = None) -> Organization:
"""
Get information about an organization.
:param org: The organization name or ID. If not specified,
:data:`Beaker.config.default_org <beaker.Config.default_org>` is used.
:raises OrganizationNotFound: If the organization doesn't exist.
:raises OrganizationNotSet: If neither ``org`` nor
:data:`Beaker.config.default_org <beaker.Config.default_org>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
org = org or self.config.default_org
if org is None:
raise OrganizationNotSet("'org' argument required since default org not set")
return Organization.from_json(
self.request(
f"orgs/{self.url_quote(org)}",
method="GET",
exceptions_for_status={404: OrganizationNotFound(org)},
).json()
)
def add_member(
self, account: Union[str, Account], org: Optional[Union[str, Organization]] = None
) -> OrganizationMember:
"""
Add an account to an organization.
:param account: The account name or object.
:param org: The organization name or object. If not specified,
:data:`Beaker.config.default_org <beaker.Config.default_org>` is used.
:raises OrganizationNotFound: If the organization doesn't exist.
:raises OrganizationNotSet: If neither ``org`` nor
:data:`Beaker.config.default_org <beaker.Config.default_org>` are set.
:raises AccountNotFound: If the account doesn't exist.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
org = self.resolve_org(org)
account_name = account if isinstance(account, str) else account.name
self.request(
f"orgs/{self.url_quote(org.name)}/members/{account_name}",
method="PUT",
exceptions_for_status={404: AccountNotFound(account_name)},
)
return self.get_member(account_name, org=org)
def get_member(
self, account: Union[str, Account], org: Optional[Union[str, Organization]] = None
) -> OrganizationMember:
"""
Get information about an organization member.
:param account: The account name or object.
:param org: The organization name or object. If not specified,
:data:`Beaker.config.default_org <beaker.Config.default_org>` is used.
:raises OrganizationNotFound: If the organization doesn't exist.
:raises OrganizationNotSet: If neither ``org`` nor
:data:`Beaker.config.default_org <beaker.Config.default_org>` are set.
:raises AccountNotFound: If the account doesn't exist or isn't a member of the org.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
org = self.resolve_org(org)
account_name = account if isinstance(account, str) else account.name
return OrganizationMember.from_json(
self.request(
f"orgs/{self.url_quote(org.name)}/members/{account_name}",
method="GET",
exceptions_for_status={404: AccountNotFound(account_name)},
).json()
)
def list_members(self, org: Optional[Union[str, Organization]] = None) -> List[Account]:
"""
List members of an organization.
:param org: The organization name or object. If not specified,
:data:`Beaker.config.default_org <beaker.Config.default_org>` is used.
:raises OrganizationNotFound: If the organization doesn't exist.
:raises OrganizationNotSet: If neither ``org`` nor
:data:`Beaker.config.default_org <beaker.Config.default_org>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
org = self.resolve_org(org)
return [
Account.from_json(d)
for d in self.request(
f"orgs/{self.url_quote(org.name)}/members",
method="GET",
exceptions_for_status={404: OrganizationNotFound(org.name)},
).json()["data"]
]
def remove_member(
self, account: Union[str, Account], org: Optional[Union[str, Organization]] = None
):
"""
Remove a member from an organization.
:param account: The account name or object.
:param org: The organization name or object. If not specified,
:data:`Beaker.config.default_org <beaker.Config.default_org>` is used.
:raises OrganizationNotFound: If the organization doesn't exist.
:raises OrganizationNotSet: If neither ``org`` nor
:data:`Beaker.config.default_org <beaker.Config.default_org>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
org = self.resolve_org(org)
account_name = account if isinstance(account, str) else account.name
self.request(
f"orgs/{self.url_quote(org.name)}/members/{account_name}",
method="DELETE",
exceptions_for_status={404: AccountNotFound(account_name)},
)
| beaker-py-main | beaker/services/organization.py |
import time
from collections import defaultdict
from datetime import datetime, timedelta
from typing import TYPE_CHECKING, Any, Dict, Generator, List, Optional, Set, Union
from ..data_model import *
from ..exceptions import *
from .service_client import ServiceClient
if TYPE_CHECKING:
from rich.progress import Progress, TaskID
class JobClient(ServiceClient):
"""
Accessed via :data:`Beaker.job <beaker.Beaker.job>`.
"""
def get(self, job_id: str) -> Job:
"""
Get information about a job.
:param job_id: The ID of the Beaker job.
:raises JobNotFound: If the job can't be found.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
return Job.from_json(
self.request(
f"jobs/{job_id}",
exceptions_for_status={404: JobNotFound(job_id)},
).json()
)
def list(
self,
cluster: Optional[Union[str, Cluster]] = None,
experiment: Optional[Union[str, Experiment]] = None,
finalized: bool = False,
kind: Optional[JobKind] = None,
node: Optional[Union[str, Node]] = None,
) -> List[Job]:
"""
List jobs.
:param cluster: List jobs on a cluster.
:param experiment: List jobs in an experiment.
:param finalized: List only finalized or non-finalized jobs.
:param kind: List jobs of a certain kind.
:param node: List jobs on a node.
.. important::
Either ``cluster``, ``experiment``, or ``node`` must be specified.
If ``node`` is specified, neither ``cluster`` nor ``experiment`` can be
specified.
:raises ValueError: If the arguments are invalid, e.g. both ``node`` and
``cluster`` are specified.
:raises ClusterNotFound: If the specified cluster doesn't exist.
:raises ExperimentNotFound: If the specified experiment doesn't exist.
:raises NodeNotFound: If the specified node doesn't exist.
"""
# Validate arguments.
if node is not None:
if cluster is not None:
raise ValueError("You cannot specify both 'node' and 'cluster'")
if experiment is not None:
raise ValueError("You cannot specify both 'node' and 'experiment'")
else:
if cluster is None and experiment is None:
raise ValueError("You must specify one of 'node', 'experiment', or 'cluster'")
jobs: List[Job] = []
# Build request options.
request_opts: Dict[str, Any] = {}
if cluster is not None:
cluster_id = (
cluster.id if isinstance(cluster, Cluster) else self.beaker.cluster.get(cluster).id
)
request_opts["cluster"] = cluster_id
if node is not None:
node_id = node.id if isinstance(node, Node) else self.beaker.node.get(node).id
request_opts["node"] = node_id
if experiment is not None:
exp_id = (
experiment.id
if isinstance(experiment, Experiment)
else self.beaker.experiment.get(experiment).id
)
request_opts["experiment"] = exp_id
if kind is not None:
request_opts["kind"] = kind.value
request_opts["finalized"] = finalized
# Gather jobs, page by page.
while True:
page = Jobs.from_json(self.request("jobs", method="GET", query=request_opts).json())
if page.data:
jobs.extend(page.data)
if not page.next and not page.next_cursor:
break
else:
request_opts["cursor"] = page.next or page.next_cursor
return jobs
def logs(
self,
job: Union[str, Job],
quiet: bool = False,
since: Optional[Union[str, datetime, timedelta]] = None,
) -> Generator[bytes, None, None]:
"""
Download the logs for a job.
Returns a generator with the streaming bytes from the download.
The generator should be exhausted, otherwise the logs downloaded will be incomplete.
.. seealso::
:meth:`Beaker.experiment.logs() <ExperimentClient.logs>`
.. seealso::
:meth:`follow()`
:param job: The Beaker job ID or object.
:param quiet: If ``True``, progress won't be displayed.
:param since: Only show logs since a particular time. Could be a :class:`~datetime.datetime` object
(naive datetimes will be treated as UTC), a timestamp string in the form of RFC 3339
(e.g. "2013-01-02T13:23:37Z"), or a relative time
(e.g. a :class:`~datetime.timedelta` or a string like "42m").
:raises JobNotFound: If the job can't be found.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
job_id = job.id if isinstance(job, Job) else job
opts = {}
if since is not None:
from ..util import format_since
opts["since"] = format_since(since)
response = self.request(
f"jobs/{job_id}/logs",
method="GET",
exceptions_for_status={404: JobNotFound(job_id)},
stream=True,
query=opts,
)
# TODO: currently beaker doesn't provide the Content-Length header, update this if they do.
# content_length = response.headers.get("Content-Length")
# total = int(content_length) if content_length is not None else None
from ..progress import get_logs_progress
with get_logs_progress(quiet) as progress:
task_id = progress.add_task("Downloading:")
total = 0
for chunk in response.iter_content(chunk_size=1024):
if chunk:
advance = len(chunk)
total += advance
progress.update(task_id, total=total + 1, advance=advance)
yield chunk
def metrics(self, job: Union[str, Job]) -> Optional[Dict[str, Any]]:
"""
Get the metrics from a job.
.. seealso::
:meth:`Beaker.experiment.metrics() <ExperimentClient.metrics>`
:param job: The Beaker job ID or object.
:raises JobNotFound: If the job can't be found.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
job_id = job.id if isinstance(job, Job) else job
return self.request(
f"jobs/{job_id}/results",
method="GET",
exceptions_for_status={404: JobNotFound(job_id)},
).json()["metrics"]
def results(self, job: Union[str, Job]) -> Optional[Dataset]:
"""
Get the results from a job.
.. seealso::
:meth:`Beaker.experiment.results() <ExperimentClient.results>`
:param job: The Beaker job ID or object.
:raises JobNotFound: If the job can't be found.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
job = job if isinstance(job, Job) else self.get(job)
if job.result is None or job.result.beaker is None:
return None
else:
try:
return self.beaker.dataset.get(job.result.beaker)
except DatasetNotFound:
return None
def finalize(self, job: Union[str, Job]) -> Job:
"""
Finalize a job.
:param job: The Beaker job ID or object.
:raises JobNotFound: If the job can't be found.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
job_id = job.id if isinstance(job, Job) else job
return Job.from_json(
self.request(
f"jobs/{job_id}",
method="PATCH",
exceptions_for_status={404: JobNotFound(job_id)},
data=JobPatch(status=JobStatusUpdate(finalized=True)),
).json()
)
def preempt(self, job: Union[str, Job]) -> Job:
"""
Preempt a job.
:param job: The Beaker job ID or object.
:raises JobNotFound: If the job can't be found.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
job_id = job.id if isinstance(job, Job) else job
return Job.from_json(
self.request(
f"jobs/{job_id}",
method="PATCH",
exceptions_for_status={404: JobNotFound(job_id)},
data=JobPatch(
status=JobStatusUpdate(
canceled=True,
canceled_code=CanceledCode.user_preemption,
canceled_for=f"Preempted by user '{self.beaker.account.name}'",
)
),
).json()
)
def stop(self, job: Union[str, Job]) -> Job:
"""
Stop a job.
:param job: The Beaker job ID or object.
:raises JobNotFound: If the job can't be found.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
job_id = job.id if isinstance(job, Job) else job
return Job.from_json(
self.request(
f"jobs/{job_id}",
method="PATCH",
exceptions_for_status={404: JobNotFound(job_id)},
data=JobPatch(
status=JobStatusUpdate(
canceled=True, canceled_for=f"Stopped by user '{self.beaker.account.name}'"
)
),
).json()
)
def wait_for(
self,
*jobs: Union[str, Job],
timeout: Optional[float] = None,
poll_interval: float = 1.0,
quiet: bool = False,
strict: bool = False,
) -> List[Job]:
"""
Wait for jobs to finalize, returning the completed jobs as a list in the same order
they were given as input.
.. caution::
This method is experimental and may change or be removed in future releases.
.. seealso::
:meth:`as_completed()`
.. seealso::
:meth:`follow()`
.. seealso::
:meth:`Beaker.experiment.wait_for() <ExperimentClient.wait_for>`
:param jobs: Job ID, name, or object.
:param timeout: Maximum amount of time to wait for (in seconds).
:param poll_interval: Time to wait between polling each job's status (in seconds).
:param quiet: If ``True``, progress won't be displayed.
:param strict: If ``True``, the exit code of each job will be checked, and a
:class:`~beaker.exceptions.JobFailedError` will be raised for non-zero exit codes.
:raises JobNotFound: If any job can't be found.
:raises JobTimeoutError: If the ``timeout`` expires.
:raises DuplicateJobError: If the same job is given as an argument more than once.
:raises JobFailedError: If ``strict=True`` and any job finishes with a non-zero exit code.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
job_id_to_position: Dict[str, int] = {}
jobs_to_wait_on: List[Job] = []
for i, job_ in enumerate(jobs):
job = job_ if isinstance(job_, Job) else self.get(job_)
jobs_to_wait_on.append(job)
if job.id in job_id_to_position:
raise DuplicateJobError(job.display_name)
job_id_to_position[job.id] = i
completed_jobs: List[Job] = list(
self.as_completed(
*jobs_to_wait_on,
timeout=timeout,
poll_interval=poll_interval,
quiet=quiet,
strict=strict,
)
)
return sorted(completed_jobs, key=lambda job: job_id_to_position[job.id])
def as_completed(
self,
*jobs: Union[str, Job],
timeout: Optional[float] = None,
poll_interval: float = 1.0,
quiet: bool = False,
strict: bool = False,
) -> Generator[Job, None, None]:
"""
Wait for jobs to finalize, returning an iterator that yields jobs as they complete.
.. caution::
This method is experimental and may change or be removed in future releases.
.. seealso::
:meth:`wait_for()`
.. seealso::
:meth:`follow()`
.. seealso::
:meth:`Beaker.experiment.as_completed() <ExperimentClient.as_completed>`
:param jobs: Job ID, name, or object.
:param timeout: Maximum amount of time to wait for (in seconds).
:param poll_interval: Time to wait between polling each job's status (in seconds).
:param quiet: If ``True``, progress won't be displayed.
:param strict: If ``True``, the exit code of each job will be checked, and a
:class:`~beaker.exceptions.JobFailedError` will be raised for non-zero exit codes.
:raises JobNotFound: If any job can't be found.
:raises JobTimeoutError: If the ``timeout`` expires.
:raises DuplicateJobError: If the same job is given as an argument more than once.
:raises JobFailedError: If ``strict=True`` and any job finishes with a non-zero exit code.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
yield from self._as_completed(
*jobs,
timeout=timeout,
poll_interval=poll_interval,
quiet=quiet,
strict=strict,
)
def follow(
self,
job: Union[str, Job],
timeout: Optional[float] = None,
strict: bool = False,
include_timestamps: bool = True,
) -> Generator[bytes, None, Job]:
"""
Follow a job live, creating a generator that produces log lines (as bytes) from the job
as they become available. The return value of the generator is the finalized
:class:`~beaker.data_model.job.Job` object.
.. seealso::
:meth:`logs()`
.. seealso::
:meth:`wait_for()`
.. seealso::
:meth:`as_completed()`
.. seealso::
:meth:`Beaker.experiment.follow() <ExperimentClient.follow>`
:param job: Job ID, name, or object.
:param timeout: Maximum amount of time to follow job for (in seconds).
:param strict: If ``True``, the exit code of each job will be checked, and a
:class:`~beaker.exceptions.JobFailedError` will be raised for non-zero exit codes.
:param include_timestamps: If ``True`` (the default) timestamps from the Beaker logs
will be included in the output.
:raises JobNotFound: If any job can't be found.
:raises JobTimeoutError: If the ``timeout`` expires.
:raises JobFailedError: If ``strict=True`` and any job finishes with a non-zero exit code.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
:examples:
>>> job = beaker.experiment.latest_job(hello_world_experiment_name)
>>> for line in beaker.job.follow(job):
... # Every log line from Beaker starts with an RFC 3339 UTC timestamp
... # (e.g. '2021-12-07T19:30:24.637600011Z'). If we don't want to print
... # the timestamps we can split them off like this:
... line = line[line.find(b"Z ")+2:]
... print(line.decode(errors="ignore"), end="")
<BLANKLINE>
Hello from Docker!
This message shows that your installation appears to be working correctly.
<BLANKLINE>
...
"""
from ..util import log_and_wait, split_timestamp
if timeout is not None and timeout <= 0:
raise ValueError("'timeout' must be a positive number")
start = time.monotonic()
last_timestamp: Optional[str] = None
lines_for_timestamp: Dict[str, Set[bytes]] = defaultdict(set)
def get_line_to_yield(line: bytes) -> Optional[bytes]:
nonlocal last_timestamp, lines_for_timestamp
timestamp = split_timestamp(line)
if timestamp is not None and timestamp != last_timestamp:
last_timestamp = timestamp
if include_timestamps:
return line
else:
return line[len(timestamp) + 1 :]
elif timestamp is None and last_timestamp is not None:
if line not in lines_for_timestamp[last_timestamp]:
lines_for_timestamp[last_timestamp].add(line)
return line
return None
def pull_logs_since(updated_job: Job, final: bool = False):
retries = 0
while True:
try:
buffer = b""
for chunk in self.logs(updated_job, quiet=True, since=last_timestamp):
lines = (buffer + chunk).splitlines(keepends=True)
if chunk.endswith(b"\n"):
buffer = b""
elif lines:
# Last line in chunk is not a complete line.
lines, buffer = lines[:-1], lines[-1]
for line in lines:
line_to_yield = get_line_to_yield(line)
if line_to_yield is not None:
yield line_to_yield
if final and buffer:
line_to_yield = get_line_to_yield(buffer + b"\n")
if line_to_yield is not None:
yield line_to_yield
break
except RequestException as err:
if retries < self.beaker.MAX_RETRIES:
log_and_wait(retries, err)
retries += 1
else:
raise
updated_job: Job
while True:
updated_job = self.get(job.id if isinstance(job, Job) else job)
# Pull and yield log lines.
for line in pull_logs_since(updated_job):
yield line
# Check status of job, finish if job is no-longer running.
if updated_job.is_finalized:
break
# Check timeout if we're still waiting for job to complete.
if timeout is not None and time.monotonic() - start >= timeout:
raise JobTimeoutError(updated_job.id)
time.sleep(1.0)
for line in pull_logs_since(updated_job, final=True):
yield line
if strict:
updated_job.check()
return updated_job
def _as_completed(
self,
*jobs: Union[str, Job],
timeout: Optional[float] = None,
poll_interval: float = 1.0,
quiet: bool = False,
strict: bool = False,
_progress: Optional["Progress"] = None,
) -> Generator[Job, None, None]:
if timeout is not None and timeout <= 0:
raise ValueError("'timeout' must be a positive number")
exp_id_to_name: Dict[str, str] = {}
task_id_to_name: Dict[str, str] = {}
def display_name(j: Job) -> str:
if j.execution is None:
return f"[i]{j.id}[/]"
else:
if j.execution.experiment not in exp_id_to_name:
exp = self.beaker.experiment.get(j.execution.experiment)
exp_id_to_name[exp.id] = exp.name if exp.name is not None else exp.id
if j.execution.task not in task_id_to_name:
for task in self.beaker.experiment.tasks(j.execution.experiment):
if task.id not in task_id_to_name:
task_id_to_name[task.id] = (
task.name if task.name is not None else task.id
)
return (
f"[b cyan]{exp_id_to_name[j.execution.experiment]}[/] "
f"\N{rightwards arrow} [i]{task_id_to_name[j.execution.task]}[/]"
)
from ..progress import get_jobs_progress
job_ids: List[str] = []
start = time.monotonic()
owned_progress = _progress is None
progress = _progress or get_jobs_progress(quiet)
if owned_progress:
progress.start()
try:
job_id_to_progress_task: Dict[str, "TaskID"] = {}
for job_ in jobs:
job = job_ if isinstance(job_, Job) else self.get(job_)
job_ids.append(job.id)
if job.id in job_id_to_progress_task:
raise DuplicateJobError(job.id)
job_id_to_progress_task[job.id] = progress.add_task(f"{display_name(job)}:")
polls = 0
while True:
if not job_id_to_progress_task:
yield from []
return
polls += 1
# Poll each experiment and update the progress line.
for job_id in list(job_id_to_progress_task):
task_id = job_id_to_progress_task[job_id]
job = self.get(job_id)
if not job.is_finalized:
progress.update(task_id, total=polls + 1, advance=1)
else:
# Ensure job was successful if `strict==True`.
if strict:
job.check()
progress.update(
task_id,
total=polls + 1,
completed=polls + 1,
)
progress.stop_task(task_id)
del job_id_to_progress_task[job_id]
yield job
elapsed = time.monotonic() - start
if timeout is not None and elapsed >= timeout:
raise JobTimeoutError
time.sleep(poll_interval)
finally:
if owned_progress:
progress.stop()
def url(self, job: Union[str, Job]) -> str:
job_id = job.id if isinstance(job, Job) else job
return f"{self.config.agent_address}/job/{self.url_quote(job_id)}"
| beaker-py-main | beaker/services/job.py |
from .account import AccountClient
from .cluster import ClusterClient
from .dataset import DatasetClient
from .experiment import ExperimentClient
from .group import GroupClient
from .image import ImageClient
from .job import JobClient
from .node import NodeClient
from .organization import OrganizationClient
from .secret import SecretClient
from .service_client import ServiceClient
from .workspace import WorkspaceClient
__all__ = [
"AccountClient",
"ClusterClient",
"DatasetClient",
"ExperimentClient",
"GroupClient",
"ImageClient",
"JobClient",
"NodeClient",
"OrganizationClient",
"SecretClient",
"ServiceClient",
"WorkspaceClient",
]
| beaker-py-main | beaker/services/__init__.py |
import io
import os
import urllib.parse
from datetime import datetime
from pathlib import Path
from typing import (
TYPE_CHECKING,
ClassVar,
Dict,
Generator,
List,
Optional,
Tuple,
Union,
)
from ..aliases import PathOrStr
from ..data_model import *
from ..exceptions import *
from ..util import log_and_wait, path_is_relative_to, retriable
from .service_client import ServiceClient
if TYPE_CHECKING:
from requests import Response
from rich.progress import Progress, TaskID
is_canceled = None
class DatasetClient(ServiceClient):
"""
Accessed via :data:`Beaker.dataset <beaker.Beaker.dataset>`.
"""
HEADER_UPLOAD_ID = "Upload-ID"
HEADER_UPLOAD_LENGTH = "Upload-Length"
HEADER_UPLOAD_OFFSET = "Upload-Offset"
HEADER_DIGEST = "Digest"
HEADER_LAST_MODIFIED = "Last-Modified"
HEADER_CONTENT_LENGTH = "Content-Length"
REQUEST_SIZE_LIMIT: ClassVar[int] = 32 * 1024 * 1024
DOWNLOAD_CHUNK_SIZE: ClassVar[int] = 10 * 1024
"""
The default buffer size for downloads.
"""
def get(self, dataset: str) -> Dataset:
"""
Get info about a dataset.
:param dataset: The dataset ID or name.
:raises DatasetNotFound: If the dataset can't be found.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
def _get(id: str) -> Dataset:
return Dataset.from_json(
self.request(
f"datasets/{self.url_quote(id)}",
exceptions_for_status={404: DatasetNotFound(self._not_found_err_msg(id))},
).json()
)
try:
# Could be a dataset ID or full name, so we try that first.
return _get(dataset)
except DatasetNotFound:
if "/" not in dataset:
# Try with adding the account name.
try:
return _get(f"{self.beaker.account.name}/{dataset}")
except DatasetNotFound:
pass
# Try searching the default workspace.
if self.config.default_workspace is not None:
matches = self.beaker.workspace.datasets(match=dataset, limit=1)
if matches:
return matches[0]
raise
def create(
self,
name: str,
*sources: PathOrStr,
target: Optional[PathOrStr] = None,
workspace: Optional[str] = None,
description: Optional[str] = None,
force: bool = False,
max_workers: Optional[int] = None,
quiet: bool = False,
commit: bool = True,
strip_paths: bool = False,
) -> Dataset:
"""
Create a dataset with the source file(s).
:param name: The name to assign to the new dataset.
:param sources: Local source files or directories to upload to the dataset.
:param target: If specified, all source files/directories will be uploaded under
a directory of this name.
:param workspace: The workspace to upload the dataset to. If not specified,
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.
:param description: Text description for the dataset.
:param force: If ``True`` and a dataset by the given name already exists, it will be overwritten.
:param max_workers: The maximum number of thread pool workers to use to upload files concurrently.
:param quiet: If ``True``, progress won't be displayed.
:param commit: Whether to commit the dataset after successful upload.
:param strip_paths: If ``True``, all source files and directories will be uploaded under their name,
not their path. E.g. the file "docs/source/index.rst" would be uploaded as just "index.rst",
instead of "docs/source/index.rst".
.. note::
This only applies to source paths that are children of the current working directory.
If a source path is outside of the current working directory, it will always
be uploaded under its name only.
:raises ValueError: If the name is invalid.
:raises DatasetConflict: If a dataset by that name already exists and ``force=False``.
:raises UnexpectedEOFError: If a source is a directory and the contents of one of the directory's files
changes while creating the dataset.
:raises FileNotFoundError: If a source doesn't exist.
:raises WorkspaceNotSet: If neither ``workspace`` nor
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
self.validate_beaker_name(name)
workspace_id = self.resolve_workspace(workspace).id
# Create the dataset.
def make_dataset() -> Dataset:
return Dataset.from_json(
self.request(
"datasets",
method="POST",
query={"name": name},
data=DatasetSpec(workspace=workspace_id, description=description),
exceptions_for_status={409: DatasetConflict(name)},
).json()
)
try:
dataset_info = make_dataset()
except DatasetConflict:
if force:
self.delete(f"{self.beaker.account.whoami().name}/{name}")
dataset_info = make_dataset()
else:
raise
assert dataset_info.storage is not None
# Upload the file(s).
if sources:
self.sync(
dataset_info,
*sources,
target=target,
quiet=quiet,
max_workers=max_workers,
strip_paths=strip_paths,
)
# Commit the dataset.
if commit:
self.commit(dataset_info.id)
# Return info about the dataset.
return self.get(dataset_info.id)
def commit(self, dataset: Union[str, Dataset]) -> Dataset:
"""
Commit the dataset.
:param dataset: The dataset ID, name, or object.
:raises DatasetNotFound: If the dataset can't be found.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
dataset_id = self.resolve_dataset(dataset).id
@retriable()
def commit() -> Dataset:
# It's okay to retry this because committing a dataset multiple
# times does nothing.
return Dataset.from_json(
self.request(
f"datasets/{self.url_quote(dataset_id)}",
method="PATCH",
data=DatasetPatch(commit=True),
exceptions_for_status={404: DatasetNotFound(self._not_found_err_msg(dataset))},
).json()
)
return commit()
def fetch(
self,
dataset: Union[str, Dataset],
target: Optional[PathOrStr] = None,
prefix: Optional[str] = None,
force: bool = False,
max_workers: Optional[int] = None,
quiet: bool = False,
validate_checksum: bool = True,
chunk_size: Optional[int] = None,
):
"""
Download a dataset.
:param dataset: The dataset ID, name, or object.
:param target: The target path to download fetched data to. Defaults to ``Path(.)``.
:param prefix: Only download files that start with the given prefix.
:param max_workers: The maximum number of thread pool workers to use to download files concurrently.
:param force: If ``True``, existing local files will be overwritten.
:param quiet: If ``True``, progress won't be displayed.
:param validate_checksum: If ``True``, the checksum of every file downloaded will be verified.
:param chunk_size: The size of the buffer (in bytes) to use while downloading each file.
Defaults to :data:`DOWNLOAD_CHUNK_SIZE`.
:raises DatasetNotFound: If the dataset can't be found.
:raises DatasetReadError: If the :data:`~beaker.data_model.dataset.Dataset.storage` hasn't been set.
:raises FileExistsError: If ``force=False`` and an existing local file clashes with a file
in the Beaker dataset.
:raises ChecksumFailedError: If ``validate_checksum=True`` and the digest of one of the
downloaded files doesn't match the expected digest.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
dataset = self.resolve_dataset(dataset)
if dataset.storage is None:
# Might need to get dataset again if 'storage' hasn't been set yet.
dataset = self.get(dataset.id)
if dataset.storage is None:
raise DatasetReadError(dataset.id)
dataset_info = DatasetInfo.from_json(
self.request(
f"datasets/{dataset.id}/files",
exceptions_for_status={404: DatasetNotFound(self._not_found_err_msg(dataset.id))},
).json()
)
total_bytes_to_download: int = dataset_info.size.bytes
total_downloaded: int = 0
target = Path(target or Path("."))
target.mkdir(exist_ok=True, parents=True)
from ..progress import get_sized_dataset_fetch_progress
progress = get_sized_dataset_fetch_progress(quiet)
with progress:
bytes_task = progress.add_task("Downloading dataset")
progress.update(bytes_task, total=total_bytes_to_download)
import concurrent.futures
import threading
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
global is_canceled
is_canceled = threading.Event()
download_futures = []
try:
for file_info in dataset_info.page.data:
if prefix is not None and not file_info.path.startswith(prefix):
continue
target_path = target / Path(file_info.path)
if not force and target_path.exists():
raise FileExistsError(file_info.path)
future = executor.submit(
self._download_file,
dataset,
file_info,
target_path,
progress=progress,
task_id=bytes_task,
validate_checksum=validate_checksum,
chunk_size=chunk_size,
)
download_futures.append(future)
for future in concurrent.futures.as_completed(download_futures):
total_downloaded += future.result()
except KeyboardInterrupt:
self.logger.warning("Received KeyboardInterrupt, canceling download workers...")
is_canceled.set() # type: ignore
for future in download_futures:
future.cancel()
executor.shutdown(wait=True)
raise
progress.update(bytes_task, total=total_downloaded, completed=total_downloaded)
def stream_file(
self,
dataset: Union[str, Dataset],
file: Union[str, FileInfo],
offset: int = 0,
length: int = -1,
quiet: bool = False,
validate_checksum: bool = True,
chunk_size: Optional[int] = None,
) -> Generator[bytes, None, None]:
"""
Stream download the contents of a single file from a dataset.
.. seealso::
:meth:`get_file()` is similar but returns the entire contents at once instead of
a generator over the contents.
:param dataset: The dataset ID, name, or object.
:param file: The path of the file within the dataset or the corresponding
:class:`~beaker.data_model.dataset.FileInfo` object.
:param offset: Offset to start from, in bytes.
:param length: Number of bytes to read.
:param quiet: If ``True``, progress won't be displayed.
:param validate_checksum: If ``True``, the checksum of the downloaded bytes will be verified.
:param chunk_size: The size of the buffer (in bytes) to use while downloading each file.
Defaults to :data:`DOWNLOAD_CHUNK_SIZE`.
:raises DatasetNotFound: If the dataset can't be found.
:raises DatasetReadError: If the :data:`~beaker.data_model.dataset.Dataset.storage` hasn't been set.
:raises FileNotFoundError: If the file doesn't exist in the dataset.
:raises ChecksumFailedError: If ``validate_checksum=True`` and the digest of the downloaded
bytes don't match the expected digest.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
:examples:
>>> total_bytes = 0
>>> with open(tmp_path / "squad-train.arrow", "wb") as f:
... for chunk in beaker.dataset.stream_file(squad_dataset_name, "squad-train.arrow", quiet=True):
... total_bytes += f.write(chunk)
"""
dataset = self.resolve_dataset(dataset, ensure_storage=True)
file_info = file if isinstance(file, FileInfo) else self.file_info(dataset, file)
from ..progress import get_unsized_dataset_fetch_progress
with get_unsized_dataset_fetch_progress(quiet=quiet) as progress:
task_id = progress.add_task("Downloading", total=None)
for bytes_chunk in self._stream_file(
dataset,
file_info,
offset=offset,
length=length,
validate_checksum=validate_checksum,
chunk_size=chunk_size,
):
progress.update(task_id, advance=len(bytes_chunk))
yield bytes_chunk
def get_file(
self,
dataset: Union[str, Dataset],
file: Union[str, FileInfo],
offset: int = 0,
length: int = -1,
quiet: bool = False,
validate_checksum: bool = True,
chunk_size: Optional[int] = None,
) -> bytes:
"""
Download the contents of a single file from a dataset.
.. seealso::
:meth:`stream_file()` is similar but returns a generator over the contents.
:param dataset: The dataset ID, name, or object.
:param file: The path of the file within the dataset or the corresponding
:class:`~beaker.data_model.dataset.FileInfo` object.
:param offset: Offset to start from, in bytes.
:param length: Number of bytes to read.
:param quiet: If ``True``, progress won't be displayed.
:param validate_checksum: If ``True``, the checksum of the downloaded bytes will be verified.
:param chunk_size: The size of the buffer (in bytes) to use while downloading each file.
Defaults to :data:`DOWNLOAD_CHUNK_SIZE`.
:raises DatasetNotFound: If the dataset can't be found.
:raises DatasetReadError: If the :data:`~beaker.data_model.dataset.Dataset.storage` hasn't been set.
:raises FileNotFoundError: If the file doesn't exist in the dataset.
:raises ChecksumFailedError: If ``validate_checksum=True`` and the digest of the downloaded
bytes don't match the expected digest.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
:examples:
>>> contents = beaker.dataset.get_file(squad_dataset_name, "squad-train.arrow", quiet=True)
"""
@retriable(recoverable_errors=(RequestException, ChecksumFailedError))
def _get_file() -> bytes:
return b"".join(
self.stream_file(
dataset,
file,
offset=offset,
length=length,
quiet=quiet,
validate_checksum=validate_checksum,
chunk_size=chunk_size,
)
)
return _get_file()
def file_info(self, dataset: Union[str, Dataset], file_name: str) -> FileInfo:
"""
Get the :class:`~beaker.data_model.dataset.FileInfo` for a file in a dataset.
:param dataset: The dataset ID, name, or object.
:param file_name: The path of the file within the dataset.
:raises DatasetNotFound: If the dataset can't be found.
:raises DatasetReadError: If the :data:`~beaker.data_model.dataset.Dataset.storage` hasn't been set.
:raises FileNotFoundError: If the file doesn't exist in the dataset.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
dataset = self.resolve_dataset(dataset, ensure_storage=True)
assert dataset.storage is not None
if dataset.storage.scheme == "fh":
response = self.request(
f"datasets/{dataset.storage.id}/files/{file_name}",
method="HEAD",
token=dataset.storage.token,
base_url=dataset.storage.base_url,
exceptions_for_status={404: FileNotFoundError(file_name)},
)
size_str = response.headers.get(self.HEADER_CONTENT_LENGTH)
size = int(size_str) if size_str else None
return FileInfo(
path=file_name,
digest=Digest.from_encoded(response.headers[self.HEADER_DIGEST]),
updated=datetime.strptime(
response.headers[self.HEADER_LAST_MODIFIED], "%a, %d %b %Y %H:%M:%S %Z"
),
size=size,
)
else:
# TODO (epwalsh): make a HEAD request once Beaker supports that
# (https://github.com/allenai/beaker/issues/2961)
response = self.request(
f"datasets/{dataset.id}/files/{urllib.parse.quote(file_name, safe='')}",
stream=True,
exceptions_for_status={404: FileNotFoundError(file_name)},
)
response.close()
size_str = response.headers.get(self.HEADER_CONTENT_LENGTH)
size = int(size_str) if size_str else None
digest = response.headers.get(self.HEADER_DIGEST)
return FileInfo(
path=file_name,
digest=None if digest is None else Digest.from_encoded(digest),
updated=datetime.strptime(
response.headers[self.HEADER_LAST_MODIFIED], "%a, %d %b %Y %H:%M:%S %Z"
),
size=size,
)
def delete(self, dataset: Union[str, Dataset]):
"""
Delete a dataset.
:param dataset: The dataset ID, name, or object.
:raises DatasetNotFound: If the dataset can't be found.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
dataset_id = self.resolve_dataset(dataset).id
self.request(
f"datasets/{self.url_quote(dataset_id)}",
method="DELETE",
exceptions_for_status={404: DatasetNotFound(self._not_found_err_msg(dataset))},
)
def sync(
self,
dataset: Union[str, Dataset],
*sources: PathOrStr,
target: Optional[PathOrStr] = None,
quiet: bool = False,
max_workers: Optional[int] = None,
strip_paths: bool = False,
) -> None:
"""
Sync local files or directories to an uncommitted dataset.
:param dataset: The dataset ID, name, or object.
:param sources: Local source files or directories to upload to the dataset.
:param target: If specified, all source files/directories will be uploaded under
a directory of this name.
:param max_workers: The maximum number of thread pool workers to use to upload files concurrently.
:param quiet: If ``True``, progress won't be displayed.
:param strip_paths: If ``True``, all source files and directories will be uploaded under their name,
not their path. E.g. the file "docs/source/index.rst" would be uploaded as just "index.rst",
instead of "docs/source/index.rst".
.. note::
This only applies to source paths that are children of the current working directory.
If a source path is outside of the current working directory, it will always
be uploaded under its name only.
:raises DatasetNotFound: If the dataset can't be found.
:raises DatasetWriteError: If the dataset was already committed.
:raises FileNotFoundError: If a source doesn't exist.
:raises UnexpectedEOFError: If a source is a directory and the contents of one of the directory's files
changes while creating the dataset.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
dataset = self.resolve_dataset(dataset)
if dataset.committed is not None:
raise DatasetWriteError(dataset.id)
from ..progress import get_dataset_sync_progress
with get_dataset_sync_progress(quiet) as progress:
bytes_task = progress.add_task("Uploading dataset")
total_bytes = 0
# map source path to (target_path, size)
path_info: Dict[Path, Tuple[Path, int]] = {}
for source in sources:
source = Path(source)
strip_path = strip_paths or not path_is_relative_to(source, ".")
if source.is_file():
target_path = Path(source.name) if strip_path else source
if target is not None:
target_path = Path(str(target)) / target_path
size = source.lstat().st_size
path_info[source] = (target_path, size)
total_bytes += size
elif source.is_dir():
for path in source.glob("**/*"):
if path.is_dir():
continue
target_path = path.relative_to(source) if strip_path else path
if target is not None:
target_path = Path(str(target)) / target_path
size = path.lstat().st_size
if size == 0:
continue
path_info[path] = (target_path, size)
total_bytes += size
else:
raise FileNotFoundError(source)
import concurrent.futures
progress.update(bytes_task, total=total_bytes)
# Now upload.
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
# Dispatch tasks to thread pool executor.
future_to_path = {}
for path, (target_path, size) in path_info.items():
future = executor.submit(
self._upload_file,
dataset,
size,
path,
target_path,
progress,
bytes_task,
ignore_errors=True,
)
future_to_path[future] = path
# Collect completed tasks.
for future in concurrent.futures.as_completed(future_to_path):
path = future_to_path[future]
original_size = path_info[path][1]
actual_size = future.result()
if actual_size != original_size:
# If the size of the file has changed since we started, adjust total.
total_bytes += actual_size - original_size
progress.update(bytes_task, total=total_bytes)
def upload(
self,
dataset: Union[str, Dataset],
source: bytes,
target: PathOrStr,
quiet: bool = False,
) -> None:
"""
Upload raw bytes to an uncommitted dataset.
:param dataset: The dataset ID, name, or object.
:param source: The raw bytes to upload to the dataset.
:param target: The name to assign to the file for the bytes in the dataset.
:param quiet: If ``True``, progress won't be displayed.
:raises DatasetNotFound: If the dataset can't be found.
:raises DatasetWriteError: If the dataset was already committed.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
dataset = self.resolve_dataset(dataset)
if dataset.committed is not None:
raise DatasetWriteError(dataset.id)
from ..progress import get_dataset_sync_progress
size = len(source)
with get_dataset_sync_progress(quiet) as progress:
task_id = progress.add_task("Uploading source")
if size is not None:
progress.update(task_id, total=size)
self._upload_file(dataset, size, source, target, progress, task_id)
def ls(self, dataset: Union[str, Dataset], prefix: Optional[str] = None) -> List[FileInfo]:
"""
List files in a dataset.
:param dataset: The dataset ID, name, or object.
:param prefix: An optional path prefix to filter by.
:raises DatasetNotFound: If the dataset can't be found.
:raises DatasetReadError: If the :data:`~beaker.data_model.dataset.Dataset.storage` hasn't been set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
dataset = self.resolve_dataset(dataset)
query = {} if prefix is None else {"prefix": prefix}
info = DatasetInfo.from_json(
self.request(
f"datasets/{dataset.id}/files",
query=query,
exceptions_for_status={404: DatasetNotFound(self._not_found_err_msg(dataset.id))},
).json()
)
return list(info.page.data)
def size(self, dataset: Union[str, Dataset]) -> int:
"""
Calculate the size of a dataset, in bytes.
:param dataset: The dataset ID, name, or object.
:raises DatasetNotFound: If the dataset can't be found.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
dataset = self.resolve_dataset(dataset)
info = DatasetInfo.from_json(
self.request(
f"datasets/{dataset.id}/files",
exceptions_for_status={404: DatasetNotFound(self._not_found_err_msg(dataset.id))},
).json()
)
return info.size.bytes
def rename(self, dataset: Union[str, Dataset], name: str) -> Dataset:
"""
Rename a dataset.
:param dataset: The dataset ID, name, or object.
:param name: The new name of the dataset.
:raises ValueError: If the new name is invalid.
:raises DatasetNotFound: If the dataset can't be found.
:raises DatasetConflict: If a dataset by that name already exists.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
self.validate_beaker_name(name)
dataset_id = self.resolve_dataset(dataset).id
return Dataset.from_json(
self.request(
f"datasets/{self.url_quote(dataset_id)}",
method="PATCH",
data=DatasetPatch(name=name),
exceptions_for_status={
409: DatasetConflict(name),
404: DatasetNotFound(dataset_id),
},
).json()
)
def url(self, dataset: Union[str, Dataset]) -> str:
"""
Get the URL for a dataset.
:param dataset: The dataset ID, name, or object.
:raises DatasetNotFound: If the dataset can't be found.
"""
dataset_id = self.resolve_dataset(dataset).id
return f"{self.config.agent_address}/ds/{self.url_quote(dataset_id)}"
def _not_found_err_msg(self, dataset: Union[str, Dataset]) -> str:
dataset = dataset if isinstance(dataset, str) else dataset.id
return (
f"'{dataset}': Make sure you're using a valid Beaker dataset ID or the "
f"*full* name of the dataset (with the account prefix, e.g. 'username/dataset_name')"
)
def _upload_file(
self,
dataset: Dataset,
size: int,
source: Union[PathOrStr, bytes],
target: PathOrStr,
progress: "Progress",
task_id: "TaskID",
ignore_errors: bool = False,
) -> int:
from ..progress import BufferedReaderWithProgress
assert dataset.storage is not None
if dataset.storage.scheme != "fh":
raise NotImplementedError(
f"Datasets API is not implemented for '{dataset.storage.scheme}' backend yet"
)
source_file_wrapper: BufferedReaderWithProgress
if isinstance(source, (str, Path, os.PathLike)):
source = Path(source)
if ignore_errors and not source.exists():
return 0
source_file_wrapper = BufferedReaderWithProgress(source.open("rb"), progress, task_id)
elif isinstance(source, bytes):
source_file_wrapper = BufferedReaderWithProgress(io.BytesIO(source), progress, task_id)
else:
raise ValueError(f"Expected path-like or raw bytes, got {type(source)}")
try:
body: Optional[BufferedReaderWithProgress] = source_file_wrapper
digest: Optional[str] = None
if size > self.REQUEST_SIZE_LIMIT:
@retriable()
def get_upload_id() -> str:
assert dataset.storage is not None # for mypy
response = self.request(
"uploads",
method="POST",
token=dataset.storage.token,
base_url=dataset.storage.base_url,
)
return response.headers[self.HEADER_UPLOAD_ID]
upload_id = get_upload_id()
written = 0
while written < size:
chunk = source_file_wrapper.read(self.REQUEST_SIZE_LIMIT)
if not chunk:
break
@retriable()
def upload() -> "Response":
assert dataset.storage is not None # for mypy
return self.request(
f"uploads/{upload_id}",
method="PATCH",
data=chunk,
token=dataset.storage.token,
base_url=dataset.storage.base_url,
headers={
self.HEADER_UPLOAD_LENGTH: str(size),
self.HEADER_UPLOAD_OFFSET: str(written),
},
)
response = upload()
written += len(chunk)
digest = response.headers.get(self.HEADER_DIGEST)
if digest:
break
if written != size:
raise UnexpectedEOFError(str(source))
body = None
@retriable()
def finalize():
assert dataset.storage is not None # for mypy
self.request(
f"datasets/{dataset.storage.id}/files/{str(target)}",
method="PUT",
data=body if size > 0 else b"",
token=dataset.storage.token,
base_url=dataset.storage.base_url,
headers=None if not digest else {self.HEADER_DIGEST: digest},
stream=body is not None and size > 0,
exceptions_for_status={
403: DatasetWriteError(dataset.id),
404: DatasetNotFound(self._not_found_err_msg(dataset.id)),
},
)
finalize()
return source_file_wrapper.total_read
finally:
source_file_wrapper.close()
def _stream_file(
self,
dataset: Dataset,
file: FileInfo,
chunk_size: Optional[int] = None,
offset: int = 0,
length: int = -1,
validate_checksum: bool = True,
) -> Generator[bytes, None, None]:
def stream_file() -> Generator[bytes, None, None]:
headers = {}
if offset > 0 and length > 0:
headers["Range"] = f"bytes={offset}-{offset + length - 1}"
elif offset > 0:
headers["Range"] = f"bytes={offset}-"
response = self.request(
f"datasets/{dataset.id}/files/{urllib.parse.quote(file.path, safe='')}",
method="GET",
stream=True,
headers=headers,
exceptions_for_status={404: FileNotFoundError(file.path)},
)
for chunk in response.iter_content(chunk_size=chunk_size or self.DOWNLOAD_CHUNK_SIZE):
yield chunk
if is_canceled is not None and is_canceled.is_set(): # type: ignore
raise ThreadCanceledError
contents_hash = None
if offset == 0 and validate_checksum and file.digest is not None:
contents_hash = file.digest.new_hasher()
retries = 0
while True:
try:
for chunk in stream_file():
if is_canceled is not None and is_canceled.is_set(): # type: ignore
raise ThreadCanceledError
offset += len(chunk)
if contents_hash is not None:
contents_hash.update(chunk)
yield chunk
break
except RequestException as err:
if retries < self.beaker.MAX_RETRIES:
log_and_wait(retries, err)
retries += 1
else:
raise
# Validate digest.
if file.digest is not None and contents_hash is not None:
actual_digest = Digest.from_decoded(
contents_hash.digest(), algorithm=file.digest.algorithm
)
if actual_digest != file.digest:
raise ChecksumFailedError(
f"Checksum for '{file.path}' failed. "
f"Expected '{file.digest}', got '{actual_digest}'."
)
def _download_file(
self,
dataset: Dataset,
file: FileInfo,
target_path: Path,
progress: Optional["Progress"] = None,
task_id: Optional["TaskID"] = None,
validate_checksum: bool = True,
chunk_size: Optional[int] = None,
) -> int:
import tempfile
total_bytes = 0
target_dir = target_path.parent
target_dir.mkdir(exist_ok=True, parents=True)
def on_failure():
if progress is not None and task_id is not None:
progress.advance(task_id, -total_bytes)
@retriable(
on_failure=on_failure,
recoverable_errors=(RequestException, ChecksumFailedError),
)
def download() -> int:
nonlocal total_bytes
tmp_target = tempfile.NamedTemporaryFile(
"w+b", dir=target_dir, delete=False, suffix=".tmp"
)
try:
for chunk in self._stream_file(
dataset,
file,
validate_checksum=validate_checksum,
chunk_size=chunk_size,
):
total_bytes += len(chunk)
tmp_target.write(chunk)
if progress is not None and task_id is not None:
progress.update(task_id, advance=len(chunk))
os.replace(tmp_target.name, target_path)
finally:
tmp_target.close()
if os.path.exists(tmp_target.name):
os.remove(tmp_target.name)
return total_bytes
return download()
| beaker-py-main | beaker/services/dataset.py |
import time
from datetime import datetime, timedelta
from typing import (
TYPE_CHECKING,
Any,
Dict,
Generator,
List,
Optional,
Sequence,
Set,
Union,
)
from ..aliases import PathOrStr
from ..data_model import *
from ..exceptions import *
from .service_client import ServiceClient
if TYPE_CHECKING:
from rich.progress import TaskID
class ExperimentClient(ServiceClient):
"""
Accessed via :data:`Beaker.experiment <beaker.Beaker.experiment>`.
"""
def get(self, experiment: str) -> Experiment:
"""
Get info about an experiment.
:param experiment: The experiment ID or name.
:raises ExperimentNotFound: If the experiment can't be found.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
def _get(id: str) -> Experiment:
return Experiment.from_json(
self.request(
f"experiments/{self.url_quote(id)}",
exceptions_for_status={404: ExperimentNotFound(self._not_found_err_msg(id))},
).json()
)
try:
# Could be an ID or full name, so we try that first.
return _get(experiment)
except ExperimentNotFound:
if "/" not in experiment:
# Try with adding the account name.
try:
return _get(f"{self.beaker.account.name}/{experiment}")
except ExperimentNotFound:
pass
# Try searching the default workspace.
if self.config.default_workspace is not None:
matches = self.beaker.workspace.experiments(match=experiment, limit=1)
if matches:
return matches[0]
raise
def create(
self,
name: str,
spec: Union[ExperimentSpec, PathOrStr],
workspace: Optional[Union[Workspace, str]] = None,
) -> Experiment:
"""
Create a new Beaker experiment with the given ``spec``.
:param name: The name to assign the experiment.
:param spec: The spec for the Beaker experiment. This can either be an
:class:`~beaker.data_model.experiment_spec.ExperimentSpec` instance or the path to a YAML spec file.
:param workspace: The workspace to create the experiment under. If not specified,
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.
:raises ValueError: If the name is invalid.
:raises ExperimentConflict: If an experiment with the given name already exists.
:raises WorkspaceNotSet: If neither ``workspace`` nor
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
self.validate_beaker_name(name)
if not isinstance(spec, ExperimentSpec):
spec = ExperimentSpec.from_file(spec)
spec.validate()
json_spec = spec.to_json()
workspace = self.resolve_workspace(workspace)
self._validate_spec(spec, workspace)
experiment_data = self.request(
f"workspaces/{workspace.id}/experiments",
method="POST",
query={"name": name},
data=json_spec,
exceptions_for_status={409: ExperimentConflict(name)},
).json()
return self.get(experiment_data["id"])
def spec(self, experiment: Union[str, Experiment]) -> ExperimentSpec:
"""
Get the :class:`spec <beaker.data_model.experiment_spec.ExperimentSpec>` of an experiment.
:param experiment: The experiment ID, name, or object.
:raises ExperimentNotFound: If the experiment can't be found.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
experiment_id = self.resolve_experiment(experiment).id
return ExperimentSpec.from_json(
self.request(
f"experiments/{self.url_quote(experiment_id)}/spec",
query={"version": SpecVersion.v2.value},
headers={"Accept": "application/json"},
).json()
)
def stop(self, experiment: Union[str, Experiment]):
"""
Stop an experiment.
:param experiment: The experiment ID, name, or object.
:raises ExperimentNotFound: If the experiment can't be found.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
experiment_id = self.resolve_experiment(experiment).id
self.request(
f"experiments/{self.url_quote(experiment_id)}/stop",
method="PUT",
exceptions_for_status={404: ExperimentNotFound(self._not_found_err_msg(experiment))},
)
def resume(self, experiment: Union[str, Experiment]):
"""
Resume a preempted experiment.
:param experiment: The experiment ID, name, or object.
:raises ExperimentNotFound: If the experiment can't be found.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
experiment_id = self.resolve_experiment(experiment).id
self.request(
f"experiments/{self.url_quote(experiment_id)}/resume",
method="POST",
exceptions_for_status={404: ExperimentNotFound(self._not_found_err_msg(experiment))},
)
def delete(self, experiment: Union[str, Experiment], delete_results_datasets: bool = True):
"""
Delete an experiment.
:param experiment: The experiment ID, name, or object.
:param delete_results_datasets: Also delete the experiment's results datasets.
:raises ExperimentNotFound: If the experiment can't be found.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
experiment = self.resolve_experiment(experiment)
if delete_results_datasets:
for task in self.tasks(experiment):
for job in task.jobs:
try:
dataset = self.beaker.job.results(job)
if dataset is not None:
self.beaker.dataset.delete(dataset)
except DatasetNotFound:
pass
self.request(
f"experiments/{self.url_quote(experiment.id)}",
method="DELETE",
exceptions_for_status={404: ExperimentNotFound(self._not_found_err_msg(experiment.id))},
)
def rename(self, experiment: Union[str, Experiment], name: str) -> Experiment:
"""
Rename an experiment.
:param experiment: The experiment ID, name, or object.
:param name: The new name for the experiment.
:raises ValueError: If the new name is invalid.
:raises ExperimentNotFound: If the experiment can't be found.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
self.validate_beaker_name(name)
experiment_id = self.resolve_experiment(experiment).id
return Experiment.from_json(
self.request(
f"experiments/{self.url_quote(experiment_id)}",
method="PATCH",
data=ExperimentPatch(name=name),
exceptions_for_status={
404: ExperimentNotFound(self._not_found_err_msg(experiment)),
409: ExperimentConflict(name),
},
).json()
)
def tasks(self, experiment: Union[str, Experiment]) -> Tasks:
"""
List the tasks in an experiment.
:param experiment: The experiment ID, name, or object.
:raises ExperimentNotFound: If the experiment can't be found.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
:examples:
>>> task = beaker.experiment.tasks(hello_world_experiment_name)["main"]
"""
experiment_id = self.resolve_experiment(experiment).id
tasks = [
Task.from_json(d)
for d in self.request(
f"experiments/{self.url_quote(experiment_id)}/tasks",
method="GET",
exceptions_for_status={
404: ExperimentNotFound(self._not_found_err_msg(experiment))
},
).json()
]
return Tasks(tasks)
def logs(
self,
experiment: Union[str, Experiment],
task: Optional[Union[str, Task]] = None,
quiet: bool = False,
since: Optional[Union[str, "datetime", "timedelta"]] = None,
) -> Generator[bytes, None, None]:
"""
Download the logs for an experiment.
Returns a generator with the streaming bytes from the download.
The generator should be exhausted, otherwise the logs downloaded will be incomplete.
.. important::
When there are multiple jobs for the given experiment / task, the logs for the latest job
will be returned.
.. seealso::
:meth:`Beaker.job.logs() <JobClient.logs>`
:param experiment: The experiment ID, name, or object.
:param task: The task ID, name, or object of a specific task from the Beaker experiment
to fetch logs for. Required if there are multiple tasks in the experiment.
:param quiet: If ``True``, progress won't be displayed.
:param since: Only show logs since a particular time. Could be a :class:`~datetime.datetime` object
(naive datetimes will be treated as UTC), a timestamp string in the form of RFC 3339
(e.g. "2013-01-02T13:23:37Z"), or a relative time
(e.g. a :class:`~datetime.timedelta` or a string like "42m").
:raises ValueError: The experiment has no tasks or jobs, or the experiment has multiple tasks but
``task`` is not specified.
:raises TaskNotFound: If the given task doesn't exist.
:raises ExperimentNotFound: If the experiment can't be found.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
exp = self.resolve_experiment(experiment)
job = self.latest_job(exp, task=task, ensure_finalized=False)
if job is None:
if task is None:
raise ValueError(f"Experiment {exp.id} has no jobs")
else:
raise ValueError(
f"Experiment {exp.id} has no jobs for task "
f"'{task if isinstance(task, str) else task.display_name}'"
)
return self.beaker.job.logs(job.id, quiet=quiet, since=since)
def metrics(
self, experiment: Union[str, Experiment], task: Optional[Union[str, Task]] = None
) -> Optional[Dict[str, Any]]:
"""
Get the metrics from a task in an experiment.
.. important::
When there are multiple jobs for the given experiment / task, the metrics for
the latest finalized job will be returned.
.. seealso::
:meth:`Beaker.job.metrics() <JobClient.metrics>`
:param experiment: The experiment ID, name, or object.
:param task: The task ID, name, or object of a specific task from the Beaker experiment
to fetch metrics for. Required if there are multiple tasks in the experiment.
:raises ValueError: The experiment has no tasks, or the experiment has multiple tasks but
``task`` is not specified.
:raises TaskNotFound: If the given task doesn't exist.
:raises ExperimentNotFound: If the experiment can't be found.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
exp = self.resolve_experiment(experiment)
job = self.latest_job(exp, task=task, ensure_finalized=True)
return None if job is None else self.beaker.job.metrics(job.id)
def results(
self, experiment: Union[str, Experiment], task: Optional[Union[str, Task]] = None
) -> Optional[Dataset]:
"""
Get the result dataset from a task in an experiment.
.. important::
When there are multiple jobs for the given experiment / task, the results for
dataset the latest finalized job will be returned.
.. seealso::
:meth:`Beaker.job.results() <JobClient.results>`
:param experiment: The experiment ID, name, or object.
:param task: The task ID, name, or object of a specific task from the Beaker experiment
to fetch results for. Required if there are multiple tasks in the experiment.
:raises ValueError: The experiment has no tasks, or the experiment has multiple tasks but
``task`` is not specified.
:raises TaskNotFound: If the given task doesn't exist.
:raises ExperimentNotFound: If the experiment can't be found.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
exp = self.resolve_experiment(experiment)
job = self.latest_job(exp, task=task, ensure_finalized=True)
if job is None:
return None
else:
return self.beaker.job.results(job)
def wait_for(
self,
*experiments: Union[str, Experiment],
timeout: Optional[float] = None,
poll_interval: float = 1.0,
quiet: bool = False,
strict: bool = False,
) -> List[Experiment]:
"""
Wait for experiments to finalize, returning the completed experiments as a list
in the same order they were given as input.
.. caution::
This method is experimental and may change or be removed in future releases.
.. seealso::
:meth:`as_completed()`
.. seealso::
:meth:`follow()`
.. seealso::
:meth:`Beaker.job.wait_for() <JobClient.wait_for>`
:param experiments: Experiment ID, name, or object.
:param timeout: Maximum amount of time to wait for (in seconds).
:param poll_interval: Time to wait between polling the experiment (in seconds).
:param quiet: If ``True``, progress won't be displayed.
:param strict: If ``True``, the exit code of each job will be checked, and a
:class:`~beaker.exceptions.JobFailedError` will be raised for non-zero exit codes.
:raises ExperimentNotFound: If any experiment can't be found.
:raises JobTimeoutError: If the ``timeout`` expires.
:raises DuplicateExperimentError: If the same experiment is given as an argument more than once.
:raises JobFailedError: If ``strict=True`` and any job finishes with a non-zero exit code.
:raises TaskStoppedError: If ``strict=True`` and a task is stopped
before a corresponding job is initialized.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
exp_id_to_position: Dict[str, int] = {}
exps_to_wait_on: List[Experiment] = []
for i, exp_ in enumerate(experiments):
exp = exp_ if isinstance(exp_, Experiment) else self.get(exp_)
exps_to_wait_on.append(exp)
if exp.id in exp_id_to_position:
raise DuplicateExperimentError(exp.display_name)
exp_id_to_position[exp.id] = i
completed_exps: List[Experiment] = list(
self.as_completed(
*exps_to_wait_on,
timeout=timeout,
poll_interval=poll_interval,
quiet=quiet,
strict=strict,
)
)
return sorted(completed_exps, key=lambda exp: exp_id_to_position[exp.id])
def as_completed(
self,
*experiments: Union[str, Experiment],
timeout: Optional[float] = None,
poll_interval: float = 1.0,
quiet: bool = False,
strict: bool = False,
) -> Generator[Experiment, None, None]:
"""
Wait for experiments to finalize, returning an iterator that yields experiments as they
complete.
.. caution::
This method is experimental and may change or be removed in future releases.
.. seealso::
:meth:`wait_for()`
.. seealso::
:meth:`follow()`
.. seealso::
:meth:`Beaker.job.as_completed() <JobClient.as_completed>`
:param experiments: Experiment ID, name, or object.
:param timeout: Maximum amount of time to wait for (in seconds).
:param poll_interval: Time to wait between polling the experiment (in seconds).
:param quiet: If ``True``, progress won't be displayed.
:param strict: If ``True``, the exit code of each job will be checked, and a
:class:`~beaker.exceptions.JobFailedError` will be raised for non-zero exit codes.
:raises ExperimentNotFound: If any experiment can't be found.
:raises JobTimeoutError: If the ``timeout`` expires.
:raises DuplicateExperimentError: If the same experiment is given as an argument more than once.
:raises JobFailedError: If ``strict=True`` and any job finishes with a non-zero exit code.
:raises TaskStoppedError: If ``strict=True`` and a task is stopped
before a corresponding job is initialized.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
if timeout is not None and timeout <= 0:
raise ValueError("'timeout' must be a positive number")
from ..progress import get_exps_and_jobs_progress
start = time.monotonic()
# Initialize progress trackers.
live_display, experiments_progress, jobs_progress = get_exps_and_jobs_progress(quiet)
# Keeps track of the IDs of each job for each task in each experiment.
exp_to_task_to_job: Dict[str, Dict[str, Optional[str]]] = {}
# Keeps track of the progress tracker "TaskID"s for each experiment.
exp_to_progress_task: Dict[str, "TaskID"] = {}
# Keeps track of experiments that haven't been returned yet.
incomplete_exps: Set[str] = set()
# Keeps track of jobs that have been finalized.
finalized_jobs: Set[str] = set()
# Keeps track of tasks that were stopped before a job was created.
stopped_tasks: Set[str] = set()
def num_completed_tasks(exp_id: str) -> int:
return len(
[
job_id
for _, job_id in exp_to_task_to_job[exp_id].items()
if job_id is not None and job_id in finalized_jobs
]
)
def num_stopped_tasks(exp_id: str) -> int:
return len(
[
task_id
for task_id in exp_to_task_to_job[exp_id].keys()
if task_id in stopped_tasks
]
)
def total_tasks(exp_id: str) -> int:
return len(exp_to_task_to_job[exp_id])
def experiment_finalized(exp_id: str) -> bool:
return (num_stopped_tasks(exp_id) + num_completed_tasks(exp_id)) == total_tasks(exp_id)
def complete_experiment(exp_id: str) -> Experiment:
incomplete_exps.remove(exp_id)
experiments_progress.update(exp_to_progress_task[exp_id], completed=total_tasks(exp_id))
return self.get(exp_id)
with live_display:
# Populate progress trackers and state variables, also yielding
# any experiments that are already complete.
for e in experiments:
experiment = self.get(e.id if isinstance(e, Experiment) else e)
exp_id = experiment.id
incomplete_exps.add(exp_id)
# Ensure experiment is unique.
if exp_id in exp_to_task_to_job:
raise DuplicateExperimentError(experiment.display_name)
# Get state of experiment.
exp_to_task_to_job[exp_id] = {}
tasks = self.tasks(experiment)
for task in tasks:
latest_job = self._latest_job(task.jobs)
exp_to_task_to_job[exp_id][task.id] = (
None if latest_job is None else latest_job.id
)
if not task.jobs and not task.schedulable:
stopped_tasks.add(task.id)
# Add to progress tracker.
exp_to_progress_task[exp_id] = experiments_progress.add_task(
experiment.display_name,
total=total_tasks(exp_id),
)
# Now wait for the incomplete experiments to finalize.
while incomplete_exps:
# Collect (registered) incomplete jobs and also yield any experiments
# that have been finalized or stopped.
incomplete_jobs: List[str] = []
for exp_id, task_to_job in exp_to_task_to_job.items():
if exp_id in incomplete_exps:
if strict:
for task_id in task_to_job:
if task_id in stopped_tasks:
raise TaskStoppedError(task_id)
if not experiment_finalized(exp_id):
for task_id, job_id in task_to_job.items():
if job_id is not None and job_id not in finalized_jobs:
incomplete_jobs.append(job_id)
else:
# Experiment has just completed, yield it.
yield complete_experiment(exp_id)
# Check for timeout.
elapsed = time.monotonic() - start
if timeout is not None and elapsed >= timeout:
raise JobTimeoutError
if incomplete_jobs:
# Wait for current stack of incomplete jobs to finalize.
for job in self.beaker.job._as_completed(
*incomplete_jobs,
timeout=None if timeout is None else timeout - elapsed,
poll_interval=poll_interval,
quiet=quiet,
_progress=jobs_progress,
):
assert job.execution is not None
exp_id = job.execution.experiment
task_id = job.execution.task
if job.was_preempted:
# Job was preempted. Another job will start soon so we just stop
# tracking this one.
exp_to_task_to_job[exp_id][task_id] = None
else:
finalized_jobs.add(job.id)
# Ensure job was successful if `strict==True`.
if strict:
job.check()
# Update progress display.
experiments_progress.advance(exp_to_progress_task[exp_id])
# Check if corresponding experiment is now finalized.
if experiment_finalized(exp_id) and exp_id in incomplete_exps:
# Experiment has just completed, yield it.
yield complete_experiment(exp_id)
else:
# Wait for `poll_interval` to give Beaker a chance to register jobs.
time.sleep(poll_interval)
# Now check for jobs that haven't been registered yet.
for exp_id, task_to_job in exp_to_task_to_job.items():
if experiment_finalized(exp_id):
# Experiment already finalized, no need for anything.
continue
if all(job_id is not None for job_id in task_to_job.values()):
# All tasks already have registered jobs.
continue
for task in self.tasks(exp_id):
if task_to_job[task.id] is not None:
continue
if not task.jobs and not task.schedulable:
# Task was stopped before a job was created.
stopped_tasks.add(task.id)
elif task.jobs:
latest_job = self._latest_job(task.jobs)
assert latest_job is not None
task_to_job[task.id] = latest_job.id
def follow(
self,
experiment: Union[str, Experiment],
task: Optional[Union[str, Task]] = None,
timeout: Optional[float] = None,
strict: bool = False,
include_timestamps: bool = True,
) -> Generator[bytes, None, Experiment]:
"""
Follow an experiment live, creating a generator that produces log lines
(as bytes) from the task's job as they become available.
The return value of the generator is the final
:class:`~beaker.data_model.experiment.Experiment` object.
.. seealso::
:meth:`logs()`
.. seealso::
:meth:`wait_for()`
.. seealso::
:meth:`as_completed()`
.. seealso::
:meth:`Beaker.job.follow() <JobClient.follow>`
:param experiment: Experiment ID, name, or object.
:param task: The task ID, name, or object of a specific task from the Beaker experiment
to follow. Required if there are multiple tasks in the experiment.
:param timeout: Maximum amount of time to wait for (in seconds).
:param strict: If ``True``, the exit code of the job will be checked, and a
:class:`~beaker.exceptions.JobFailedError` will be raised for non-zero exit codes.
:param include_timestamps: If ``True`` (the default) timestamps from the Beaker logs
will be included in the output.
:raises ExperimentNotFound: If any experiment can't be found.
:raises ValueError: The experiment has no tasks or jobs, or the experiment has multiple tasks but
``task`` is not specified.
:raises TaskNotFound: If the given task doesn't exist.
:raises JobTimeoutError: If the ``timeout`` expires.
:raises JobFailedError: If ``strict=True`` and the task's job finishes with a non-zero exit code.
:raises TaskStoppedError: If ``strict=True`` and a task is stopped
before a corresponding job is initialized.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
:examples:
>>> for line in beaker.experiment.follow(hello_world_experiment_name):
... # Every log line from Beaker starts with an RFC 3339 UTC timestamp
... # (e.g. '2021-12-07T19:30:24.637600011Z'). If we don't want to print
... # the timestamps we can split them off like this:
... line = line[line.find(b"Z ")+2:]
... print(line.decode(errors="ignore"), end="")
<BLANKLINE>
Hello from Docker!
This message shows that your installation appears to be working correctly.
<BLANKLINE>
...
"""
if timeout is not None and timeout <= 0:
raise ValueError("'timeout' must be a positive number")
start = time.monotonic()
job: Optional[Job] = None
while job is None:
actual_task = self._task(experiment, task=task)
if actual_task.jobs:
job = self.latest_job(experiment, task=actual_task)
elif not actual_task.schedulable:
if strict:
raise TaskStoppedError(
task.id if isinstance(task, Task) else task, task=actual_task
)
else:
return self.get(
experiment.id if isinstance(experiment, Experiment) else experiment
)
if timeout is not None and time.monotonic() - start >= timeout:
raise JobTimeoutError(
"Job for task failed to initialize within '{timeout}' seconds"
)
time.sleep(2.0)
assert job is not None # for mypy
yield from self.beaker.job.follow(job, strict=strict, include_timestamps=include_timestamps)
return self.get(experiment.id if isinstance(experiment, Experiment) else experiment)
def url(
self, experiment: Union[str, Experiment], task: Optional[Union[str, Task]] = None
) -> str:
"""
Get the URL for an experiment.
:param experiment: The experiment ID, name, or object.
:param task: The task ID, name, or object of a specific task from the Beaker experiment
to get the url for.
:raises TaskNotFound: If the given task doesn't exist.
:raises ExperimentNotFound: If the experiment can't be found.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
experiment = self.resolve_experiment(experiment)
experiment_url = f"{self.config.agent_address}/ex/{self.url_quote(experiment.id)}"
if task is None:
return experiment_url
else:
task_id: str
if isinstance(task, Task):
task_id = task.id
else:
for t in self.tasks(experiment):
if t.name == task or t.id == task:
task_id = t.id
break
else:
raise TaskNotFound(f"No task '{task}' in experiment {experiment.id}")
return f"{experiment_url}/tasks/{task_id}"
def latest_job(
self,
experiment: Union[str, Experiment],
task: Optional[Union[str, Task]] = None,
ensure_finalized: bool = False,
) -> Optional[Job]:
"""
Get the latest job that ran for a task in an experiment.
:param experiment: The experiment ID, name, or object.
:param task: The take ID, name, or object.
:param ensure_finalized: Consider only finalized jobs.
:raises ValueError: The experiment has no tasks, or the experiment has multiple tasks but
``task`` is not specified.
:raises TaskNotFound: If the given task doesn't exist.
:raises ExperimentNotFound: If the experiment can't be found.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
return self._latest_job(
self._task(experiment, task).jobs, ensure_finalized=ensure_finalized
)
def _task(
self, experiment: Union[str, Experiment], task: Optional[Union[str, Task]] = None
) -> Task:
tasks = list(self.tasks(experiment))
exp_id = experiment if isinstance(experiment, str) else experiment.id
if not tasks:
raise ValueError(f"Experiment '{exp_id}' has no tasks")
else:
if task is None:
if len(tasks) == 1:
return tasks[0]
else:
raise ValueError(
f"'task' required since experiment '{exp_id}' has multiple tasks"
)
else:
task_name_or_id = task.id if isinstance(task, Task) else task
tasks = [t for t in tasks if t.name == task_name_or_id or t.id == task_name_or_id]
if tasks:
return tasks[0]
else:
raise TaskNotFound(f"No task '{task_name_or_id}' in experiment '{exp_id}'")
def _not_found_err_msg(self, experiment: Union[str, Experiment]) -> str:
experiment = experiment if isinstance(experiment, str) else experiment.id
return (
f"'{experiment}': Make sure you're using a valid Beaker experiment ID or the "
f"*full* name of the experiment (with the account prefix, e.g. 'username/experiment_name')"
)
def _validate_spec(self, spec: ExperimentSpec, workspace: Workspace) -> None:
for task in spec.tasks:
# Make sure image exists.
if task.image.beaker is not None:
self.beaker.image.get(task.image.beaker)
# Make sure all beaker data sources exist.
for data_mount in task.datasets or []:
source = data_mount.source
if source.beaker is not None:
self.beaker.dataset.get(source.beaker)
if source.secret is not None:
self.beaker.secret.get(source.secret, workspace=workspace)
if source.result is not None:
if source.result not in {t.name for t in spec.tasks}:
raise ValueError(
f"Data mount result source '{source.result}' not found in spec"
)
# Make sure secrets in env variables exist.
for env_var in task.env_vars or []:
if env_var.secret is not None:
self.beaker.secret.get(env_var.secret, workspace=workspace)
# Make sure cluster exists.
if task.context.cluster:
self.beaker.cluster.get(task.context.cluster)
def _latest_job(self, jobs: Sequence[Job], ensure_finalized: bool = False) -> Optional[Job]:
if ensure_finalized:
jobs = [
job
for job in jobs
if job.status.current == CurrentJobStatus.finalized and job.execution is not None
]
if not jobs:
return None
return sorted(jobs, key=lambda job: job.status.created)[-1]
| beaker-py-main | beaker/services/experiment.py |
from typing import Optional, Union
from ..data_model import *
from ..exceptions import *
from .service_client import ServiceClient
class SecretClient(ServiceClient):
"""
Accessed via :data:`Beaker.secret <beaker.Beaker.secret>`.
"""
def get(self, secret: str, workspace: Optional[Union[str, Workspace]] = None) -> Secret:
"""
Get metadata about a secret.
:param secret: The name of the secret.
:param workspace: The Beaker workspace ID, full name, or object. If not specified,
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.
:raises SecretNotFound: If the secret doesn't exist.
:raises WorkspaceNotSet: If neither ``workspace`` nor
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
workspace = self.resolve_workspace(workspace, read_only_ok=True)
return Secret.from_json(
self.request(
f"workspaces/{workspace.id}/secrets/{self.url_quote(secret)}",
method="GET",
exceptions_for_status={404: SecretNotFound(secret)},
).json()
)
def read(
self, secret: Union[str, Secret], workspace: Optional[Union[str, Workspace]] = None
) -> str:
"""
Read the value of a secret.
:param secret: The secret name or object.
:param workspace: The Beaker workspace ID, full name, or object. If not specified,
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.
:raises SecretNotFound: If the secret doesn't exist.
:raises WorkspaceNotSet: If neither ``workspace`` nor
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
workspace = self.resolve_workspace(workspace, read_only_ok=True)
name = secret.name if isinstance(secret, Secret) else secret
return self.request(
f"workspaces/{workspace.id}/secrets/{self.url_quote(name)}/value",
method="GET",
).content.decode()
def write(
self, name: str, value: str, workspace: Optional[Union[str, Workspace]] = None
) -> Secret:
"""
Write a new secret or update an existing one.
:param name: The name of the secret.
:param value: The value to write to the secret.
:param workspace: The Beaker workspace ID, full name, or object. If not specified,
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.
:raises WorkspaceNotSet: If neither ``workspace`` nor
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
workspace = self.resolve_workspace(workspace)
return Secret.from_json(
self.request(
f"workspaces/{workspace.id}/secrets/{self.url_quote(name)}/value",
method="PUT",
data=value.encode(),
).json()
)
def delete(self, secret: Union[str, Secret], workspace: Optional[Union[str, Workspace]] = None):
"""
Permanently delete a secret.
:param secret: The secret name or object.
:param workspace: The Beaker workspace ID, full name, or object. If not specified,
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.
:raises SecretNotFound: If the secret doesn't exist.
:raises WorkspaceNotSet: If neither ``workspace`` nor
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
workspace = self.resolve_workspace(workspace)
name = secret.name if isinstance(secret, Secret) else secret
return self.request(
f"workspaces/{workspace.id}/secrets/{self.url_quote(name)}",
method="DELETE",
exceptions_for_status={404: SecretNotFound(secret)},
)
| beaker-py-main | beaker/services/secret.py |
from typing import Dict, List, Optional, Union
from ..data_model import *
from ..exceptions import *
from .service_client import ServiceClient
class ClusterClient(ServiceClient):
"""
Accessed via :data:`Beaker.cluster <beaker.Beaker.cluster>`.
"""
def get(self, cluster: str) -> Cluster:
"""
Get information about the cluster.
:param cluster: The cluster name or ID.
:raises ClusterNotFound: If the cluster doesn't exist.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
def _get(id: str) -> Cluster:
return Cluster.from_json(
self.request(
f"clusters/{id}",
exceptions_for_status={404: ClusterNotFound(self._not_found_err_msg(id))},
).json()
)
try:
# Could be a cluster ID, so we try that first before trying to resolve the name.
return _get(cluster)
except ClusterNotFound:
try:
cluster_name = self.resolve_cluster_name(cluster)
return _get(cluster_name)
except (ValueError, OrganizationNotSet, ClusterNotFound):
# If the name was invalid, we'll just raise the original error.
pass
raise
def create(
self,
name: str,
max_size: int = 1,
preemptible: bool = False,
cpus: Optional[float] = None,
gpus: int = 0,
gpu_type: Optional[str] = None,
memory: Optional[str] = None,
) -> Cluster:
"""
Create a new Beaker cloud cluster.
.. note::
For creating on-premise clusters you should still use the `Beaker CLI
<https://github.com/allenai/beaker>`_.
:param name: The name to assign to the new cluster.
If :data:`Config.default_org <beaker.Config.default_org>` is not set,
the name should start with the name of an organization:
"{organization}/{cluster_name}", e.g. "ai2/my-new-cluster".
:param max_size: The maximum number of nodes the cluster can scale up to.
:param preemptible: Use preemptible cloud machines for the nodes.
:param cpus: The number of virtual CPU available to each node.
:param gpus: The number of GPUs available to each node.
:param gpu_type: The type of GPU available to each node.
:param memory: The amount of memory available to each node, specified as a number
with a unit suffix. E.g. "2.5GiB".
:raises ValueError: If the cluster name or requested resources are invalid.
:raises ClusterConflict: If a cluster by that name already exists.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
organization, cluster_name = self.resolve_cluster_name(name).split("/", 1)
if not cpus and not gpus and not gpu_type and not memory:
raise ValueError("Cloud clusters must specify at least 1 resource")
return Cluster.from_json(
self.request(
f"clusters/{self.url_quote(organization)}",
method="POST",
data=ClusterSpec(
name=cluster_name,
capacity=max_size,
preemptible=preemptible,
spec=NodeResources(
cpu_count=cpus, gpu_count=gpus, gpu_type=gpu_type, memory=memory
),
),
exceptions_for_status={409: ClusterConflict(cluster_name)},
).json()
)
def update(self, cluster: Union[str, Cluster], max_size: int) -> Cluster:
"""
Modify a cluster.
:param cluster: The cluster ID, full name, or object.
:param max_size: The maximum number of nodes.
:raises ClusterNotFound: If the cluster doesn't exist.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
cluster_name = self.resolve_cluster(cluster).full_name
return Cluster.from_json(
self.request(
f"clusters/{cluster_name}",
method="PATCH",
data=ClusterPatch(capacity=max_size),
exceptions_for_status={404: ClusterNotFound(self._not_found_err_msg(cluster))},
).json()
)
def delete(self, cluster: Union[str, Cluster]):
"""
Delete a cluster.
:param cluster: The cluster ID, full name, or object.
:raises ClusterNotFound: If the cluster doesn't exist.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
cluster_name = self.resolve_cluster(cluster).full_name
self.request(
f"clusters/{cluster_name}",
method="DELETE",
exceptions_for_status={404: ClusterNotFound(self._not_found_err_msg(cluster))},
)
def list(self, org: Optional[Union[str, Organization]] = None) -> List[Cluster]:
"""
List clusters under an organization.
:param org: The organization name or object. If not specified,
:data:`Beaker.config.default_org <beaker.Config.default_org>` is used.
:raises OrganizationNotFound: If the organization doesn't exist.
:raises OrganizationNotSet: If neither ``org`` nor
:data:`Beaker.config.default_org <beaker.Config.default_org>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
org_id = self.resolve_org(org).id
return [
Cluster.from_json(d)
for d in self.request(
f"clusters/{org_id}",
method="GET",
exceptions_for_status={404: OrganizationNotFound(org_id)},
).json()["data"]
]
def nodes(self, cluster: Union[str, Cluster]) -> List[Node]:
"""
List the nodes in a cluster.
:param cluster: The cluster ID, full name, or object.
:raises ClusterNotFound: If the cluster doesn't exist.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
cluster_name = self.resolve_cluster(cluster).full_name
return [
Node.from_json(d)
for d in self.request(
f"clusters/{cluster_name}/nodes",
method="GET",
exceptions_for_status={404: ClusterNotFound(self._not_found_err_msg(cluster))},
).json()["data"]
]
def utilization(self, cluster: Union[str, Cluster]) -> ClusterUtilization:
"""
Get current utilization stats for each node in a cluster.
:param cluster: The cluster ID, full name, or object.
:raises ClusterNotFound: If the cluster doesn't exist.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
cluster = self.resolve_cluster(cluster)
nodes = self.nodes(cluster)
running_jobs = 0
queued_jobs = 0
running_preemptible_jobs = 0
node_to_util: Dict[str, Dict[str, Union[int, float]]] = {
node.id: {
"running_jobs": 0,
"running_preemptible_jobs": 0,
"gpus_used": 0,
"cpus_used": 0.0,
}
for node in nodes
}
for job in self.beaker.job.list(cluster=cluster, finalized=False):
if job.status.current in (CurrentJobStatus.running, CurrentJobStatus.idle):
if job.node not in node_to_util:
continue
running_jobs += 1
if job.priority == Priority.preemptible:
running_preemptible_jobs += 1
elif job.status.current == CurrentJobStatus.created:
queued_jobs += 1
if job.node is not None:
if job.node not in node_to_util:
continue # unlikely
node_util = node_to_util[job.node]
node_util["running_jobs"] += 1
if job.priority == Priority.preemptible:
node_util["running_preemptible_jobs"] += 1
if job.requests is not None:
if job.requests.gpu_count is not None:
node_util["gpus_used"] += job.requests.gpu_count
if job.requests.cpu_count is not None:
node_util["cpus_used"] += job.requests.cpu_count
return ClusterUtilization(
cluster=cluster,
running_jobs=running_jobs,
queued_jobs=queued_jobs,
running_preemptible_jobs=running_preemptible_jobs,
nodes=tuple(
[
NodeUtilization(
id=node.id,
hostname=node.hostname,
limits=node.limits,
running_jobs=int(node_to_util[node.id]["running_jobs"]),
running_preemptible_jobs=int(
node_to_util[node.id]["running_preemptible_jobs"]
),
used=NodeResources(
gpu_count=None
if node.limits.gpu_count is None
else int(
min(node.limits.gpu_count, node_to_util[node.id]["gpus_used"])
),
cpu_count=None
if node.limits.cpu_count is None
else min(node.limits.cpu_count, node_to_util[node.id]["cpus_used"]),
gpu_type=node.limits.gpu_type,
),
free=NodeResources(
gpu_count=None
if node.limits.gpu_count is None
else int(
max(0, node.limits.gpu_count - node_to_util[node.id]["gpus_used"])
),
cpu_count=None
if node.limits.cpu_count is None
else max(0, node.limits.cpu_count - node_to_util[node.id]["cpus_used"]),
gpu_type=node.limits.gpu_type,
),
cordoned=node.cordoned is not None,
)
for node in nodes
]
),
)
def filter_available(
self, resources: TaskResources, *clusters: Union[str, Cluster]
) -> List[ClusterUtilization]:
"""
Filter out clusters that don't have enough available resources, returning
a list of :class:`ClusterUtilization <beaker.data_model.cluster.ClusterUtilization>` for each
cluster that has sufficient resources.
This can be used, for example, to automatically find an on-premise cluster with enough
free resources to run a particular task.
.. caution::
This method is experimental and may change or be removed in future releases.
:param resources: The requested resources.
:param clusters: Clusters to inspect and filter.
:raises ClusterNotFound: If one of the clusters doesn't exist.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
def node_is_compat(node_shape: NodeResources) -> bool:
if resources.gpu_count and (
node_shape.gpu_count is None or node_shape.gpu_count < resources.gpu_count
):
return False
if resources.cpu_count and (
node_shape.cpu_count is None or node_shape.cpu_count < resources.cpu_count
):
return False
# TODO: check memory too
return True
def cluster_is_available(cluster_: Union[str, Cluster]) -> Optional[ClusterUtilization]:
cluster: Cluster = self.resolve_cluster(cluster_)
if cluster.node_shape is not None and not node_is_compat(cluster.node_shape):
return None
cluster_utilization = self.utilization(cluster)
if cluster.autoscale and len(cluster_utilization.nodes) < cluster.capacity:
available.append(cluster_utilization)
else:
for node_util in cluster_utilization.nodes:
if not node_util.cordoned and node_is_compat(node_util.free):
return cluster_utilization
return None
available: List[ClusterUtilization] = []
import concurrent.futures
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = []
for cluster_ in clusters:
futures.append(executor.submit(cluster_is_available, cluster_))
for future in concurrent.futures.as_completed(futures):
cluster_util = future.result()
if cluster_util is not None:
available.append(cluster_util)
return sorted(available, key=lambda util: (util.queued_jobs, util.running_jobs))
def url(self, cluster: Union[str, Cluster]) -> str:
"""
Get the URL for a cluster.
:param cluster: The cluster ID, full name, or object.
:raises ClusterNotFound: If the cluster doesn't exist.
"""
cluster_name = self.resolve_cluster(cluster).full_name
return f"{self.config.agent_address}/cl/{cluster_name}/details"
def _not_found_err_msg(self, cluster: Union[str, Cluster]) -> str:
cluster = cluster if isinstance(cluster, str) else cluster.id
return (
f"'{cluster}': Make sure you're using a valid ID or *full* name of the cluster "
f"(with the organization prefix, e.g. 'org/cluster_name')"
)
| beaker-py-main | beaker/services/cluster.py |
from typing import Generator, List, Optional, Union
from ..data_model import *
from ..exceptions import *
from .service_client import ServiceClient
class GroupClient(ServiceClient):
"""
Accessed via :data:`Beaker.group <beaker.Beaker.group>`.
"""
def get(self, group: str) -> Group:
"""
Get info about a group.
:param group: The group ID or name.
:raises GroupNotFound: If the group can't be found.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
def _get(id: str) -> Group:
return Group.from_json(
self.request(
f"groups/{self.url_quote(id)}",
exceptions_for_status={404: GroupNotFound(self._not_found_err_msg(id))},
).json()
)
try:
# Could be an ID or full name, so we try that first.
return _get(group)
except GroupNotFound:
if "/" not in group:
# Try with adding the account name.
try:
return _get(f"{self.beaker.account.name}/{group}")
except GroupNotFound:
pass
raise
def create(
self,
name: str,
*experiments: Union[str, Experiment],
description: Optional[str] = None,
workspace: Optional[Union[Workspace, str]] = None,
) -> Group:
"""
:param name: The name to assign the group.
:param experiments: Experiments to add to the group.
:param description: Group description.
:param workspace: The workspace to create the group under. If not specified,
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.
:raises ValueError: If the name is invalid.
:raises GroupConflict: If a group with the given name already exists.
:raises ExperimentNotFound: If any of the given experiments don't exist.
:raises WorkspaceNotSet: If neither ``workspace`` nor
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
self.validate_beaker_name(name)
workspace = self.resolve_workspace(workspace)
exp_ids: List[str] = list(
set([self.resolve_experiment(experiment).id for experiment in experiments])
)
group_data = self.request(
"groups",
method="POST",
data=GroupSpec(
name=name,
description=description,
workspace=workspace.full_name,
experiments=exp_ids,
),
exceptions_for_status={409: GroupConflict(name)},
).json()
return self.get(group_data["id"])
def delete(self, group: Union[str, Group]):
"""
Delete a group.
:param group: The group ID, name, or object.
:raises GroupNotFound: If the group can't be found.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
group_id = self.resolve_group(group).id
self.request(
f"groups/{self.url_quote(group_id)}",
method="DELETE",
exceptions_for_status={404: GroupNotFound(self._not_found_err_msg(group))},
)
def rename(self, group: Union[str, Group], name: str) -> Group:
"""
Rename a group.
:param group: The group ID, name, or object.
:param name: The new name for the group.
:raises ValueError: If the new name is invalid.
:raises GroupNotFound: If the group can't be found.
:raises GroupConflict: If a group by that name already exists.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
self.validate_beaker_name(name)
group_id = self.resolve_group(group).id
return Group.from_json(
self.request(
f"groups/{self.url_quote(group_id)}",
method="PATCH",
data=GroupPatch(name=name),
exceptions_for_status={
404: GroupNotFound(self._not_found_err_msg(group)),
409: GroupConflict(name),
},
).json()
)
def add_experiments(self, group: Union[str, Group], *experiments: Union[str, Experiment]):
"""
Add experiments to a group.
:param group: The group ID, name, or object.
:param experiments: Experiments to add to the group.
:raises GroupNotFound: If the group can't be found.
:raises ExperimentNotFound: If any of the given experiments don't exist.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
group_id = self.resolve_group(group).id
exp_ids: List[str] = list(
set([self.resolve_experiment(experiment).id for experiment in experiments])
)
self.request(
f"groups/{self.url_quote(group_id)}",
method="PATCH",
data=GroupPatch(add_experiments=exp_ids),
exceptions_for_status={404: GroupNotFound(self._not_found_err_msg(group))},
)
def remove_experiments(self, group: Union[str, Group], *experiments: Union[str, Experiment]):
"""
Remove experiments from a group.
:param group: The group ID, name, or object.
:param experiments: Experiments to remove from the group.
:raises GroupNotFound: If the group can't be found.
:raises ExperimentNotFound: If any of the given experiments don't exist.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
group_id = self.resolve_group(group).id
exp_ids: List[str] = list(
set([self.resolve_experiment(experiment).id for experiment in experiments])
)
self.request(
f"groups/{self.url_quote(group_id)}",
method="PATCH",
data=GroupPatch(remove_experiments=exp_ids),
exceptions_for_status={404: GroupNotFound(self._not_found_err_msg(group))},
)
def list_experiments(self, group: Union[str, Group]) -> List[Experiment]:
"""
List experiments in a group.
:param group: The group ID, name, or object.
:raises GroupNotFound: If the group can't be found.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
group_id = self.resolve_group(group).id
exp_ids = self.request(
f"groups/{self.url_quote(group_id)}/experiments",
method="GET",
exceptions_for_status={404: GroupNotFound(self._not_found_err_msg(group))},
).json()
# TODO: make these requests concurrently.
return [self.beaker.experiment.get(exp_id) for exp_id in exp_ids or []]
def export_experiments(
self, group: Union[str, Group], quiet: bool = False
) -> Generator[bytes, None, None]:
"""
Export all experiments and metrics in a group as a CSV.
Returns a generator that should be exhausted to get the complete file.
:param group: The group ID, name, or object.
:param quiet: If ``True``, progress won't be displayed.
:raises GroupNotFound: If the group can't be found.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
group_id = self.resolve_group(group).id
resp = self.request(
f"groups/{self.url_quote(group_id)}/export.csv",
method="GET",
exceptions_for_status={404: GroupNotFound(self._not_found_err_msg(group))},
stream=True,
).iter_content(chunk_size=1024)
from ..progress import get_group_experiments_progress
with get_group_experiments_progress(quiet) as progress:
task_id = progress.add_task("Downloading:")
total = 0
for chunk in resp:
if chunk:
advance = len(chunk)
total += advance
progress.update(task_id, total=total + 1, advance=advance)
yield chunk
def url(self, group: Union[str, Group]) -> str:
"""
Get the URL for a group.
:param group: The group ID, name, or object.
:raises GroupNotFound: If the group can't be found.
"""
group_id = self.resolve_group(group).id
return f"{self.config.agent_address}/gr/{self.url_quote(group_id)}/compare"
def _not_found_err_msg(self, group: Union[str, Group]) -> str:
group = group if isinstance(group, str) else group.id
return (
f"'{group}': Make sure you're using a valid Beaker group ID or the "
f"*full* name of the group (with the account prefix, e.g. 'username/group_name')"
)
| beaker-py-main | beaker/services/group.py |
from ..data_model import *
from ..exceptions import *
from .service_client import ServiceClient
class NodeClient(ServiceClient):
"""
Accessed via :data:`Beaker.node <beaker.Beaker.node>`.
"""
def get(self, node_id: str) -> Node:
"""
Get information about a node.
:param node_id: The ID of the node.
:raises NodeNotFound: If the node doesn't exist.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
return Node.from_json(
self.request(
f"nodes/{node_id}",
exceptions_for_status={404: NodeNotFound(node_id)},
).json()
)
| beaker-py-main | beaker/services/node.py |
from collections import defaultdict
from datetime import datetime
from typing import Any, Dict, Generator, List, Optional, Type, TypeVar, Union
from ..data_model import *
from ..data_model.base import BasePage
from ..exceptions import *
from ..util import format_cursor
from .service_client import ServiceClient
T = TypeVar("T")
class WorkspaceClient(ServiceClient):
"""
Accessed via :data:`Beaker.workspace <beaker.Beaker.workspace>`.
"""
def get(self, workspace: Optional[str] = None) -> Workspace:
"""
Get information about the workspace.
:param workspace: The workspace name or ID. If not specified,
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.
:raises WorkspaceNotFound: If the workspace doesn't exist.
:raises WorkspaceNotSet: If neither ``workspace`` nor
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
workspace = workspace or self.config.default_workspace
if workspace is None:
raise WorkspaceNotSet("'workspace' argument required since default workspace not set")
def _get(id: str) -> Workspace:
return Workspace.from_json(
self.request(
f"workspaces/{self.url_quote(id)}",
exceptions_for_status={404: WorkspaceNotFound(self._not_found_err_msg(id))},
).json()
)
try:
# Could be a workspace ID, so we try that first before trying to resolve the name.
return _get(workspace)
except WorkspaceNotFound:
try:
# That didn't work, so now we'll try to resolve the name.
workspace_name = self.resolve_workspace_name(workspace)
return _get(workspace_name)
except (ValueError, OrganizationNotSet, WorkspaceNotFound):
# If the name was invalid, we'll just raise the original error.
pass
raise
def create(
self, workspace: str, *, description: Optional[str] = None, public: bool = False
) -> Workspace:
"""
Create a workspace.
:param workspace: The workspace name.
:param description: Text description for the workspace.
:param public: If the workspace should be public.
:raises ValueError: If the workspace name is invalid.
:raises WorkspaceConflict: If a workspace by that name already exists.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
workspace_name = self.resolve_workspace_name(workspace)
org, name = workspace_name.split("/", 1)
return Workspace.from_json(
self.request(
"workspaces",
method="POST",
data=WorkspaceSpec(name=name, org=org, description=description, public=public),
exceptions_for_status={
409: WorkspaceConflict(workspace_name),
},
).json()
)
def ensure(self, workspace: str) -> Workspace:
"""
Ensure that the given workspace exists.
:param workspace: The workspace name.
:raises ValueError: If the workspace name is invalid.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
try:
return self.get(workspace)
except WorkspaceNotFound:
return self.create(workspace)
def archive(self, workspace: Union[str, Workspace]) -> Workspace:
"""
Archive a workspace, making it read-only.
:param workspace: The workspace to archive.
:raises WorkspaceNotFound: If the workspace doesn't exist.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
if workspace is None: # could accidentally archive default workspace if None
raise TypeError("Expected 'str', got 'NoneType'")
workspace_name = self.resolve_workspace(workspace).full_name
return Workspace.from_json(
self.request(
f"workspaces/{self.url_quote(workspace_name)}",
method="PATCH",
data=WorkspacePatch(archive=True),
exceptions_for_status={
403: WorkspaceWriteError(workspace_name),
404: WorkspaceNotFound(self._not_found_err_msg(workspace_name)),
},
).json()
)
def unarchive(self, workspace: Union[str, Workspace]) -> Workspace:
"""
Unarchive a workspace.
:param workspace: The workspace to unarchive.
:raises WorkspaceNotFound: If the workspace doesn't exist.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
if workspace is None: # could accidentally unarchive default workspace if None
raise TypeError("Expected 'str', got 'NoneType'")
workspace_name = self.resolve_workspace(workspace, read_only_ok=True).full_name
return Workspace.from_json(
self.request(
f"workspaces/{self.url_quote(workspace_name)}",
method="PATCH",
data=WorkspacePatch(archive=False),
exceptions_for_status={
404: WorkspaceNotFound(self._not_found_err_msg(workspace_name))
},
).json()
)
def rename(self, workspace: Union[str, Workspace], name: str) -> Workspace:
"""
Rename a workspace.
:param workspace: The workspace to rename.
:param name: The new name to assign to the workspace.
This should only *not* include the organization.
:raises WorkspaceNotFound: If the workspace doesn't exist.
:raises ValueError: If the new name is invalid.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
self.validate_beaker_name(name)
if workspace is None: # could accidentally rename default workspace if None
raise TypeError("Expected 'str', got 'NoneType'")
workspace_name = self.resolve_workspace(workspace).full_name
return Workspace.from_json(
self.request(
f"workspaces/{self.url_quote(workspace_name)}",
method="PATCH",
data=WorkspacePatch(name=name),
exceptions_for_status={
403: WorkspaceWriteError(workspace_name),
404: WorkspaceNotFound(self._not_found_err_msg(workspace_name)),
409: WorkspaceConflict(name),
},
).json()
)
def move(
self,
*items: Union[str, Image, Dataset, Experiment],
workspace: Optional[Union[str, Workspace]] = None,
):
"""
Move items into a workspace.
:param items: The items to move into the workspace.
:param workspace: The Beaker workspace name or object. If not specified,
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.
:raises WorkspaceNotFound: If the workspace doesn't exist.
:raises WorkspaceNotSet: If neither ``workspace`` nor
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
workspace_name = self.resolve_workspace(workspace).full_name
self.request(
f"workspaces/{self.url_quote(workspace_name)}/transfer",
method="POST",
data=WorkspaceTransferSpec(
ids=[item if isinstance(item, str) else item.id for item in items]
),
exceptions_for_status={
403: WorkspaceWriteError(workspace_name),
404: WorkspaceNotFound(self._not_found_err_msg(workspace_name)),
},
)
def _paginated_requests(
self,
page_class: Type[BasePage[T]],
path: str,
query: Dict[str, Any],
limit: Optional[int] = None,
workspace_name: Optional[str] = None,
) -> Generator[T, None, None]:
if limit:
query["limit"] = str(limit)
exceptions_for_status: Optional[Dict[int, Exception]] = (
None
if workspace_name is None
else {404: WorkspaceNotFound(self._not_found_err_msg(workspace_name))}
)
count = 0
while True:
page = page_class.from_json(
self.request(
path,
method="GET",
query=query,
exceptions_for_status=exceptions_for_status,
).json()
)
for x in page.data:
count += 1
yield x
if limit is not None and count >= limit:
return
query["cursor"] = page.next_cursor or page.next
if not query["cursor"]:
break
def iter(
self,
org: Optional[Union[str, Organization]] = None,
*,
author: Optional[Union[str, Account]] = None,
match: Optional[str] = None,
archived: Optional[bool] = None,
limit: Optional[int] = None,
sort_by: WorkspaceSort = WorkspaceSort.created,
descending: bool = True,
cursor: int = 0,
) -> Generator[Workspace, None, None]:
org = self.resolve_org(org)
query: Dict[str, str] = {
"org": org.id,
"field": str(sort_by),
"order": "descending" if descending else "ascending",
"cursor": format_cursor(cursor),
}
if author is not None:
query["author"] = (
author.name if isinstance(author, Account) else self.beaker.account.get(author).name
)
if match is not None:
query["q"] = match
if archived is not None:
query["archived"] = str(archived).lower()
yield from self._paginated_requests(WorkspacePage, "workspaces", query, limit=limit)
def list(
self,
org: Optional[Union[str, Organization]] = None,
*,
author: Optional[Union[str, Account]] = None,
match: Optional[str] = None,
archived: Optional[bool] = None,
limit: Optional[int] = None,
sort_by: WorkspaceSort = WorkspaceSort.created,
descending: bool = True,
cursor: int = 0,
) -> List[Workspace]:
"""
List workspaces belonging to an organization.
:param org: The organization name or object. If not specified,
:data:`Beaker.config.default_org <beaker.Config.default_org>` is used.
:param author: Only list workspaces authored by this account.
:param match: Only include workspaces matching the text.
:param archived: Only include/exclude archived workspaces.
:param limit: Limit the number of workspaces returned.
:param sort_by: The field to sort the results by.
:param descending: Order the results in descending order according to the ``sort_by`` field.
:param cursor: Set the starting cursor for the query. You can use this to paginate the results.
:raises OrganizationNotFound: If the organization doesn't exist.
:raises OrganizationNotSet: If neither ``org`` nor
:data:`Beaker.config.default_org <beaker.Config.default_org>` are set.
:raises AccountNotFound: If the author account doesn't exist.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
return list(
self.iter(
org=org,
author=author,
match=match,
archived=archived,
limit=limit,
sort_by=sort_by,
descending=descending,
cursor=cursor,
)
)
def iter_images(
self,
workspace: Optional[Union[str, Workspace]] = None,
*,
match: Optional[str] = None,
limit: Optional[int] = None,
sort_by: ImageSort = ImageSort.created,
descending: bool = True,
cursor: int = 0,
) -> Generator[Image, None, None]:
"""
Iterate over the images in a workspace.
:param workspace: The Beaker workspace name or object. If not specified,
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.
:param match: Only include images matching the text.
:param limit: Limit the number of images returned.
:param sort_by: The field to sort the results by.
:param descending: Order the results in descending order according to the ``sort_by`` field.
:param cursor: Set the starting cursor for the query. You can use this to paginate the results.
:raises WorkspaceNotFound: If the workspace doesn't exist.
:raises WorkspaceNotSet: If neither ``workspace`` nor
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
workspace_name = self.resolve_workspace(workspace, read_only_ok=True).full_name
query: Dict[str, str] = {
"field": str(sort_by),
"order": "descending" if descending else "ascending",
"cursor": format_cursor(cursor),
}
if match is not None:
query["q"] = match
yield from self._paginated_requests(
ImagesPage,
f"workspaces/{self.url_quote(workspace_name)}/images",
query,
limit=limit,
workspace_name=workspace_name,
)
def images(
self,
workspace: Optional[Union[str, Workspace]] = None,
*,
match: Optional[str] = None,
limit: Optional[int] = None,
sort_by: ImageSort = ImageSort.created,
descending: bool = True,
cursor: int = 0,
) -> List[Image]:
"""
List the images in a workspace.
:param workspace: The Beaker workspace name or object. If not specified,
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.
:param match: Only include images matching the text.
:param limit: Limit the number of images returned.
:param sort_by: The field to sort the results by.
:param descending: Order the results in descending order according to the ``sort_by`` field.
:param cursor: Set the starting cursor for the query. You can use this to paginate the results.
:raises WorkspaceNotFound: If the workspace doesn't exist.
:raises WorkspaceNotSet: If neither ``workspace`` nor
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
return list(
self.iter_images(
workspace=workspace,
match=match,
limit=limit,
sort_by=sort_by,
descending=descending,
cursor=cursor,
)
)
def iter_experiments(
self,
workspace: Optional[Union[str, Workspace]] = None,
*,
match: Optional[str] = None,
limit: Optional[int] = None,
sort_by: ExperimentSort = ExperimentSort.created,
descending: bool = True,
cursor: int = 0,
) -> Generator[Experiment, None, None]:
"""
Iterate over the experiments in a workspace.
:param workspace: The Beaker workspace name or object. If not specified,
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.
:param match: Only include experiments matching the text.
:param limit: Limit the number of experiments returned.
:param sort_by: The field to sort the results by.
:param descending: Order the results in descending order according to the ``sort_by`` field.
:param cursor: Set the starting cursor for the query. You can use this to paginate the results.
:raises WorkspaceNotFound: If the workspace doesn't exist.
:raises WorkspaceNotSet: If neither ``workspace`` nor
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
workspace_name = self.resolve_workspace(workspace, read_only_ok=True).full_name
query: Dict[str, str] = {
"field": str(sort_by),
"order": "descending" if descending else "ascending",
"cursor": format_cursor(cursor),
}
if match is not None:
query["q"] = match
yield from self._paginated_requests(
ExperimentsPage,
f"workspaces/{self.url_quote(workspace_name)}/experiments",
query,
limit=limit,
workspace_name=workspace_name,
)
def experiments(
self,
workspace: Optional[Union[str, Workspace]] = None,
*,
match: Optional[str] = None,
limit: Optional[int] = None,
sort_by: ExperimentSort = ExperimentSort.created,
descending: bool = True,
cursor: int = 0,
) -> List[Experiment]:
"""
List the experiments in a workspace.
:param workspace: The Beaker workspace name or object. If not specified,
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.
:param match: Only include experiments matching the text.
:param limit: Limit the number of experiments returned.
:param sort_by: The field to sort the results by.
:param descending: Order the results in descending order according to the ``sort_by`` field.
:param cursor: Set the starting cursor for the query. You can use this to paginate the results.
:raises WorkspaceNotFound: If the workspace doesn't exist.
:raises WorkspaceNotSet: If neither ``workspace`` nor
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
return list(
self.iter_experiments(
workspace=workspace,
match=match,
limit=limit,
sort_by=sort_by,
descending=descending,
cursor=cursor,
)
)
def iter_datasets(
self,
workspace: Optional[Union[str, Workspace]] = None,
*,
match: Optional[str] = None,
results: Optional[bool] = None,
uncommitted: Optional[bool] = None,
limit: Optional[int] = None,
sort_by: DatasetSort = DatasetSort.created,
descending: bool = True,
cursor: int = 0,
) -> Generator[Dataset, None, None]:
"""
Iterate over the datasets in a workspace.
:param workspace: The Beaker workspace name, or object. If not specified,
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.
:param match: Only include datasets matching the text.
:param results: Only include/exclude experiment result datasets.
:param uncommitted: Only include/exclude uncommitted datasets.
:param limit: Limit the number of datasets returned.
:param sort_by: The field to sort the results by.
:param descending: Order the results in descending order according to the ``sort_by`` field.
:param cursor: Set the starting cursor for the query. You can use this to paginate the results.
:raises WorkspaceNotFound: If the workspace doesn't exist.
:raises WorkspaceNotSet: If neither ``workspace`` nor
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
workspace_name = self.resolve_workspace(workspace, read_only_ok=True).full_name
query: Dict[str, str] = {
"field": str(sort_by),
"order": "descending" if descending else "ascending",
"cursor": format_cursor(cursor),
}
if match is not None:
query["q"] = match
if results is not None:
query["results"] = str(results).lower()
if uncommitted is not None:
query["committed"] = str(not uncommitted).lower()
yield from self._paginated_requests(
DatasetsPage,
f"workspaces/{self.url_quote(workspace_name)}/datasets",
query,
limit=limit,
workspace_name=workspace_name,
)
def datasets(
self,
workspace: Optional[Union[str, Workspace]] = None,
*,
match: Optional[str] = None,
results: Optional[bool] = None,
uncommitted: Optional[bool] = None,
limit: Optional[int] = None,
sort_by: DatasetSort = DatasetSort.created,
descending: bool = True,
cursor: int = 0,
) -> List[Dataset]:
"""
List the datasets in a workspace.
:param workspace: The Beaker workspace name, or object. If not specified,
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.
:param match: Only include datasets matching the text.
:param results: Only include/exclude experiment result datasets.
:param uncommitted: Only include/exclude uncommitted datasets.
:param limit: Limit the number of datasets returned.
:param sort_by: The field to sort the results by.
:param descending: Order the results in descending order according to the ``sort_by`` field.
:param cursor: Set the starting cursor for the query. You can use this to paginate the results.
:raises WorkspaceNotFound: If the workspace doesn't exist.
:raises WorkspaceNotSet: If neither ``workspace`` nor
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
return list(
self.iter_datasets(
workspace=workspace,
match=match,
results=results,
uncommitted=uncommitted,
limit=limit,
sort_by=sort_by,
descending=descending,
cursor=cursor,
)
)
def secrets(self, workspace: Optional[Union[str, Workspace]] = None) -> List[Secret]:
"""
List secrets in a workspace.
:param workspace: The Beaker workspace name, or object. If not specified,
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.
:raises WorkspaceNotFound: If the workspace doesn't exist.
:raises WorkspaceNotSet: If neither ``workspace`` nor
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
workspace_name = self.resolve_workspace(workspace, read_only_ok=True).full_name
return [
Secret.from_json(d)
for d in self.request(
f"workspaces/{self.url_quote(workspace_name)}/secrets",
method="GET",
exceptions_for_status={
404: WorkspaceNotFound(self._not_found_err_msg(workspace_name))
},
).json()["data"]
]
def iter_groups(
self,
workspace: Optional[Union[str, Workspace]] = None,
*,
match: Optional[str] = None,
limit: Optional[int] = None,
sort_by: GroupSort = GroupSort.created,
descending: bool = True,
cursor: int = 0,
) -> Generator[Group, None, None]:
"""
Iterate over groups in a workspace.
:param workspace: The Beaker workspace name, or object. If not specified,
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.
:param match: Only include groups matching the text.
:param limit: Limit the number of groups returned.
:param sort_by: The field to sort the results by.
:param descending: Order the results in descending order according to the ``sort_by`` field.
:param cursor: Set the starting cursor for the query. You can use this to paginate the results.
:raises WorkspaceNotFound: If the workspace doesn't exist.
:raises WorkspaceNotSet: If neither ``workspace`` nor
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
workspace_name = self.resolve_workspace(workspace, read_only_ok=True).full_name
query: Dict[str, str] = {
"field": str(sort_by),
"order": "descending" if descending else "ascending",
"cursor": format_cursor(cursor),
}
if match is not None:
query["q"] = match
yield from self._paginated_requests(
GroupsPage,
f"workspaces/{self.url_quote(workspace_name)}/groups",
query,
limit=limit,
workspace_name=workspace_name,
)
def groups(
self,
workspace: Optional[Union[str, Workspace]] = None,
*,
match: Optional[str] = None,
limit: Optional[int] = None,
sort_by: GroupSort = GroupSort.created,
descending: bool = True,
cursor: int = 0,
) -> List[Group]:
"""
List groups in a workspace.
:param workspace: The Beaker workspace name, or object. If not specified,
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.
:param match: Only include groups matching the text.
:param limit: Limit the number of groups returned.
:param sort_by: The field to sort the results by.
:param descending: Order the results in descending order according to the ``sort_by`` field.
:param cursor: Set the starting cursor for the query. You can use this to paginate the results.
:raises WorkspaceNotFound: If the workspace doesn't exist.
:raises WorkspaceNotSet: If neither ``workspace`` nor
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
return list(
self.iter_groups(
workspace=workspace,
match=match,
limit=limit,
sort_by=sort_by,
descending=descending,
cursor=cursor,
)
)
def get_permissions(
self, workspace: Optional[Union[str, Workspace]] = None
) -> WorkspacePermissions:
"""
Get workspace permissions.
:param workspace: The Beaker workspace name, or object. If not specified,
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.
:raises WorkspaceNotFound: If the workspace doesn't exist.
:raises WorkspaceNotSet: If neither ``workspace`` nor
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
workspace_name = self.resolve_workspace(workspace, read_only_ok=True).full_name
return WorkspacePermissions.from_json(
self.request(
f"workspaces/{self.url_quote(workspace_name)}/auth",
method="GET",
exceptions_for_status={
404: WorkspaceNotFound(self._not_found_err_msg(workspace_name))
},
).json()
)
def grant_permissions(
self,
auth: Permission,
*accounts: Union[str, Account],
workspace: Optional[Union[str, Workspace]] = None,
) -> WorkspacePermissions:
"""
Grant workspace permissions to accounts.
:param auth: The authorization level to grant (e.g. "read", "write", "all").
:param accounts: The accounts to grant permissions to.
:param workspace: The Beaker workspace name, or object. If not specified,
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.
:raises ValueError: If ``auth`` is invalid.
:raises AccountNotFound: If an account doesn't exist.
:raises WorkspaceNotFound: If the workspace doesn't exist.
:raises WorkspaceNotSet: If neither ``workspace`` nor
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
if auth not in set(Permission):
raise ValueError(f"Authorization '{auth}' is invalid")
account_ids = [
account.id if isinstance(account, Account) else self.beaker.account.get(account).id
for account in accounts
]
workspace_name = self.resolve_workspace(workspace, read_only_ok=True).full_name
self.request(
f"workspaces/{self.url_quote(workspace_name)}/auth",
method="PATCH",
data=WorkspacePermissionsPatch(
authorizations={account_id: auth for account_id in account_ids}
),
)
return self.get_permissions(workspace=workspace_name)
def set_visibility(
self, public: bool = False, workspace: Optional[Union[str, Workspace]] = None
) -> WorkspacePermissions:
"""
Set workspace visibility to public or private.
:param public: Public visibility.
:param workspace: The Beaker workspace name, or object. If not specified,
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.
:raises WorkspaceNotFound: If the workspace doesn't exist.
:raises WorkspaceNotSet: If neither ``workspace`` nor
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
workspace_name = self.resolve_workspace(workspace, read_only_ok=True).full_name
self.request(
f"workspaces/{self.url_quote(workspace_name)}/auth",
method="PATCH",
data=WorkspacePermissionsPatch(public=public),
exceptions_for_status={404: WorkspaceNotFound(self._not_found_err_msg(workspace_name))},
)
return self.get_permissions(workspace=workspace_name)
def revoke_permissions(
self, *accounts: Union[str, Account], workspace: Optional[Union[str, Workspace]] = None
) -> WorkspacePermissions:
"""
Revoke workspace permissions to accounts.
:param accounts: The accounts to revoke permissions for.
:param workspace: The Beaker workspace name, or object. If not specified,
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.
:raises AccountNotFound: If an account doesn't exist.
:raises WorkspaceNotFound: If the workspace doesn't exist.
:raises WorkspaceNotSet: If neither ``workspace`` nor
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
account_ids = [
account.id if isinstance(account, Account) else self.beaker.account.get(account).id
for account in accounts
]
workspace_name = self.resolve_workspace(workspace, read_only_ok=True).full_name
self.request(
f"workspaces/{self.url_quote(workspace_name)}/auth",
method="PATCH",
data=WorkspacePermissionsPatch(
authorizations={account_id: Permission.no_permission for account_id in account_ids}
),
exceptions_for_status={404: WorkspaceNotFound(self._not_found_err_msg(workspace_name))},
)
return self.get_permissions(workspace=workspace_name)
def url(self, workspace: Optional[Union[str, Workspace]] = None) -> str:
"""
Get the URL for a workspace.
:param workspace: The Beaker workspace name, or object. If not specified,
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.
:raises WorkspaceNotFound: If the workspace doesn't exist.
:raises WorkspaceNotSet: If neither ``workspace`` nor
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
"""
workspace_name = self.resolve_workspace(workspace, read_only_ok=True).full_name
return f"{self.config.agent_address}/ws/{workspace_name}"
def clear(
self,
workspace: Optional[Union[str, Workspace]] = None,
*,
groups: bool = True,
experiments: bool = True,
images: bool = True,
datasets: bool = True,
secrets: bool = True,
older_than: Optional[datetime] = None,
):
"""
Remove groups, experiments, images, datasets, and secrets from a workspace.
:param workspace: The Beaker workspace name, or object. If not specified,
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.
:param groups: Whether to delete groups.
:param experiments: Whether to delete experiments.
:param images: Whether to delete images.
:param datasets: Whether to delete datasets.
:param secrets: Whether to delete secrets.
:param older_than: Only delete objects created before this date.
:raises WorkspaceNotFound: If the workspace doesn't exist.
:raises WorkspaceNotSet: If neither ``workspace`` nor
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
import concurrent.futures
from itertools import chain
def should_delete(created: Optional[datetime]) -> bool:
if older_than is None or created is None:
return True
if any([dt.tzinfo is None for dt in (created, older_than)]):
return created.replace(tzinfo=None) < older_than.replace(tzinfo=None)
else:
return created < older_than
deletion_counts: Dict[str, int] = defaultdict(int)
with concurrent.futures.ThreadPoolExecutor() as executor:
deletion_futures = []
if groups:
for group in filter(lambda x: should_delete(x.created), self.groups(workspace)):
future = executor.submit(self.beaker.group.delete, group)
deletion_futures.append(future)
deletion_counts["groups_deleted"] += 1
if experiments:
for experiment in filter(
lambda x: should_delete(x.created), self.iter_experiments(workspace)
):
future = executor.submit(self.beaker.experiment.delete, experiment)
deletion_futures.append(future)
deletion_counts["experiments_deleted"] += 1
if images:
for image in filter(
lambda x: should_delete(x.committed), self.iter_images(workspace)
):
future = executor.submit(self.beaker.image.delete, image)
deletion_futures.append(future)
deletion_counts["images_deleted"] += 1
if datasets:
for dataset in filter(
lambda x: should_delete(x.created),
chain(
self.iter_datasets(workspace),
self.iter_datasets(workspace, uncommitted=True),
),
):
future = executor.submit(self.beaker.dataset.delete, dataset)
deletion_futures.append(future)
deletion_counts["datasets_deleted"] += 1
if secrets:
for secret in filter(lambda x: should_delete(x.created), self.secrets(workspace)):
future = executor.submit(self.beaker.secret.delete, secret, workspace)
deletion_futures.append(future)
deletion_counts["secrets_deleted"] += 1
done, _ = concurrent.futures.wait(deletion_futures)
for future in done:
try:
future.result()
except NotFoundError:
pass
return WorkspaceClearResult(**deletion_counts)
def _not_found_err_msg(self, workspace: str) -> str:
return (
f"'{workspace}': Make sure you're using the workspace ID or *full* name "
f"(with the organization prefix, e.g. 'org/workspace_name')."
)
| beaker-py-main | beaker/services/workspace.py |
from typing import TYPE_CHECKING, Dict, Optional, Union, cast
from docker.models.images import Image as DockerImage
from ..data_model import *
from ..exceptions import *
from .service_client import ServiceClient
if TYPE_CHECKING:
from rich.progress import TaskID
class ImageClient(ServiceClient):
"""
Accessed via :data:`Beaker.image <beaker.Beaker.image>`.
"""
def get(self, image: str) -> Image:
"""
Get info about an image on Beaker.
:param image: The Beaker image ID or name.
:raises ImageNotFound: If the image can't be found on Beaker.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
def _get(id: str) -> Image:
return Image.from_json(
self.request(
f"images/{self.url_quote(id)}",
exceptions_for_status={404: ImageNotFound(self._not_found_err_msg(id))},
).json()
)
try:
# Could be an ID or full name, so we try that first.
return _get(image)
except ImageNotFound:
if "/" not in image:
# Try with adding the account name.
try:
return _get(f"{self.beaker.account.name}/{image}")
except ImageNotFound:
pass
# Try searching the default workspace.
if self.config.default_workspace is not None:
matches = self.beaker.workspace.images(match=image, limit=1)
if matches:
return matches[0]
raise
def create(
self,
name: str,
image_tag: str,
workspace: Optional[Union[Workspace, str]] = None,
description: Optional[str] = None,
quiet: bool = False,
commit: bool = True,
) -> Image:
"""
Upload a Docker image to Beaker.
:param name: The name to assign to the image on Beaker.
:param image_tag: The tag of the local image you're uploading.
:param workspace: The workspace to upload the image to. If not specified,
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.
:param description: Text description of the image.
:param quiet: If ``True``, progress won't be displayed.
:param commit: Whether to commit the image after successful upload.
:raises ValueError: If the image name is invalid.
:raises ImageConflict: If an image with the given name already exists.
:raises WorkspaceNotSet: If neither ``workspace`` nor
:data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
self.validate_beaker_name(name)
workspace = self.resolve_workspace(workspace)
# Get local Docker image object.
image = cast(DockerImage, self.docker.images.get(image_tag))
# Create new image on Beaker.
image_id = self.request(
"images",
method="POST",
data=ImageSpec(
workspace=workspace.id,
image_id=image.id,
image_tag=image_tag,
description=description,
),
query={"name": name},
exceptions_for_status={409: ImageConflict(name)},
).json()["id"]
# Get the repo data for the Beaker image.
repo = ImageRepo.from_json(
self.request(f"images/{image_id}/repository", query={"upload": True}).json()
)
# Tag the local image with the new tag for the Beaker image.
image.tag(repo.image_tag)
# Push the image to Beaker.
from ..progress import get_image_upload_progress
with get_image_upload_progress(quiet) as progress:
layer_id_to_task: Dict[str, "TaskID"] = {}
for layer_state_data in self.docker.api.push(
repo.image_tag,
stream=True,
decode=True,
auth_config={
"username": repo.auth.user,
"password": repo.auth.password,
"server_address": repo.auth.server_address,
},
):
if "id" not in layer_state_data or "status" not in layer_state_data:
continue
layer_state = DockerLayerUploadState.from_json(layer_state_data)
# Get progress task ID for layer, initializing if it doesn't already exist.
task_id: "TaskID"
if layer_state.id not in layer_id_to_task:
task_id = progress.add_task(layer_state.id, start=True, total=1)
layer_id_to_task[layer_state.id] = task_id
else:
task_id = layer_id_to_task[layer_state.id]
# Update task progress description.
progress.update(
task_id, description=f"{layer_state.id}: {layer_state.status.title()}"
)
# Update task progress total and completed.
if (
layer_state.progress_detail.total is not None
and layer_state.progress_detail.current is not None
):
progress.update(
task_id,
total=layer_state.progress_detail.total,
completed=layer_state.progress_detail.current,
)
elif layer_state.status in {
DockerLayerUploadStatus.preparing,
DockerLayerUploadStatus.waiting,
}:
progress.update(
task_id,
total=1,
completed=0,
)
elif layer_state.status in {
DockerLayerUploadStatus.pushed,
DockerLayerUploadStatus.already_exists,
}:
progress.update(
task_id,
total=1,
completed=1,
)
if commit:
return self.commit(image_id)
else:
return self.get(image_id)
def commit(self, image: Union[str, Image]) -> Image:
"""
Commit an image.
:param image: The Beaker image ID, name, or object.
:raises ImageNotFound: If the image can't be found on Beaker.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
image_id = self.resolve_image(image).id
return Image.from_json(
self.request(
f"images/{image_id}",
method="PATCH",
data=ImagePatch(commit=True),
exceptions_for_status={404: ImageNotFound(self._not_found_err_msg(image))},
).json()
)
def delete(self, image: Union[str, Image]):
"""
Delete an image on Beaker.
:param image: The Beaker image ID, name, or object.
:raises ImageNotFound: If the image can't be found on Beaker.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
image_id = self.resolve_image(image).id
self.request(
f"images/{self.url_quote(image_id)}",
method="DELETE",
exceptions_for_status={404: ImageNotFound(self._not_found_err_msg(image))},
)
def rename(self, image: Union[str, Image], name: str) -> Image:
"""
Rename an image on Beaker.
:param image: The Beaker image ID, name, or object.
:param name: The new name for the image.
:raises ImageNotFound: If the image can't be found on Beaker.
:raises ValueError: If the image name is invalid.
:raises ImageConflict: If an image with the given name already exists.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
self.validate_beaker_name(name)
image_id = self.resolve_image(image).id
return Image.from_json(
self.request(
f"images/{image_id}",
method="PATCH",
data=ImagePatch(name=name),
exceptions_for_status={404: ImageNotFound(self._not_found_err_msg(image))},
).json()
)
def pull(self, image: Union[str, Image], quiet: bool = False) -> DockerImage:
"""
Pull an image from Beaker.
.. important::
This method returns a Docker :class:`~docker.models.images.Image`, not
a Beaker :class:`~beaker.data_model.image.Image`.
:param image: The Beaker image ID, name, or object.
:param quiet: If ``True``, progress won't be displayed.
:raises ImageNotFound: If the image can't be found on Beaker.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
image_id = self.resolve_image(image).id
repo = ImageRepo.from_json(self.request(f"images/{image_id}/repository").json())
from ..progress import get_image_download_progress
with get_image_download_progress(quiet) as progress:
layer_id_to_task: Dict[str, "TaskID"] = {}
for layer_state_data in self.docker.api.pull(
repo.image_tag,
stream=True,
decode=True,
auth_config={
"username": repo.auth.user,
"password": repo.auth.password,
"server_address": repo.auth.server_address,
},
):
if "id" not in layer_state_data or "status" not in layer_state_data:
continue
if layer_state_data["status"].lower().startswith("pulling "):
continue
layer_state = DockerLayerDownloadState.from_json(layer_state_data)
# Get progress task ID for layer, initializing if it doesn't already exist.
task_id: "TaskID"
if layer_state.id not in layer_id_to_task:
task_id = progress.add_task(layer_state.id, start=True, total=1)
layer_id_to_task[layer_state.id] = task_id
else:
task_id = layer_id_to_task[layer_state.id]
# Update task progress description.
progress.update(
task_id, description=f"{layer_state.id}: {layer_state.status.title()}"
)
# Update task progress total and completed.
if (
layer_state.progress_detail.total is not None
and layer_state.progress_detail.current is not None
):
progress.update(
task_id,
total=layer_state.progress_detail.total,
completed=layer_state.progress_detail.current,
)
elif layer_state.status in {
DockerLayerDownloadStatus.waiting,
DockerLayerDownloadStatus.extracting,
DockerLayerDownloadStatus.verifying_checksum,
}:
progress.update(
task_id,
total=1,
completed=0,
)
elif layer_state.status in {
DockerLayerDownloadStatus.download_complete,
DockerLayerDownloadStatus.pull_complete,
DockerLayerDownloadStatus.already_exists,
}:
progress.update(
task_id,
total=1,
completed=1,
)
local_image = cast(DockerImage, self.docker.images.get(repo.image_tag))
return local_image
def url(self, image: Union[str, Image]) -> str:
"""
Get the URL for an image.
:param image: The Beaker image ID, name, or object.
:raises ImageNotFound: If the image can't be found on Beaker.
"""
image_id = self.resolve_image(image).id
return f"{self.config.agent_address}/im/{self.url_quote(image_id)}"
def _not_found_err_msg(self, image: Union[str, Image]) -> str:
image = image if isinstance(image, str) else image.id
return (
f"'{image}': Make sure you're using a valid Beaker image ID or the "
f"*full* name of the image (with the account prefix, e.g. 'username/image_name')"
)
| beaker-py-main | beaker/services/image.py |
import io
import json
import logging
import urllib.parse
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union
import docker
import requests
from ..config import Config
from ..data_model import *
from ..data_model.base import BaseModel
from ..exceptions import *
from ..util import retriable
if TYPE_CHECKING:
from ..client import Beaker
class ServiceClient:
def __init__(self, beaker: "Beaker"):
self.beaker = beaker
self._base_url = f"{self.config.agent_address}/api/{self.beaker.API_VERSION}"
@property
def config(self) -> Config:
return self.beaker.config
@property
def docker(self) -> docker.DockerClient:
return self.beaker.docker
@property
def logger(self) -> logging.Logger:
return self.beaker.logger
def request(
self,
resource: str,
method: str = "GET",
query: Optional[Dict[str, Any]] = None,
data: Optional[Any] = None,
exceptions_for_status: Optional[Dict[int, Exception]] = None,
headers: Optional[Dict[str, str]] = None,
token: Optional[str] = None,
base_url: Optional[str] = None,
stream: bool = False,
timeout: Optional[Union[float, Tuple[float, float]]] = None,
) -> requests.Response:
def make_request(session: requests.Session) -> requests.Response:
# Build URL.
url = f"{base_url or self._base_url}/{resource}"
if query is not None:
url = url + "?" + urllib.parse.urlencode(query)
# Populate headers.
default_headers = {
"Authorization": f"Bearer {token or self.config.user_token}",
"Content-Type": "application/json",
"User-Agent": self.beaker.user_agent,
}
if headers is not None:
default_headers.update(headers)
# Validate request data.
request_data: Optional[Union[str, bytes, io.BufferedReader]] = None
if isinstance(data, BaseModel):
request_data = json.dumps(data.to_json())
elif isinstance(data, dict):
request_data = json.dumps(data)
elif isinstance(data, (str, bytes, io.BufferedReader)):
request_data = data
elif data is not None:
raise TypeError(
f"Unexpected type for 'data'. Expected 'dict' or 'BaseModel', got {type(data)}"
)
# Log request at DEBUG.
if isinstance(request_data, str):
self.logger.debug("SEND %s %s - %s", method, url, request_data)
elif isinstance(request_data, bytes):
self.logger.debug("SEND %s %s - %d bytes", method, url, len(request_data))
elif request_data is not None:
self.logger.debug("SEND %s %s - ? bytes", method, url)
else:
self.logger.debug("SEND %s %s", method, url)
# Make request.
response = getattr(session, method.lower())(
url,
headers=default_headers,
data=request_data,
stream=stream,
timeout=timeout or self.beaker._timeout,
)
# Log response at DEBUG.
if (
not stream
and self.logger.isEnabledFor(logging.DEBUG)
and len(response.content) < 1024
and response.text
):
self.logger.debug("RECV %s %s %s - %s", method, url, response, response.text)
else:
self.logger.debug("RECV %s %s %s", method, url, response)
if exceptions_for_status is not None and response.status_code in exceptions_for_status:
raise exceptions_for_status[response.status_code]
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
# Try parsing error message from the response
msg: Optional[str] = None
if response.text:
try:
msg = json.loads(response.text)["message"]
except (TypeError, KeyError, json.JSONDecodeError):
pass
if (
msg is not None
and response.status_code is not None
and 400 <= response.status_code < 500
):
# Raise a BeakerError if we're misusing the API (4xx error code).
raise BeakerError(msg)
elif msg is not None:
raise HTTPError(msg, response=response)
else:
raise
return response
if method in {"HEAD", "GET"}:
# We assume HEAD and GET calls won't modify state, so they're
# safe to retry for any recoverable error.
make_request = retriable()(make_request)
if self.beaker._session is not None:
return make_request(self.beaker._session)
else:
with self.beaker._make_session() as session:
return make_request(session)
def resolve_cluster_name(self, cluster_name: str) -> str:
if "/" not in cluster_name:
if self.config.default_org is not None:
self.validate_beaker_name(cluster_name)
return f"{self.config.default_org}/{cluster_name}"
else:
raise OrganizationNotSet(
f"No default organization set and cluster name doesn't include "
f"an organization ('{cluster_name}')"
)
else:
org, name = cluster_name.split("/", 1)
self.validate_beaker_name(name)
self.resolve_org(org)
return cluster_name
def resolve_workspace_name(self, workspace_name: str) -> str:
"""
Takes the name of a workspace (possibly non-existent) and returns a valid full name.
"""
if "/" not in workspace_name:
if self.config.default_org is not None:
self.validate_beaker_name(workspace_name)
return f"{self.config.default_org}/{workspace_name}"
else:
raise OrganizationNotSet(
f"No default organization set and workspace name doesn't include "
f"an organization ('{workspace_name}'). Make sure you're using a valid "
f"workspace full name or ID."
)
else:
org, name = workspace_name.split("/", 1)
self.validate_beaker_name(name)
self.resolve_org(org)
return workspace_name
def resolve_cluster(self, cluster: Union[str, Cluster]) -> Cluster:
if isinstance(cluster, Cluster):
return cluster
else:
return self.beaker.cluster.get(cluster)
def resolve_workspace(
self,
workspace: Optional[Union[str, Workspace]],
read_only_ok: bool = False,
) -> Workspace:
out: Workspace
if isinstance(workspace, Workspace):
out = workspace
else:
out = self.beaker.workspace.get(workspace)
if not read_only_ok and out.archived:
raise WorkspaceWriteError(f"Workspace '{out.full_name}' has been archived")
return out
def resolve_dataset(
self, dataset: Union[str, Dataset], ensure_storage: bool = False
) -> Dataset:
if isinstance(dataset, Dataset):
if ensure_storage and dataset.storage is None:
# Might need to get dataset again if 'storage' hasn't been set yet.
dataset = self.beaker.dataset.get(dataset.id)
if dataset.storage is None:
raise DatasetReadError(dataset.id)
return dataset
else:
dataset = self.beaker.dataset.get(dataset)
if ensure_storage and dataset.storage is None:
raise DatasetReadError(dataset.id)
return dataset
def resolve_experiment(self, experiment: Union[str, Experiment]) -> Experiment:
if isinstance(experiment, Experiment):
return experiment
else:
return self.beaker.experiment.get(experiment)
def resolve_image(self, image: Union[str, Image]) -> Image:
if isinstance(image, Image):
return image
else:
return self.beaker.image.get(image)
def resolve_group(self, group: Union[str, Group]) -> Group:
if isinstance(group, Group):
return group
else:
return self.beaker.group.get(group)
def resolve_org(self, org: Optional[Union[str, Organization]]) -> Organization:
if isinstance(org, Organization):
return org
else:
return self.beaker.organization.get(org)
def url_quote(self, id: str) -> str:
return urllib.parse.quote(id, safe="")
def validate_beaker_name(self, name: str):
if not name.replace("-", "").replace("_", "").replace(".", "").isalnum():
raise ValueError(
f"Invalid name '{name}'. Beaker names can only contain letters, "
f"digits, periods, dashes, and underscores."
)
| beaker-py-main | beaker/services/service_client.py |
from typing import List
from ..data_model import *
from ..exceptions import *
from ..util import cached_property
from .service_client import ServiceClient
class AccountClient(ServiceClient):
"""
Accessed via :data:`Beaker.account <beaker.Beaker.account>`.
"""
@cached_property(ttl=3 * 60)
def name(self) -> str:
"""
A convenience property to get username of your Beaker account.
"""
return self.whoami().name
def whoami(self) -> Account:
"""
Check who you are authenticated as.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
return Account.from_json(self.request("user").json())
def list_organizations(self) -> List[Organization]:
"""
List all organizations you are a member of.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
return [Organization.from_json(d) for d in self.request("user/orgs").json()["data"]]
def get(self, account: str) -> Account:
"""
Get information about an account.
:param account: The account name or ID.
:raises AccountNotFound: If the account doesn't exist.
:raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
:raises RequestException: Any other exception that can occur when contacting the
Beaker server.
"""
return Account.from_json(
self.request(
f"users/{self.url_quote(account)}",
method="GET",
exceptions_for_status={404: AccountNotFound(account)},
).json()
)
| beaker-py-main | beaker/services/account.py |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import logging
import os
import sys
from datetime import datetime
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
sys.path.insert(0, os.path.abspath("../../"))
from beaker.version import VERSION, VERSION_SHORT # noqa: E402
# -- Project information -----------------------------------------------------
project = "beaker-py"
copyright = f"{datetime.today().year}, Allen Institute for Artificial Intelligence"
author = "Allen Institute for Artificial Intelligence"
version = VERSION_SHORT
release = VERSION
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"myst_parser",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"sphinx_copybutton",
"sphinx_autodoc_typehints",
"sphinx_inline_tabs",
]
# Tell myst-parser to assign header anchors for h1-h3.
myst_heading_anchors = 3
suppress_warnings = ["myst.header"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build"]
source_suffix = [".rst", ".md"]
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"docker": ("https://docker-py.readthedocs.io/en/stable/", None),
"requests": ("https://requests.readthedocs.io/en/stable/", None),
}
# By default, sort documented members by type within classes and modules.
autodoc_member_order = "bysource"
autodoc_default_options = {"show-inheritance": True, "undoc-members": True}
# Include default values when documenting parameter types.
typehints_defaults = "comma"
copybutton_prompt_text = r">>> |\.\.\. "
copybutton_prompt_is_regexp = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "furo"
html_title = f"beaker-py v{VERSION}"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_css_files = ["css/custom.css"]
html_favicon = "_static/favicon.ico"
html_theme_options = {
"light_logo": "beaker-500px-transparent.png",
"dark_logo": "beaker-500px-transparent.png",
"footer_icons": [
{
"name": "GitHub",
"url": "https://github.com/allenai/beaker-py",
"html": """
<svg stroke="currentColor" fill="currentColor" stroke-width="0" viewBox="0 0 16 16">
<path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0 0 16 8c0-4.42-3.58-8-8-8z"></path>
</svg>
""", # noqa: E501
"class": "",
},
],
}
# -- Hack to get rid of stupid warnings from sphinx_autodoc_typehints --------
class ShutupSphinxAutodocTypehintsFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
if "Cannot resolve forward reference" in record.msg:
return False
if "Failed guarded type import" in record.msg:
return False
return True
logging.getLogger("sphinx.sphinx_autodoc_typehints").addFilter(ShutupSphinxAutodocTypehintsFilter())
def autodoc_skip_member(app, what, name, obj, skip, options):
"""
Skip documenting these Pydantic-specific attributes.
"""
del app, what, obj, skip, options
exclude = name in {"model_config", "model_fields"}
return True if exclude else None
def setup(app):
app.connect("autodoc-skip-member", autodoc_skip_member)
| beaker-py-main | docs/source/conf.py |
"""
This script will upload an image to Beaker and then submit a bunch
of experiments with different inputs. It will wait for all experiments to finish
and then collect the results.
See the output of 'python run.py --help' for usage.
"""
import argparse
import uuid
import petname
from rich import print, progress, table, traceback
from beaker import *
def unique_name() -> str:
"""Helper function to generate a unique name for the image, group, and each experiment."""
return petname.generate() + "-" + str(uuid.uuid4())[:8] # type: ignore
def main(image: str, workspace: str):
beaker = Beaker.from_env(default_workspace=workspace)
sweep_name = unique_name()
print(f"Starting sweep '{sweep_name}'...\n")
# Using the `beaker.session()` context manager is not necessary, but it does
# speed things up since it allows the Beaker client to reuse the same TCP connection
# for all requests made within-context.
with beaker.session():
# Upload image to Beaker.
print(f"Uploading image '{image}' to Beaker...")
beaker_image = beaker.image.create(unique_name(), image)
print(
f"Image uploaded as '{beaker_image.full_name}', view at {beaker.image.url(beaker_image)}\n"
)
# Launch experiments.
experiments = []
for x in progress.track(range(5), description="Launching experiments..."):
spec = ExperimentSpec.new(
description=f"Run {x+1} of sweep {sweep_name}",
beaker_image=beaker_image.full_name,
result_path="/output",
priority=Priority.preemptible,
arguments=[str(x)],
)
experiment = beaker.experiment.create(f"{sweep_name}-{x+1}", spec)
experiments.append(experiment)
print()
# Create group.
print("Creating group for sweep...")
group = beaker.group.create(
sweep_name, *experiments, description="Group for sweep {sweep_name}"
)
print(f"Group '{group.full_name}' created, view at {beaker.group.url(group)}\n")
# Wait for experiments to finish.
print("Waiting for experiments to finalize...\n")
experiments = beaker.experiment.wait_for(*experiments)
print()
# Display results as a table.
results_table = table.Table(title="Results for sweep")
results_table.add_column("Input")
results_table.add_column("Output")
for x, experiment in enumerate(
progress.track(experiments, description="Gathering results...")
):
metrics = beaker.experiment.metrics(experiment)
assert metrics is not None
results_table.add_row(f"x = {x}", f"{metrics['result']:.4f}")
print()
print(results_table)
if __name__ == "__main__":
traceback.install()
parser = argparse.ArgumentParser(description="Run a hyperparameter sweep in Beaker")
parser.add_argument(
"image", type=str, help="""The tag of the local Docker image built from the Dockerfile."""
)
parser.add_argument("workspace", type=str, help="""The Beaker workspace to use.""")
opts = parser.parse_args()
main(image=opts.image, workspace=opts.workspace)
| beaker-py-main | examples/sweep/run.py |
"""
This is the script that will run on Beaker as the Docker image's "entrypoint".
All it does is write out a simple JSON file with a random number in it to
the experiment's result directory. This is just meant to simulate the results
of a training/evaluation pipeline.
"""
import json
import random
import sys
# NOTE: it's important that this file is called 'metrics.json'. That tells Beaker
# to collect metrics for the task from this file.
OUTPUT_PATH = "/output/metrics.json"
def main(x: int):
random.seed(x)
with open(OUTPUT_PATH, "w") as out_file:
json.dump({"result": random.random()}, out_file)
if __name__ == "__main__":
main(int(sys.argv[1]))
| beaker-py-main | examples/sweep/entrypoint.py |
from datetime import datetime
from pathlib import Path
from beaker.version import VERSION
def main():
changelog = Path("CHANGELOG.md")
with changelog.open() as f:
lines = f.readlines()
insert_index: int = -1
for i in range(len(lines)):
line = lines[i]
if line.startswith("## Unreleased"):
insert_index = i + 1
elif line.startswith(f"## [v{VERSION}]"):
print("CHANGELOG already up-to-date")
return
elif line.startswith("## [v"):
break
if insert_index < 0:
raise RuntimeError("Couldn't find 'Unreleased' section")
lines.insert(insert_index, "\n")
lines.insert(
insert_index + 1,
f"## [v{VERSION}](https://github.com/allenai/beaker-py/releases/tag/v{VERSION}) - "
f"{datetime.now().strftime('%Y-%m-%d')}\n",
)
with changelog.open("w") as f:
f.writelines(lines)
if __name__ == "__main__":
main()
| beaker-py-main | scripts/prepare_changelog.py |
# encoding: utf-8
"""
Prepares markdown release notes for GitHub releases.
"""
import os
from typing import List
TAG = os.environ["TAG"]
ADDED_HEADER = "### Added 🎉"
CHANGED_HEADER = "### Changed ⚠️"
FIXED_HEADER = "### Fixed ✅"
REMOVED_HEADER = "### Removed 👋"
def get_change_log_notes() -> str:
in_current_section = False
current_section_notes: List[str] = []
with open("CHANGELOG.md") as changelog:
for line in changelog:
if line.startswith("## "):
if line.startswith("## Unreleased"):
continue
if line.startswith(f"## [{TAG}]"):
in_current_section = True
continue
break
if in_current_section:
if line.startswith("### Added"):
line = ADDED_HEADER + "\n"
elif line.startswith("### Changed"):
line = CHANGED_HEADER + "\n"
elif line.startswith("### Fixed"):
line = FIXED_HEADER + "\n"
elif line.startswith("### Removed"):
line = REMOVED_HEADER + "\n"
current_section_notes.append(line)
assert current_section_notes
return "## What's new\n\n" + "".join(current_section_notes).strip() + "\n"
def get_commit_history() -> str:
stream = os.popen(
f"git log $(git describe --always --tags --abbrev=0 {TAG}^^)..{TAG} --oneline --pretty='%h %s'"
)
return "## Commits\n\n" + stream.read()
def main():
print(get_change_log_notes())
print(get_commit_history())
if __name__ == "__main__":
main()
| beaker-py-main | scripts/release_notes.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from setuptools import setup # type: ignore
setup(
name="cc_net",
version="1.0.0",
packages=["cc_net"],
# metadata to display on PyPI
author="Guillaume Wenzek",
author_email="[email protected]",
description="Tools to download and clean Common Crawl",
keywords="common crawl dataset",
url="https://github.com/facebookresearch/cc_net",
license="CC-BY-NC-4.0",
long_description=Path("README.md").read_text(),
long_description_content_type="text/markdown",
project_urls={
"Bug Tracker": "https://github.com/facebookresearch/cc_net/issues",
"Source Code": "https://github.com/facebookresearch/cc_net",
},
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3.7",
],
python_requires=">=3.7",
install_requires=[
"beautifulsoup4>=4.7.1",
"pandas>=0.23.4",
"requests>=2.22.0",
"fasttext>=0.9.1",
"sentencepiece>=0.1.82",
"kenlm @ git+https://github.com/kpu/kenlm.git@master",
"func_argparse>=1.1.1",
"psutil>=5.6.3",
"sacremoses",
"submitit>=1.0.0",
"typing_extensions",
"boto3"
],
extras_require={
"dev": ["mypy==0.790", "pytest", "black==19.3b0", "isort==5.6.4"],
# To use scripts inside cc_net/tools
"tools": ["lxml", "sentence_splitter"],
# Memory-efficient hashset.
# This fork only compiles the kind of dict used by cc_net.
# Full version is at https://github.com/atom-moyer/getpy
"getpy": ["getpy @ git+https://github.com/gwenzek/[email protected]"],
},
package_data={"cc_net": ["data/*"]},
)
| cc_net-main | setup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Main script to download a CC dump, remove duplicates, split by language and
filter the documents.
The pipeline parameters are described in the `Config` class.
"""
import os
import hashlib
import json
import time
import warnings
from argparse import ArgumentParser
from collections import defaultdict
from itertools import repeat
from pathlib import Path
from typing import Any, Dict, Iterable, List, NamedTuple, Optional, Sequence, Tuple
import re
import func_argparse
# Local scripts
from cc_net import dedup, execution, jsonql, minify, perplexity, process_wet_file
from cc_net import regroup as regroup_module
from cc_net import split_by_lang
from cc_net import s3util
from cc_net import ai2_format
from cc_net.execution import Executor
# Constant
FILE_DIR = Path(__file__).parent
CUTOFF_CSV = FILE_DIR / "data" / "cutoff.csv"
DEFAULT_PIPELINE = [
"dedup",
"lid",
"keep_lang",
"sp",
"lm",
"pp_bucket",
"drop",
"split_by_lang",
]
class Config(NamedTuple):
"""
Mine Common Crawl with the given settings.
config_name
dump: CC dump id
output_dir: working directory
s3_output_path: Upload outputs to S3 under this prefix
s3_input_path: Fetch inputs from S3 under this prefix
mined_dir: name of the destination folder, full path will be {ouput_dir}/{mined_dir}/{dump_id}
execution: chose how to parallelize the execution
num_shards: number of shards to split the dump
num_segments_per_shard: allow to download a small portion of CC (eg for tests)
min_len: remove documents shorter than this (in chars)
hash_in_mem: number of shards hashes to use for dedup
hash_max_ram_gb: amount of RAM to allocate for deduping (compute hash_in_mem dynamically)
lang_whitelist: only treat those languages
lang_blacklist: ignore those languages
lang_threshold: remove docs whose top language score is lower than this
keep_bucket: keep only those perplexity bucket chose from (head, middle, tail, all)
lm_dir: folder containing LMs
lm_languages: only use LMs for the following languages
cutoff: cutoff file to use for split in head/middle/tail
mine_num_processes: number of processes to use for mining
regroup: regroup shard outputs into consolidated files
target_size: size of finals files produce during `regroup` stage
cleanup_after_regroup: delete intermediary files after regroup
task_parallelism: max number of task to run in parallel for core pipeline steps
hash_parallelism: max number of tasks when computing hashes
regroup_parallelism: max number of tasks when regrouping segments
pipeline: restricts the mining pipeline to the given steps. Order is important !
experiments: (HACK) enable specific experiments in the code
"""
config_name: str = "base"
dump: str = "2017-51"
output_dir: Path = Path("data")
s3_output_path: Optional[str] = None
s3_input_path: Optional[str] = None
mined_dir: str = "mined"
execution: str = "auto"
num_shards: int = 1600
num_segments_per_shard: int = -1
metadata: Optional[str] = None
min_len: int = 300
hash_in_mem: int = 50
hash_max_ram_gb: int = 20
lang_whitelist: Sequence[str] = []
lang_blacklist: Sequence[str] = []
lang_threshold: float = 0.5
keep_bucket: Sequence[str] = []
lm_dir: Path = Path("data/lm_sp")
cutoff: Path = CUTOFF_CSV
lm_languages: Optional[Sequence[str]] = None
mine_num_processes: int = 16
target_size: str = "4G"
regroup: bool = True
cleanup_after_regroup: bool = True
task_parallelism: int = -1
hash_parallelism: int = -1
regroup_parallelism: int = -1
pipeline: Sequence[str] = DEFAULT_PIPELINE
experiments: Sequence[str] = []
cache_dir: Optional[Path] = None
def get_executor(
self, name: str, timeout_hour: int = 1, mem_gb: int = 1, cpus: int = 1, parallelism: int = -1
) -> Executor:
name = "_".join((name, self.config_name, *self.experiments))
return execution.get_executor(
name,
self.output_dir / "logs",
self.execution,
timeout_hour=timeout_hour,
mem_gb=mem_gb,
cpus=cpus,
task_parallelism=parallelism
)
def get_cc_shard(self, shard: int) -> process_wet_file.CCShardReader:
dump_cache: Optional[Path] = None
if self.cache_dir:
self.cache_dir.mkdir(exist_ok=True)
dump_cache = self.cache_dir / self.dump
dump_cache.mkdir(exist_ok=True)
return process_wet_file.CCShardReader(
self.dump,
shard=shard,
num_shards=self.num_shards,
num_segments_per_shard=self.num_segments_per_shard,
min_len=self.min_len,
cache_dir=dump_cache,
)
@classmethod
def from_json(cls, json_file: Path) -> "Config":
raw_lines = json_file.read_text().splitlines()
raw_lines = [l for l in raw_lines if not l.strip().startswith("//")]
json_config = json.loads("".join(raw_lines))
path_keys = ["cache_dir", "lm_dir", "output_dir"]
for key in path_keys:
if key in json_config:
json_config[key] = Path(json_config[key])
return Config(**json_config)
@property
def will_split(self) -> bool:
return "split_by_lang" in self.pipeline or "split_by_segment" in self.pipeline
def num_hash_files_per_shard(self, gb_per_file: float):
if self.hash_max_ram_gb > 0:
return int(1 + self.hash_max_ram_gb // gb_per_file)
return self.hash_in_mem
def get_lm_languages(self) -> Sequence[str]:
if self.lm_languages is not None:
return self.lm_languages
if self.lang_whitelist:
return self.lang_whitelist
languages = [m.name.split(".")[0] for m in self.lm_dir.glob("*.arpa.bin")]
if self.lang_blacklist:
languages = [l for l in languages if l not in self.lang_blacklist]
return languages
def get_mined_dir(self, regroup: bool = False) -> Path:
if self.will_split and not regroup:
return self.output_dir / f"{self.mined_dir}_split" / self.dump
return self.output_dir / self.mined_dir / self.dump
def input_exists(self, path: Path, s3_path: Optional[str]) -> bool:
if s3_path:
if s3util.exists(f"{s3_path}/{path.relative_to(self.output_dir)}"):
return True
return path.exists()
def fetch_input(self, path: Path):
if not path.exists() and self.input_exists(path, self.s3_input_path):
print(f"Downloading {path} from {self.s3_input_path}")
s3util.download(f"{self.s3_input_path}/{path.relative_to(self.output_dir)}", path)
def upload_output(self, output: Path):
if self.s3_output_path:
s3util.upload(output, f"{self.s3_output_path}/{output.relative_to(self.output_dir)}")
print(f"Uploading {output} to {self.s3_output_path}")
index = output.parent / (output.name + ".index")
if index.exists():
s3util.upload(index, f"{self.s3_output_path}/{index.relative_to(self.output_dir)}")
BASE_CONFIG = Config()
BYLANG_CONFIG = Config(
config_name="by_lang",
mined_dir="mined_by_lang",
pipeline=list(BASE_CONFIG.pipeline[:-1]) + ["split_by_lang"],
)
REPRODUCE_CONFIG = Config(
config_name="reproduce",
dump="2019-09",
mined_dir="reproduce",
pipeline=["fetch_metadata", "keep_lang", "keep_bucket", "split_by_lang"],
metadata="https://dl.fbaipublicfiles.com/cc_net/1.0.0",
# Optional filtering:
# It won't change much the execution speed, but decreases the disk requirement.
# Restrict languages
lang_whitelist=["fr"],
# Restrict perplexity buckets
# Top languages have been split in perplexity buckets according
# to a Wikipedia trained LM.
# The buckets from low perplexity (good) to high (bad) are:
# ["head", "middle", "tail"]
# Languages without a LM have only one bucket "all".
# It won't change much the execution speed, but decreases the disk requirement.
keep_bucket=["head", "all"],
mine_num_processes=1,
)
TEST_CONFIG = BASE_CONFIG._replace(
config_name="test",
dump="2019-09",
output_dir=Path("test_data"),
execution="local",
num_shards=4,
num_segments_per_shard=1,
hash_in_mem=2,
mine_num_processes=2,
lang_whitelist=["de", "it", "fr"],
target_size="32M",
cleanup_after_regroup=False,
cache_dir=Path("test_data/wet_cache"),
)
PREDEF_CONFIGS = {
"base": BASE_CONFIG,
"by_lang": BYLANG_CONFIG,
"test": TEST_CONFIG,
"test_slurm": TEST_CONFIG._replace(execution="slurm,partition=dev"),
"debug": TEST_CONFIG._replace(config_name="debug", mine_num_processes=0),
"reproduce": REPRODUCE_CONFIG,
"augment": BASE_CONFIG._replace(
config_name="augment", dump="2019-13", lang_blacklist=["en"]
),
}
def tmp(output: Path) -> Path:
return output.parent / (output.stem + ".tmp" + output.suffix)
def finalize(tmp_output: Path, output: Path) -> None:
if not tmp_output.exists():
warnings.warn(f"Targeted tmp output {tmp_output} doesn't exists.")
return
tmp_index = tmp_output.parent / (tmp_output.name + ".index")
tmp_output.rename(output)
if tmp_index.exists():
tmp_index.rename(output.parent / (output.name + ".index"))
def _transpose(iterable: Sequence[Tuple[Any, ...]], n=-1) -> Tuple[List, ...]:
if n < 0:
n = len(iterable[0])
columns: tuple = tuple([] for _ in range(n))
for row in iterable:
assert len(row) == n, f"Found tuple of len({len(row)}, expected {n}: {row}"
for i in range(n):
columns[i].append(row[i])
return columns
def hashes(conf: Config) -> List[Path]:
"""Computes hashes for each shard."""
hashes_dir = conf.output_dir / "hashes" / conf.dump
outputs = [hashes_dir / f"{shard:04d}.bin" for shard in range(conf.num_shards)]
for o in outputs:
conf.fetch_input(o)
missing_outputs = [(shard, o) for shard, o in enumerate(outputs) if not o.exists()]
if not missing_outputs:
return outputs
hashes_dir.mkdir(parents=True, exist_ok=True)
# With FlatHashSet we need ~2Gb of RAM / shard, but we need to account for
# overhead due to how the dynamic allocation works.
ex = conf.get_executor(f"hashes_{conf.dump}", mem_gb=4, timeout_hour=6, cpus=2, parallelism=conf.hash_parallelism)
ex(_hashes_shard, repeat(conf), *_transpose(missing_outputs))
# Wait a bit so that files appears on the disk.
time.sleep(20)
assert all(o.exists() for o in outputs)
return outputs
def _hashes_shard(conf: Config, shard: int, output: Path):
tmp_output = tmp(output)
jsonql.run_pipes(
dedup.HashesCollector(field="raw_content", output=tmp_output),
inputs=conf.get_cc_shard(shard),
)
finalize(tmp_output, output)
conf.upload_output(output)
return f"Hashed {output}"
HASHES_IN_MEM = [0, 1, 2, 5, 10, 20, 50, 100, 200, 400]
def mine(conf: Config) -> List[Path]:
"""Remove dups, run LID and LMs, and split by lang and quality."""
mined_dir = conf.get_mined_dir()
if conf.will_split:
# Give a directories when splitting
outputs = [mined_dir / f"{shard:04d}" for shard in range(conf.num_shards)]
else:
# Files otherwise
outputs = [
mined_dir / f"{shard:04d}.json.gz" for shard in range(conf.num_shards)
]
if "mini_again" in conf.experiments:
mined_dir = conf.output_dir / "mini_again" / conf.dump
outputs = [mined_dir / f"{shard:04d}" for shard in range(conf.num_shards)]
# TODO: try to reduce this / make it a function of "hash_in_mem" / num_langs
mem_gb = 60 + 1 * conf.hash_in_mem
timeout_hour = 5
if "hashes" in conf.experiments:
# HACK: used for generating paper figures
outputs = [
conf.output_dir / f"hashes_exp/{conf.dump}_0000_dedup{h:03d}.json.gz"
for h in HASHES_IN_MEM
]
mem_gb = int(max(HASHES_IN_MEM) * 1.2)
timeout_hour = 8
for o in outputs:
conf.fetch_input(o)
missing_outputs = [(shard, o) for shard, o in enumerate(outputs) if not o.exists()]
if "mini_again" in conf.experiments:
missing_outputs = [
(shard, o)
for shard, o in enumerate(outputs)
if shard in [5, 139] and not o.exists()
]
if not missing_outputs:
return outputs
mined_dir.mkdir(parents=True, exist_ok=True)
ex = conf.get_executor(
f"mine_{conf.dump}",
mem_gb=mem_gb,
timeout_hour=timeout_hour,
cpus=conf.mine_num_processes + 1,
parallelism=conf.task_parallelism
)
# Compute hashes firsts.
if "dedup" in conf.pipeline or "hashes" in conf.pipeline:
hash_files = hashes(conf)
file_size = max(os.path.getsize(str(f)) for f in hash_files)
ram_per_disk = 3.0 # File of size N takes consumes about 3N in RAM
gb_per_file = file_size * ram_per_disk / 1024**3
print(f"Typical file size is {file_size / 1024**2:.0f}MB, requiring {gb_per_file:.1f}GB RAM ")
shards_per_group = conf.num_hash_files_per_shard(gb_per_file)
print(f"Deduping with {shards_per_group} hash files per shard")
hashes_groups = list(jsonql.grouper(hash_files, shards_per_group))
hashes_files: Iterable[List[Path]] = [
hashes_groups[shard // shards_per_group] for shard, o in missing_outputs
]
else:
hashes_files = repeat([])
# "hashes" == compute hashes, nothing else
if "hashes" in conf.pipeline:
return []
ex(_mine_shard, repeat(conf), hashes_files, *_transpose(missing_outputs))
assert all(o.exists() for o in outputs)
return outputs
def _get_segment(tmp_output: Path, doc: dict) -> str:
segment: str = doc["cc_segment"].split("/")[-1]
return str(tmp_output / segment.replace(".warc.wet.gz", ".json.gz"))
def _mine_shard(conf: Config, hashes: List[Path], shard: int, output: Path) -> str:
assert conf.pipeline
tmp_output = tmp(output)
if "hashes" in conf.experiments:
# HACK: used for generating paper figures
hashes_in_mem = shard
hashes = hashes[: HASHES_IN_MEM[hashes_in_mem]]
shard = 0
cc_shard = conf.get_cc_shard(shard)
steps: Dict[str, Optional[jsonql.Transformer]] = {}
lang_id = Path("bin") / "lid.bin"
steps["lid_before_dedup"] = split_by_lang.Classifier(
model=lang_id, field="raw_content", out_field="lid_before_dedup", top=5
)
steps["dedup"] = dedup.DuplicatesRemover(field="raw_content", hashes_files=hashes)
steps["lid"] = split_by_lang.Classifier(
model=lang_id,
field="raw_content",
out_field="language",
top=1,
threshold=conf.lang_threshold,
)
steps["lid_after_dedup"] = split_by_lang.Classifier(
model=lang_id, field="raw_content", out_field="lid_after_dedup", top=5
)
if conf.lang_blacklist:
steps["keep_lang"] = jsonql.where(
[lambda doc: doc.get("language") not in set(conf.lang_blacklist)]
)
elif conf.lang_whitelist:
steps["keep_lang"] = jsonql.where(
[lambda doc: doc.get("language") in set(conf.lang_whitelist)]
)
else:
steps["keep_lang"] = None
tok_field = "tokenized"
steps["sp"] = perplexity.MultiSentencePiece(
{l: conf.lm_dir / f"{l}.sp.model" for l in conf.get_lm_languages()},
field="raw_content",
output_field=tok_field,
normalize=True,
)
steps["lm"] = perplexity.DocLM(
{l: conf.lm_dir / f"{l}.arpa.bin" for l in conf.get_lm_languages()},
field=tok_field,
output_field="perplexity",
normalize=False, # Normalization is done before SentencePiece
# load_method=kenlm.LoadMethod.PARALLEL_READ,
)
steps["pp_bucket"] = perplexity.PerplexityBucket(CUTOFF_CSV)
steps["drop"] = perplexity.DropKeys(tok_field)
steps["keep_bucket"] = None
if conf.keep_bucket:
steps["keep_bucket"] = jsonql.where(
[lambda doc: doc.get("bucket", "all") in conf.keep_bucket]
)
if "fetch_metadata" in conf.pipeline:
# TODO: better default
assert conf.metadata is not None
steps["fetch_metadata"] = minify.MetadataFetcher(
f"{conf.metadata}/{conf.dump}/"
)
steps["minify"] = minify.Minifier()
split_by_language_fn = lambda doc: str(tmp_output / f"{doc['language']}_{doc['bucket']}.json.gz")
if "ai2_format" in conf.pipeline:
split_by_language_fn = lambda doc: str(tmp_output / f"{doc['metadata']['language']}_{doc['metadata']['bucket']}.json.gz")
steps["split_by_lang"] = jsonql.split(
split_fn=split_by_language_fn, mkdir=True
)
split_by_segment_fn = lambda doc: _get_segment(tmp_output, doc)
if "ai2_format" in conf.pipeline:
split_by_segment_fn = lambda doc: _get_segment(tmp_output, doc["metadata"])
steps["split_by_segment"] = jsonql.split(
split_fn=split_by_segment_fn, mkdir=True
)
steps["ai2_format"] = ai2_format.Ai2Formatter()
pipeline = filter(None, (steps[s] for s in conf.pipeline))
jsonql.run_pipes(
*pipeline,
inputs=cc_shard,
processes=conf.mine_num_processes,
chunksize=100,
# The splitter takes care of writing to files.
output=tmp_output if not conf.will_split else None,
)
finalize(tmp_output, output)
conf.upload_output(output)
return f"Mined {output}"
def regroup(conf: Config, all_dirs: List[Path]) -> Path:
"""Reshards each language/quality after 'mine'."""
regroup_dir = conf.get_mined_dir(regroup=True)
assert all_dirs
all_files = [f for d in all_dirs for f in d.glob("*.json.gz")]
if not all_files:
print(f"No .json.gz file found in {all_dirs[0]}")
splits: Dict[str, List[Path]] = defaultdict(list)
for f in all_files:
split = f.name.split(".")[0]
splits[split].append(f)
print(f"Identified {len(all_files)} files to regroup from {len(splits)} splits.")
inputs: List[List[Path]] = []
outputs: List[Path] = []
target_size = jsonql.parse_size(conf.target_size)
for split, files in splits.items():
cuts = list(regroup_module.determine_groups(files, target_size=target_size))
if not cuts:
continue
pattern = f"{split}_????.json.gz"
existing_outputs = sorted(regroup_dir.glob(pattern))
if not conf.cleanup_after_regroup:
# We still have all the inputs so it is safe to overwrite existing outputs.
assert len(existing_outputs) <= len(cuts)
existing_outputs = []
if len(existing_outputs) > 0 and len(cuts) == 1:
# append to existing file if size allows it.
new_size = (
sum(f.stat().st_size for f in cuts[0])
+ existing_outputs[-1].stat().st_size
)
if new_size < target_size:
print(f"Will append {cuts[0]} to {existing_outputs[-1]}")
cuts[0].insert(0, existing_outputs.pop(-1))
n_existing = len(existing_outputs)
for i, cut in enumerate(cuts):
# avoid overwriting existing files.
j = i + n_existing
output = regroup_dir / f"{split}_{j:04}.json.gz"
inputs.append(cut)
outputs.append(output)
print(
str(regroup_dir / pattern),
"->",
len(cuts),
f"shards ({n_existing} already there).",
)
ex = conf.get_executor(f"regroup_{conf.dump}", mem_gb=1, timeout_hour=12, cpus=2, parallelism=conf.regroup_parallelism)
ex(_regroup, repeat(conf), inputs, outputs)
return regroup_dir
def _regroup(conf: Config, inputs: List[Path], output: Path) -> str:
output.parent.mkdir(parents=True, exist_ok=True)
regroup_module.fast_reshard(
inputs, output, tmp=tmp(output), rm_original=conf.cleanup_after_regroup
)
conf.upload_output(output)
return f"Regrouped {output}"
def move_segments(conf: Config, all_dirs: Sequence[Path]) -> Path:
"""Reshards each language/quality after 'mine'."""
# check that mining is over.
regroup_dir = conf.get_mined_dir(regroup=True)
assert all_dirs, "Received no dirs to move"
assert all(
d.is_dir() for d in all_dirs
), f"move_segments was expecting dirs received files: {all_dirs[:10]}..."
regroup_dir.parent.mkdir(exist_ok=True)
regroup_dir.mkdir(exist_ok=True)
ex = conf.get_executor(f"moveseg_{conf.dump}", mem_gb=1, timeout_hour=1, cpus=2, parallelism=conf.regroup_parallelism)
def _move_segments(subdir: Path, regroup_dir: Path) -> str:
n = 0
for f in subdir.iterdir():
if not f.is_file() or f.is_symlink():
continue
n += f.name.endswith(".json.gz")
new_name = regroup_dir / f.name
target = new_name.resolve()
assert f.resolve() != target
# this make the job idempotent.
f.rename(new_name)
f.symlink_to(target)
if n == 0:
return ""
return f"Moved {n} .json.gz files from {subdir} to {regroup_dir}"
ex(_move_segments, all_dirs, repeat(regroup_dir))
print(f"Results are in {regroup_dir}")
return regroup_dir
def _validate_test(conf: Config, output_dir: Path, generate: bool = False):
stats: Dict[str, dict] = {}
for file in sorted(output_dir.glob("*.json.gz")):
fname = "/".join((file.parent.name, file.name))
# The order of documents is not guaranteed inside a shard,
lines = sorted(jsonql.open_read(file))
content = "\n".join(lines)
size = len(content)
checksum = hashlib.sha1(bytes(content, encoding="utf-8")).hexdigest()
# first_document = json.loads(lines[0])
stats[fname] = {"size": size, "checksum": checksum}
def dump(x):
return json.dumps(x, indent=2, ensure_ascii=False)
print("*** Stats ***")
stats_raw = dump(stats)
stats_file = FILE_DIR / "data" / "test_stats.json"
if generate:
print("Saving stats to", stats_file)
stats_file.write_text(stats_raw)
return
expected_stats: Dict[str, dict] = {}
if stats_file.exists():
expected_stats = json.loads(stats_file.read_text())
if expected_stats == stats:
print("Everything looks good !")
return
stats_file.with_suffix(".actual.json").write_text(stats_raw)
print("*** Expected Stats ***")
print(dump(expected_stats))
print("*** Diff ***")
for fname in sorted(expected_stats.keys()):
print(fname)
assert fname in expected_stats, "missing file " + fname
if expected_stats[fname]["size"] != stats[fname]["size"]:
print(
" - Expected size",
expected_stats[fname]["size"],
", size",
stats[fname]["size"],
)
if expected_stats[fname]["checksum"] != stats[fname]["checksum"]:
print(
" - Expected checksum",
expected_stats[fname]["checksum"],
", checksum",
stats[fname]["checksum"],
)
def get_main_parser() -> ArgumentParser:
# Generates the 'main' parser by patching a 'Config' parser
p = func_argparse.func_argparser(Config)
# Override defaults value to None, so we know what was set by the user.
# Note that it will keep the original default values in the help message.
p.set_defaults(**{f: None for f in Config._fields})
p.add_argument("--config", type=str, default="base")
p.set_defaults(__command=main)
return p
def main(config: str = "base", **config_as_dict: Any) -> None:
# Use the given 'config' as default value.
config_base = config
if config_base in PREDEF_CONFIGS:
conf = PREDEF_CONFIGS[config_base]
elif Path(config_base).exists():
conf = Config.from_json(Path(config_base))
else:
raise ValueError(
f"Invalid value {config_base} for --config. "
f"Choose from ({', '.join(PREDEF_CONFIGS)}) or give an existing .json file."
)
conf = conf._replace(**{k: v for (k, v) in config_as_dict.items() if v is not None})
print(f"Will run cc_net.mine.main with the following config:", conf)
all_files = mine(conf)
if conf.regroup and conf.will_split:
assert all_files
assert all(d.is_dir() for d in all_files)
all_dirs = all_files
if "split_by_lang" in conf.pipeline:
# Only try regrouping if we split the shards.
regroup(conf, all_dirs)
elif "split_by_segment" in conf.pipeline:
# If we split by segment then regrouping is trivial, since segments appear in only one shard.
move_segments(conf, all_dirs)
if conf.config_name == "test":
_validate_test(conf, conf.get_mined_dir(regroup=True))
if __name__ == "__main__":
func_argparse.parse_and_call(get_main_parser())
| cc_net-main | cc_net/mine.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Creates mono-lingual corpus from Wikipedia.
"""
import functools
import re
import subprocess
import urllib.request
from pathlib import Path
from typing import Dict
import func_argparse
from bs4 import BeautifulSoup # type: ignore
from cc_net import jsonql, text_normalizer
CIRRUS_URL = "https://dumps.wikimedia.org/other/cirrussearch"
CIRRUS_DUMP_RE = re.compile(r"^(.*)wiki-\d+-cirrussearch-content\.json\.gz")
def tmp(file: Path) -> Path:
return file.parent / ("tmp." + file.name)
def opening(file: Path, output: Path = None, n_docs: int = 1_000_000):
"""Will dump the tokenized opening text of the given Wikipedia.
Args:
- file: File containing the Wikipedia dump.
- output: Output file.
- n_docs: How many docs to parse
- tokenize: whether to tokenize the text
- lang: Language code used to chose the tokenizer
"""
assert file.exists()
return jsonql.run_pipes(
functools.partial(extract_opening_text, n_docs=n_docs),
file=file,
output=tmp(output) if output else None,
)
if output:
tmp(output).replace(output)
def extract_opening_text(source, n_docs: int = 10_000):
i = 0
for doc in jsonql.read_jsons(source):
if not doc:
continue
text = doc.get("opening_text")
if not text:
continue
yield text_normalizer.normalize(text)
i += 1
if i >= n_docs:
break
def dl(lang: str, output_dir: Path, date: str = None):
"""Download the cirrus extract for the given lang.
See https://dumps.wikimedia.org/other/cirrussearch for the full list of files.
Args:
- lang: The Wikipedia code for the language.
- output_dir: Output directory. File will be `{lang}.json.gz`
- date: Date of a specific Cirrus dump.
"""
urls = get_cirrus_urls(date)
assert (
lang in urls
), f"--lang {lang} not found. Available languages are: {urls.keys()}"
assert output_dir, "--output_dir folder needed."
output_dir.mkdir(exist_ok=True)
output = output_dir / (lang + ".json.gz")
print(f"Downloading {lang} wiki from {urls[lang]} to {output}")
wget(urls[lang], output)
def get_cirrus_urls(date: str = None) -> Dict[str, str]:
if date is None:
cirrus_page = BeautifulSoup(
urllib.request.urlopen(CIRRUS_URL), features="html.parser"
)
dumps = [a.get("href").strip("/") for a in cirrus_page.findAll("a")]
dumps.remove("..")
dumps.remove("current")
# We take the oldest dump since the most recent might be incomplete.
# The page only link to the N latest dumps so the dump won't be too old.
date = min(dumps)
cirrus_url = "/".join((CIRRUS_URL, date))
print("Will use the Wikipedia dump from:", date, cirrus_url)
cirrus_page = BeautifulSoup(
urllib.request.urlopen(cirrus_url), features="html.parser"
)
urls = {}
for link in cirrus_page.findAll("a"):
match = CIRRUS_DUMP_RE.match(link.get("href"))
if not match:
continue
urls[match.group(1)] = "/".join([cirrus_url, link.get("href")])
assert urls, f"No valid download urls found at {cirrus_url}"
return urls
def wget(url: str, output: Path):
subprocess.run(["wget", url, "-O", tmp(output), "-q"], check=True)
tmp(output).replace(output)
assert (
output.stat().st_size > 10_000
), f"File {output} downloaded from {url} looks too small"
if __name__ == "__main__":
func_argparse.main(dl, opening)
| cc_net-main | cc_net/get_wiki_cirrus.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Manipulate files containing one json per line.
"""
import argparse
import collections
import contextlib
import functools
import glob
import gzip
import importlib
import inspect
import io
import itertools
import json
import logging
import multiprocessing
import os
import re
import sys
import tempfile
import time
import typing as tp
import warnings
import zlib
from pathlib import Path
from typing import (
Callable,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
TextIO,
Tuple,
Union,
)
import numpy as np
import psutil # type: ignore
import requests
from typing_extensions import Protocol
from cc_net import s3util
from cc_net.s3util import RetryableDownloadFailure
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)s %(process)d:%(name)s - %(message)s",
datefmt="%Y-%m-%d %H:%M",
)
NEWLINE = " N3WL1N3 "
FilterFn = Callable[[dict], bool]
FileDescriptor = Union[Path, List[Path], str]
WritableFileLike = Union[FileDescriptor, TextIO, "SimpleIO", None]
ReadableFileLike = Union[Iterable[str], FileDescriptor, None]
def io_parser():
"""Parser shared by all commands to get input/output files."""
parser = argparse.ArgumentParser(add_help=False)
file_help = """File to read from. Can be specified several times for several files.
Be careful that bash will expand glob patterns **before** sending the args
to python. To use globs put it inside single quotes:
jsonql where --file 'data/perplexity/*.json' '{length} > 100' | head -1
jsonql --file 'data/perplexity/*.json' where '{length} > 100' | head -1
[Invalid] jsonql where '{length} > 100' --file data/perplexity/*.json | head -1
[Invalid] jsonql where --file data/perplexity/*.json '{length} > 100' | head -1
"""
parser.add_argument("-f", "--file", type=Path, action="append", help=file_help)
parser.add_argument("-o", "--output", type=Path, default="-")
parser.add_argument("--processes", type=int, default=1)
return parser
def get_parser():
parser = argparse.ArgumentParser(
description="Read a set of json files and allow to query them"
)
subparsers = parser.add_subparsers()
def add_subparser(function, arguments):
doc = function.__doc__.split("\n")[0]
p = subparsers.add_parser(function.__name__, help=doc, parents=[io_parser()])
p.set_defaults(command=function)
for k, v in arguments.items():
p.add_argument(k, **v)
add_subparser(
select,
{
"columns": dict(nargs="+", help="Extract the value of the given fields"),
"--skip_empty": dict(
action="store_true", help="Skip lines without the requested fields"
),
"--separator": dict(
default="\t", help="Separator to use between the different columns"
),
"--newline": dict(
default=NEWLINE,
help="Replace newlines found in the text by the given string",
),
},
)
add_subparser(
where,
{
"clauses": dict(nargs="+", help=""),
"--requires": dict(
action="append", help="Python module required by the clauses code."
),
},
)
add_subparser(
merge,
{
"columns": dict(nargs="+", help=""),
"--separator": dict(
default="\t", help="Separator to use between the different columns"
),
"--newline": dict(
default=NEWLINE, help="Replace the given string by actual newlines"
),
},
)
add_subparser(
describe,
{
"columns": dict(nargs="*", help=""),
"--bins": dict(
default="auto", help="Number of bins for computing the histograms"
),
"--cumulative": dict(
action="store_true", help="Compute cumulative histograms"
),
"--weights": dict(type=str, help="Column used to weight histograms"),
},
)
add_subparser(split, {"--pattern": dict(type=str)})
add_subparser(shard, {})
return parser
def _split_array(array, sep):
last = 0
for i, x in enumerate(array):
if x != sep:
continue
yield array[last:i]
last = i + 1
if last != len(array):
yield array[last:]
def main(raw_args):
parser = get_parser()
pipeline = []
file = "-"
output = "-"
processes = 1
for args_group in _split_array(raw_args, "--"):
args = vars(parser.parse_args(args_group))
command = args.pop("command")
file = args.pop("file") or file
output = args.pop("output") or output
processes = args.pop("processes") or processes
pipeline.append(as_pipe(command, args))
if not pipeline:
parser.print_help()
return
run_pipes(*pipeline, file=Path(file), output=Path(output), processes=processes)
class Transformer:
"""
Wrapper around functions transforming documents.
This allows `run_pipes` to automatically parallelize the pipeline.
Provides:
* Automatic logging. Logging can be changed with the `summary` method.
Loggin frequency with _log_freq (in second) or $JSONQL_LOG_FREQ env variable.
* Automatic parallelization without pickling. The transformers are shared
across processes, and the object is usually not pickled.
* Basic pickling / unpickling in case it's still needed.
By default will only pickle the arguments passed to the constructor.
* Delayed initialization. Internal state which is not pickable should be set
inside the `_prepare` function.
"""
parallelisable: bool = True
expect_json: bool = False
warn_when_pickling: bool = False
ready: bool = False
def __init_subclass__(cls, expect_json: bool = None):
"""Detects if the subclass expects json as input."""
spec = inspect.getfullargspec(cls.do)
if expect_json is None:
expect_json = spec.annotations.get(spec.args[1], None) == dict
cls.expect_json = expect_json
def __new__(cls, *args, **kwargs):
"""Creates the transformer and save the arguments passed to the constructor."""
t = super().__new__(cls)
Transformer.__init__(t, args, kwargs)
return t
def __init__(self, state_args: tuple = None, state_kwargs: dict = None):
"""
Init the transformer counters.
If state_args/state_kwargs are set they will override whatever was
originally passed to the subclass constructor.
"""
if state_args is not None:
self.__args = state_args
if state_kwargs is not None:
self.__kwargs = state_kwargs
self.start_time = time.time()
self.__last_log = self.start_time
self.processed = 0
# Log every 5 min unless specified other wise.
self._log_freq = int(os.environ.get("JSONQL_LOG_FREQ", 5 * 60))
self.__cls = type(self)
self._logger = logging.getLogger(self.__cls.__name__)
def __call__(self, x):
assert self.ready, f"{self} is not ready."
if x is None:
return
y = self.do(x)
self.processed += 1
if time.time() - self.__last_log > self._log_freq:
self.log_summary()
return y
def do(self, x):
raise NotImplementedError(f"'do' not implemented in {type(self)}")
def summary(self) -> List[str]:
return [self.speed_summary()]
def speed_summary(self) -> str:
delay = time.time() - self.start_time
h = delay / 3600
s = self.processed / delay
return f"Processed {self.processed:_} documents in {h:.2}h ({s:5.1f} doc/s)."
def log(self, message):
self._logger.info(message)
def log_summary(self) -> None:
if not self.ready:
self.log("Not ready.")
return
summ = self.summary() or []
for line in summ:
self.log(line)
self.__last_log = time.time()
def map(self, source: Iterable) -> Iterator:
if self.ready:
for x in source:
yield self(x)
# since we have been prepared by caller,
# caller is also responsible for calling `close`.
return
else:
with self:
for x in source:
yield self(x)
def __getstate__(self) -> Tuple[tuple, dict, bool]:
return (self.__args, self.__kwargs, self.expect_json)
def __setstate__(self, state: Tuple[tuple, dict, bool]):
if self.warn_when_pickling:
warnings.warn(f"Unpickling transformer: {type(self)}. This can be slow.")
(args, kwargs, expect_json) = state
# When unpickling `__new__` isn't called so we have to doit ourselves.
Transformer.__init__(self, state_args=args, state_kwargs=kwargs)
type(self).__init__(self, *args, **kwargs)
assert self.expect_json == expect_json
# __setstate__ is called by multiprocessing right before calling
# the object so we need to initialize everything.
self.__enter__()
def _prepare(self) -> None:
pass
def __enter__(self) -> "Transformer":
# In multiprocessing __enter__ is always called twice, so we are idempotent.
# Because we call __enter__ when deserializing this transformer and
# also when the parent transformer is deserialized.
self.start_time = time.time()
if self.ready:
return self
self._prepare()
self.ready = True
return self
def __exit__(self, *args) -> None:
self.close()
self.log_summary()
def close(self) -> None:
pass
def as_pipe(transformer, kwargs):
if isinstance(transformer, type):
return transformer(**kwargs)
return lambda source: transformer(source, **kwargs)
def compose(fns: List[Transformer]) -> Transformer:
if len(fns) == 1:
return fns[0]
return MultiTransformer(fns)
class MultiTransformer(Transformer):
def __init__(self, transformers: List[Transformer]):
super().__init__()
self.transformers = transformers
def __repr__(self) -> str:
pipeline = " | ".join(type(t).__name__ for t in self.transformers)
return f"<{pipeline}>"
def do(self, x):
for t in self.transformers:
x = t(x)
return x
def _prepare(self):
for t in self.transformers:
t.__enter__()
return self
def __exit__(self, *args):
for t in self.transformers:
t.__exit__(*args)
def summary(self):
return itertools.chain(*(t.summary() for t in self.transformers))
class Mapper(Transformer):
def __init__(self, fn):
super().__init__()
self.fn = fn
def do(self, x):
return self.fn(x)
def run_pipe(
command,
kwargs: dict = None,
file: ReadableFileLike = None,
output: WritableFileLike = None,
):
kwargs = kwargs or {}
if isinstance(kwargs, argparse.ArgumentParser):
kwargs = vars(kwargs.parse_args())
file = file or Path(kwargs.pop("file", "-"))
output = output or Path(kwargs.pop("output", "-"))
return run_pipes(as_pipe(command, kwargs), file=file, output=output)
def run_pipes(
*fns: Union[Transformer, Callable[[Iterable], Iterable]],
inputs: Iterable[dict] = None,
file: ReadableFileLike = None,
output: WritableFileLike = None,
processes: int = 1,
chunksize: int = 10_000,
):
"""
Run full document processing pipeline.
- fns: list of functions to run over the documents. Can be:
* `Iterable -> Iterable` function
* jsonql.Transformer instance
Using transformers allow the pipeline to process documents in parallel.
- inputs: iterable to read the documents from
- file: if inputs is not given, will read documents from this file.
- output: writable file like.
- processes: number of processes to use. -1 means all CPU available.
- chunksize: chunksize for multiprocessing.Pool.imap_unordered
"""
expect_json = len(fns) and isinstance(fns[0], Transformer) and fns[0].expect_json
if expect_json and inputs is None:
fns = (JsonReader(),) + fns
transformers = []
for t in fns:
if not isinstance(t, Transformer):
break
if not t.parallelisable:
break
transformers.append(t)
pipes = fns[len(transformers):]
log = logging.getLogger(__name__).info
if inputs is None:
data: Iterable = open_read(file)
else:
data = inputs
if processes == -1:
processes = os.cpu_count() or 0
with contextlib.suppress(BrokenPipeError), contextlib.ExitStack() as stack:
if transformers:
log(f"preparing {transformers}")
transform = stack.enter_context(compose(transformers))
if processes <= 1:
data = transform.map(data)
else:
p = multiprocessing.current_process()
log(f"Will start {processes} processes from {p.name}, Pid: {p.pid}")
pool = stack.enter_context(
multiprocessing.Pool(
processes=processes,
initializer=_set_global_transformer,
initargs=(transform,),
)
)
data = pool.imap_unordered(
_global_transformer, data, chunksize=chunksize
)
for fn in pipes:
if isinstance(fn, Transformer):
data = fn.map(data)
else:
data = fn(data)
write_jsons(data, output)
# Allows to share transformer acroos subprocess.
# Used by `run_pipes`
_GLOBAL_TRANSFORMER: Optional[Transformer] = None
def _set_global_transformer(transformer: Transformer):
global _GLOBAL_TRANSFORMER
p = multiprocessing.current_process()
logging.info(
f"Started subprocess {p.name}:{p.pid} from {os.getppid()} for {transformer}"
)
assert transformer.ready, f"{transformer} isn't ready"
_GLOBAL_TRANSFORMER = transformer
def _global_transformer(document: str) -> Optional[dict]:
assert _GLOBAL_TRANSFORMER is not None
return _GLOBAL_TRANSFORMER(document)
def lines(file: ReadableFileLike) -> Iterator[str]:
return (line.strip("\n") for line in open_read(file))
def read_jsons(file: ReadableFileLike, strict=False) -> Iterator[dict]:
reader = JsonReader(strict=strict)
lines = open_read(file)
for line in lines:
if line is None:
continue
yield reader(line)
reader.log_summary()
def write_jsons(source: Iterable[dict], file: WritableFileLike) -> None:
eol = os.linesep
with open_write(file) as o:
for res in source:
if res is None:
continue
if isinstance(res, dict):
json.dump(res, o, ensure_ascii=False)
o.write(eol)
continue
if isinstance(res, str):
res = res.rstrip("\n")
print(res, file=o)
class JsonReader(Transformer):
def __init__(self, strict: bool = False):
super().__init__()
self.ready = True
self.strict = strict
self.num_errors = 0
def do(self, line: str) -> Optional[dict]:
if line is None:
return None
if isinstance(line, dict):
return line
line = line.rstrip("\n")
if not line:
return None
try:
return json.loads(line)
except json.decoder.JSONDecodeError as e:
self.log_error(e)
if self.strict:
raise
return None
def log_error(self, e: json.decoder.JSONDecodeError):
self.num_errors += 1
if self.num_errors > 10:
return
MAX_LEN = 80
snippet, snippet_len = e.doc, len(e.doc)
col = e.pos
if snippet_len > MAX_LEN:
if col < MAX_LEN:
start = 0
elif snippet_len - col < MAX_LEN:
start = snippet_len - MAX_LEN
else:
start = col - MAX_LEN // 2
snippet = e.doc[start: start + MAX_LEN]
col = col - start
logging.warning(
"\n".join(
[
f"Invalid json (length={len(e.doc)}) {e}",
snippet,
" " * (col - 1) + "^",
]
)
)
def summary(self):
summ = super().summary()
if self.num_errors > 0:
summ.append(f"Skipped {self.num_errors} invalid json.")
return summ
def compile_column(column, newline):
if callable(column):
return column
if column == "*":
return json.dumps
if re.match(r"[_a-z][_a-z0-9]*", column):
def extract_col(doc):
v = doc.get(column, "")
if isinstance(v, str) and newline != "\n":
v = v.rstrip("\n").replace("\n", newline)
return v
return extract_col
return compile_expr(column)
def select(lines, columns, skip_empty=False, separator="\t", newline="\n"):
"""Yields the content of the requested columns."""
column_parsers = [compile_column(c, newline) for c in columns]
for doc in read_jsons(lines):
values = []
empty = True
for parse_col in column_parsers:
v = parse_col(doc)
values.append(str(v) or "")
empty = empty and v is None
if skip_empty and empty:
continue
yield separator.join(values)
def compile_expr(clause: Union[str, FilterFn], requires: List[str] = None):
if not isinstance(clause, str):
return clause
args_re = r"(?i:\{([_a-z][_a-z0-9]*)\})"
args_list = list(re.findall(args_re, clause))
if not args_list:
# This is only a warning because you may want to have eg random sampling
# that doesn't depend on the document.
logging.warn(
f"Warning: No variable found in expression: <{clause}>\n"
"Variables should be written inside braces, eg: {language}=='en'"
)
python_like = re.sub(args_re, r"doc.get('\1', None)", clause)
requires = requires or []
modules = {r: importlib.import_module(r) for r in requires}
return eval(f"lambda doc: {python_like}", modules)
class where(Transformer):
"""Filters the data using python code.
Ex: `jsonql where 'len({text}) > 100'`
"""
def __init__(
self, clauses: Sequence[Union[str, FilterFn]], requires: List[str] = []
):
super().__init__()
self.raw_clauses = clauses
self.requires = requires
self.n_selected = 0
self.clauses: List[FilterFn] = []
def _prepare(self):
self.clauses = [compile_expr(c, self.requires) for c in self.raw_clauses]
def do(self, doc: dict) -> Optional[dict]:
assert self.clauses
if not doc or not all((c(doc) for c in self.clauses)):
return None
self.n_selected += 1
return doc
def summary(self):
n_selected, n_docs = self.n_selected, self.processed
selectivity = n_selected / n_docs if n_docs else 0
return [f"Selected {n_selected} documents out of {n_docs} ({selectivity:5.1%})"]
def merge(lines, columns, separator="\t", newline=NEWLINE):
"""Reads tab separated columns and output a json using the given headers.
Headers are of form {key}[%{type}]
{type} can be one of {"f": float, "i": int, "b": bool, "s": string}.
Default type is string.
A special header "_" means interpret this column as json, and append all other
columns to it. Must appear only once and on last position.
Ex:
`echo '1\thello' | jsonql merge n t` --> `{"n": "1", "t": "hello"}`
`echo '1\thello" | jsonql merge n%i t` --> `{"n": 1, "t": "hello"}`
`echo '1\thello\t{"f": "bar"}' | jsonql merge n%i t _` --> `{"n": 1, "t": "hello", "f": "bar"}`
"""
handle_newlines = lambda s: s.replace(newline, "\n")
type_mapping: Dict[str, Callable] = {
"f": float,
"i": int,
"b": bool,
"s": handle_newlines,
}
type_parsing = [
type_mapping.get(f.split("%")[-1], handle_newlines) for f in columns
]
columns = [f.split("%")[0] for f in columns]
doc_index = columns.index("_") if "_" in columns else -1
read_json = JsonReader()
def parse(line):
parts = line.split(separator, len(columns) - 1)
doc: Dict[str, tp.Any] = {}
for i, value in enumerate(parts):
if columns[i] == "_":
doc.update(read_json(parts[doc_index]))
else:
try:
doc[columns[i]] = type_parsing[i](value)
except ValueError:
logging.error(
f"Error when parsing column {i} of line: {line[:100]}..."
)
return doc
for line in lines:
yield json.dumps(parse(line))
class split(Transformer):
"""Split a files in several smaller files based on the value of a field."""
# Not parallelisable since we are writing to files.
parallelisable = False
def __init__(
self,
pattern: Union[Path, str] = None,
split_fn: Callable[[dict], str] = None,
mkdir: bool = False,
):
super().__init__()
assert not (
pattern and split_fn
), "split can't have both a pattern and a split_fn"
if split_fn is not None:
self.split_fn = split_fn
else:
assert pattern, "split need either a pattern or a split_fn"
self.split_fn = self.make_split_fn(str(pattern))
self.mkdir = mkdir
self.o: dict = {}
def make_split_fn(self, pattern: str) -> Callable[[dict], str]:
candidates = list(re.findall(r"(?i:\{([_a-z][_a-z0-9]*)\})", pattern))
return lambda doc: pattern.format(**{c: doc[c] for c in candidates})
def do(self, doc):
filename = self.split_fn(doc)
if not filename:
return
o = self.o.get(filename, None)
if o is None:
if self.mkdir:
Path(filename).parent.mkdir(parents=True, exist_ok=True)
self.o[filename] = open_write(filename)
print(json.dumps(doc, ensure_ascii=False), file=self.o[filename], flush=True)
def summary(self):
summ = super().summary()
summ.append(f"Found {len(self.o)} splits.")
return summ
def close(self):
for file in self.o.values():
file.close()
def histogram(values, bins, weights):
hist, bins = np.histogram(values, bins=bins)
# n_bins = len(hist)
if weights is not None:
# Bins can't be auto-determined if weights is supplied.
# So we first compute the bins without the weights then recompute
# the histogram with the weights.
hist, bins = np.histogram(values, bins=bins, weights=weights)
# cumsum = np.cumsum(hist)
# total = cumsum[-1]
# for i in range(n_bins - 1):
# if cumsum[i] / total > 0.9:
# useful_range = np.linspace(bins[0], bins[i + 1], n_bins)
# new_bins = np.append(useful_range, [bins[-1]])
# return np.histogram(values, bins=new_bins, weights=weights)
return hist, bins
def _parse_bins(bins):
try:
if isinstance(bins, str):
if "," in bins:
bins = [int(b) for b in bins.split(",")]
else:
bins = int(bins)
except ValueError:
pass
return bins
ALL_DOCUMENTS = "<ALL_DOCUMENTS>"
MAX_LABEL_LEN = 100
def bar_chart(hist, bins):
n = sum(hist)
max_h = max(hist)
out = []
for i, h in enumerate(hist):
h_size = 80 * h // max_h
dh_size = 80 * (h - hist[i - 1]) // max_h
if h_size == 0 or dh_size == 0:
continue
bar = "█" * h_size
out.append(f"{bins[i]:8.3f} {bar:80} ({h:5d}, {h / n:5.1%}) {bins[i + 1]:8.3f}")
out.append(f"{bins[-1]:8.3f}")
return out
def display_stats(stats, key, weights=None, bins="auto", cumulative=False):
out = []
documents = stats[ALL_DOCUMENTS]
count = stats.get(key, 0)
r = count / documents if documents else 0
out.append(f"Field {key} saw {count} times ({r:5.1%})")
length = stats.get(key + ".length", None)
avg_length = length // count if length else 0
if length is not None:
out[-1] += f", average length is {length // count}"
values = stats.get(key + ".val", None)
if values:
out[-1] += f", histogram is: (bins={bins})"
if weights:
if weights not in stats:
logging.warn(f"Warning: weights column {weights} not found.")
if weights + ".val" not in stats:
logging.warn(
f"Warning: weights column {weights} is not a numeric column."
)
weights = stats.get(weights + ".val")
hist, bins = histogram(values, _parse_bins(bins), weights)
if cumulative:
hist = np.cumsum(hist)
out += bar_chart(hist, bins)
cnt = stats.get(key + ".cnt", None)
if avg_length < MAX_LABEL_LEN and cnt and max(cnt.values()) > 1:
cnt = sorted(cnt.items(), key=lambda kv: kv[1], reverse=True)
out[-1] += ", top 100 labels:"
for label, n in cnt[:100]:
if n < 5:
continue
out.append(f"{label:25}: {n:6} ({n / count:5.1%})")
return out
def describe(source, columns=None, weights=None, **kwargs):
"""Compute some statistics about a dataset.
Stats can be restricted to a subset of columns."""
MAX_HIST_SIZE = 100_000_000
MAX_CNT_SIZE = 1000
stats = {ALL_DOCUMENTS: 0}
needed = columns + [weights] if columns else None
for doc in read_jsons(source):
stats[ALL_DOCUMENTS] += 1
for k, v in doc.items():
if needed and k not in needed:
continue
stats[k] = get_or_set(stats, k, 0) + 1
if isinstance(v, str):
stats[k + ".length"] = get_or_set(stats, k + ".length", 0) + len(v)
if len(v) > MAX_LABEL_LEN: # Don't treat too long string as labels
continue
cnt = get_or_set(stats, k + ".cnt", collections.defaultdict(int))
if v in cnt or len(cnt) < MAX_CNT_SIZE:
cnt[v] += 1
elif type(v) in (int, float):
values = get_or_set(stats, k + ".val", [])
if len(values) < MAX_HIST_SIZE:
values.append(v)
elif type(v) is list and len(v) and type(v[0]) in (int, float):
values = get_or_set(stats, k + ".val", [])
if len(values) < MAX_HIST_SIZE:
values += v
elif type(v) is dict:
cnt = get_or_set(stats, k + ".cnt", collections.defaultdict(int))
for label in v:
if label in cnt or len(cnt) < MAX_CNT_SIZE:
cnt[label] += 1
documents = stats[ALL_DOCUMENTS]
yield f"Stats computed on {documents} documents:"
for k in stats:
if columns and k not in columns:
continue
if "." in k or k == ALL_DOCUMENTS:
continue
for line in display_stats(stats, k, weights=weights, **kwargs):
yield line
def shard(lines):
"""Shard a file in several smaller ones."""
# The creation of the shard is handle in a generic way. Do we need this ?
return lines
# *** Utils ***
def get_or_set(dictionary, key, default):
if key not in dictionary:
dictionary[key] = default
return dictionary[key]
class SimpleIO(Protocol):
"""A subset of methods from TextIO."""
def close(self) -> None:
...
def write(self, line: str) -> int:
...
def __enter__(self) -> "SimpleIO":
...
def __exit__(self, exc_type, exc_value, traceback):
...
def open_read(filename: ReadableFileLike) -> Iterable[str]:
"""Open the given file, list of files or files matching the given glob and read lines.
`filename` is None or "-" -> reads from stdin
`filename` is a Path / str -> interprets filename as a glob and open files matching it
`filename` is a list -> opens sequentially all files from the list using `open_read`
`filename` is something else -> returns the object wrapped in a `nullcontext`
This allows to pass already openened files or iterables.
`open_read` will decompress gzip files, given they have ".gz" suffix.
"""
if filename is None:
return sys.stdin
if isinstance(filename, list):
assert isinstance(filename[0], Path)
if len(filename) == 0:
return []
if len(filename) > 1:
return _yield_from(filename)
filename = tp.cast(Path, filename[0])
if isinstance(filename, str):
if filename.startswith("http://") or filename.startswith("https://") or filename.startswith("s3://"):
return open_remote_file(filename)
filename = Path(filename)
if not isinstance(filename, Path):
# we might have received an iterable, return it unmodified.
return filename # type: ignore
# Expand glob patterns only when reading
files = [Path(f) for f in sorted(glob.glob(str(filename)))]
if len(files) > 1:
return _yield_from(files)
if len(files) == 1:
filename = files[0]
assert isinstance(filename, Path)
if filename.name.endswith("]"):
return block_reader(filename)
logging.getLogger(__name__).info(f"Opening {filename} with mode 'rt'")
if filename.suffix == ".gz":
file: TextIO = gzip.open(filename, "rt") # type: ignore
else:
file = open(filename, "rt")
return _close_when_exhausted(file)
def _close_when_exhausted(file: TextIO) -> Iterable[str]:
with file:
yield from file
def _yield_from(files: list) -> Iterable[str]:
for file in files:
yield from open_read(file)
def open_write(
filename: WritableFileLike, max_size: str = "4G"
) -> tp.ContextManager[TextIO]:
"""Open the given file, list of files or files matching the given glob.
The return value is a ContextManager meant to be used inside a `with` block:
```
with open_write("foo.txt") as o:
...
Write mode:
replaces "?" from filename by numbers ranging from 0 to 9, generatings files of size `max_size`.
If filename ends with ".gz", creates a blocked gzip file with random access.
"""
if filename is None:
return contextlib.nullcontext(sys.stdout)
if isinstance(filename, list):
if len(filename) > 1:
return MultiFile(filename, "w", max_size)
else:
filename = tp.cast(Path, filename[0])
if isinstance(filename, str):
filename = Path(filename)
if not isinstance(filename, Path):
assert hasattr(filename, "write"), f"{filename} doesn't have a .write method."
# We return a 'TextIO' even though we only check for `.write` method,
# this works better with eg `print`.
return contextlib.nullcontext(tp.cast(TextIO, filename))
mode = "wt"
if "?" in filename.name:
return sharded_file(filename, mode, max_size)
logging.getLogger(__name__).info(f"Opening {filename} with mode {mode}")
# TODO: should we use another format ?
if filename.suffix == ".gz":
return BlockedGzipWriter(Path(filename), mode, block_size="64M")
return open(filename, "wt")
def parse_size(size):
unit_map = {"B": 1, "K": 1024, "M": 1024 ** 2, "G": 1024 ** 3}
unit = size[-1].upper()
assert (
unit in unit_map
), f"Unsupported size unit for {size}. Use one of: {unit_map.keys()}."
return int(size[:-1]) * unit_map[unit]
class MultiFile(SimpleIO):
def __init__(self, files: Iterable[Path], mode="w", max_size="4G"):
self.name = str(files)
self.mode = mode
self.files = iter(files)
self.max_size = parse_size(max_size)
self.current_handle: Optional[TextIO] = None
self.current_block_size = 0
self._open_next_handle() # Opening 1st handle allows to write directly.
def write(self, content) -> int:
# Avoid splitting newlines to a new file.
# use current_block_size since it's faster than `tell()`
if content != "\n" and self.current_block_size >= self.max_size:
self._open_next_handle()
if self.current_handle is None:
raise Exception("No more files to write to...")
written = self.current_handle.write(content)
self.current_block_size += written
return written
def _open_next_handle(self) -> bool:
self.close()
file = next(self.files, None)
if file is None:
return False
self.current_handle = open_write(file).__enter__()
self.current_block_size = 0
return True
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
@property
def closed(self):
return self.current_handle is None
def close(self):
if self.current_handle is None:
return
# log("Closing", self.current_handle.name, "with mode", self.current_handle.mode)
self.current_handle.__exit__(None, None, None)
self.current_handle = None
# not sure it helps since connections are reseted anyway.
_session = functools.lru_cache()(requests.Session)
def get_content(primary_url: str, mirror_urls: List[str] = [], n_retry: int = 3):
t0 = time.time()
n_attempts = n_retry * (1 + len(mirror_urls))
for i,url in enumerate(([primary_url] + mirror_urls) * n_retry):
try:
logging.info(f"Attempting download of {url}")
bytes = try_get_content(url)
break
except RetryableDownloadFailure as e:
if i >= n_attempts - 1:
raise e.err
# Sleep and try again on error
warnings.warn(
f"Swallowed error {e.err} while downloading {url} ({i} out of {n_retry})"
)
time.sleep(10 * 2 ** i)
dl_time = time.time() - t0
dl_speed = len(bytes) / dl_time / 1024
logging.info(
f"Downloaded {url} took {dl_time:.0f}s ({dl_speed:.1f}kB/s)"
)
return bytes
def try_get_content(url: str) -> bytes:
if url.startswith("s3://"):
return s3util.try_get_content(url)
return try_get_http_content(url)
def try_get_http_content(url: str) -> bytes:
"""Retrieve the binary content at url."""
try:
r = _session().get(url)
r.raise_for_status()
except requests.exceptions.RequestException as e:
message = e.args[0] if isinstance(e.args[0], str) else ""
if "Client Error" in message:
raise e
raise RetryableDownloadFailure(e)
return r.content
def open_remote_file(url: str, cache: Path = None, mirror_urls: List[str] = []) -> Iterable[str]:
"""Download the files at the given url to memory and opens it as a file.
Assumes that the file is small, and fetch it when this function is called.
"""
if cache and cache.exists():
return open_read(cache)
# TODO: open the remote file in streaming mode.
# The hard part is that we need to write the content on disk at the same time,
# to implement disk caching.
raw_bytes = get_content(url, mirror_urls)
content = io.BytesIO(raw_bytes)
if url.endswith(".gz"):
f: TextIO = gzip.open(content, mode="rt") # type: ignore
else:
f = io.TextIOWrapper(content)
if cache and not cache.exists():
# The file might have been created while downloading/writing.
tmp_cache = _tmp(cache)
tmp_cache.write_bytes(raw_bytes)
if not cache.exists():
tmp_cache.replace(cache)
else:
tmp_cache.unlink()
return _close_when_exhausted(f)
def sharded_file(file_pattern: Path, mode: str, max_size: str = "4G") -> MultiFile:
folder, name = file_pattern.parent, file_pattern.name
assert "?" in name, f"Can't expand give file_pattern: {file_pattern}"
n = name.count("?")
assert 0 < n < 8
assert "?" * n in name, f"The '?' need to be adjacents in {file_pattern}"
assert "r" not in mode
files = (folder / name.replace("?" * n, f"%0{n}d" % i) for i in range(10 ** n))
return MultiFile(files, mode, max_size)
class SplitFile:
def __init__(self, filename: Path, chunk: int, n_chunks: int, mode: str = "r"):
assert mode == "r"
size = os.path.getsize(filename)
self.handle = open(filename, mode)
start = chunk * size // n_chunks
self.end: int = (chunk + 1) * size // n_chunks
if start > 0:
self.handle.seek(start - 1)
# Skip incomplete line. This avoid crashing when reading eg the middle
# of a unicode char. `self.handle.buffer` is a binary file reader.
self.handle.buffer.readline() # type: ignore
def __enter__(self):
return self
def __iter__(self):
while True:
line = self.handle.readline()
if not line:
return
yield line
if self.handle.tell() >= self.end:
return
def readlines(self):
return list(self.__iter__())
def close(self):
self.handle.close()
def __exit__(self, *args):
self.close()
def get_block_readers(filename: Path, n_readers, mode="t"):
index_filename = filename.parent / (filename.name + ".index")
if not index_filename.exists():
return [gzip.open(filename, "r" + mode)]
index: List[int] = np.load(index_filename)
n_chunks = len(index)
chunk_per_reader = int(np.ceil(n_chunks / n_readers))
n_readers = int(np.ceil(n_chunks / chunk_per_reader))
start = 0
readers = []
for i in range(n_readers):
end = index[min((i + 1) * chunk_per_reader - 1, n_chunks - 1)]
r = _blocked_gzip_reader(filename, start, end, mode)
readers.append(r)
start = end
return readers
def block_reader(filename: Path) -> Iterable[str]:
root, pattern = str(filename)[:-1].split("[", 1)
assert root.endswith(".gz"), "Can only read block of a .gz file for now."
ii, nn = pattern.strip().split("/")
i, n_readers = int(ii), int(nn)
index_filename = root + ".index"
assert os.path.exists(
index_filename
), f"Index {index_filename} not found for {filename}"
index: List[int] = np.load(index_filename)
n_chunks = len(index)
chunk_per_reader = int(np.ceil(n_chunks / n_readers))
n_readers = int(np.ceil(n_chunks / chunk_per_reader))
# I'm not sure how to handle the case where there is less reader than expected.
# Currently we return empty readers.
start = 0
if i > 0:
start = index[min((i - 1) * chunk_per_reader, n_chunks - 1)]
end = index[min(i * chunk_per_reader, n_chunks - 1)]
return _blocked_gzip_reader(root, start, end, mode="t")
def _blocked_gzip_reader(filename, start, end, mode="t") -> Iterable[str]:
handle = gzip.open(filename, "r" + mode)
handle.seek(start)
try:
while handle.tell() < end:
line = handle.readline()
if not line:
break
yield line
finally:
handle.close()
class BlockedGzipWriter(MultiFile):
"""Writes a Gzip files which can be read by block.
Decreasing the block size may hurt compression, but provides more split points.
"""
def __init__(self, filename: Path, mode: str, block_size: str = "256M"):
assert "w" in mode
self.filename = Path(filename)
self.index: List[int] = []
self.zipfile: Optional[gzip.GzipFile] = None
super().__init__([], mode, block_size)
def _open_next_handle(self) -> bool:
"""Here we never actually close/open handles,
we just write the end of block sequence."""
if not self.current_handle:
mode = self.mode + "t"
self.current_handle = tp.cast(TextIO, gzip.open(self.filename, mode))
assert isinstance(self.current_handle.buffer, gzip.GzipFile)
self.zipfile = self.current_handle.buffer
return True
# Use Z_FULL_FLUSH to allow random access:
# https://github.com/madler/zlib/blob/cacf7f1d4e3d44d871b605da3b647f07d718623f/zlib.h#L313
self.current_handle.buffer.flush(zlib_mode=zlib.Z_FULL_FLUSH) # type: ignore
self.index.append(self.current_handle.tell())
self.current_block_size = 0
return True
def flush(self):
assert self.current_handle is not None
self.current_handle.flush()
def close(self):
if self.current_handle is None:
return
self.current_handle.flush()
self.index.append(self.current_handle.tell())
self.current_handle.close()
self.current_handle = None
index = np.array(self.index, dtype=np.uint64)
with open(str(self.filename) + ".index", "wb") as o:
np.save(o, index)
def grouper(iterable, n):
group = []
for x in iterable:
group.append(x)
if len(group) == n:
yield group
group = []
if group:
yield group
PROCESS = psutil.Process()
def mem_footprint_gb(pid=None):
rss = PROCESS.memory_info().rss
return rss / 1_000_000_000
def _tmp(output: Path) -> Path:
suffix = "".join(output.suffixes)
suffix = ".tmp" + suffix
prefix = output.name[: -len(suffix)]
_, tmp_path = tempfile.mkstemp(dir=output.parent, prefix=prefix, suffix=suffix)
return Path(tmp_path)
@functools.lru_cache()
def _tmp_dir() -> Path:
job_id = os.environ.get("SLURM_JOB_ID")
if job_id:
return Path("/scratch/slurm_tmpdir") / job_id
checkpoint = Path("/checkpoint") / os.environ.get("USER", "")
if checkpoint.exists():
tmp = checkpoint / "tmp"
tmp.mkdir(exist_ok=True)
return tmp
return Path("/tmp")
if __name__ == "__main__":
multiprocessing.set_start_method("fork")
main(sys.argv[1:])
| cc_net-main | cc_net/jsonql.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import functools
import itertools
import logging
import os
import sys
import time
import warnings
from pathlib import Path
from typing import Callable, Dict, Iterable, List, Optional, Sequence, Sized
import submitit
from typing_extensions import Protocol
class Executor(Protocol):
def __call__(self, function: Callable[..., str], *args: Iterable) -> None:
...
class SubmititRetryOnTimeout(submitit.helpers.Checkpointable):
def __init__(self, fn: Callable):
self.fn = fn
self.__name__ = fn.__name__
def __call__(self, *args, **kwargs):
return self.fn(*args, **kwargs)
def get_executor(
name: str,
log_dir: Path,
execution: str,
timeout_hour: float = 1.0,
mem_gb: int = 1,
cpus: int = 1,
task_parallelism: int = -1,
options: dict = {},
) -> Executor:
execution_mode = execution.split(",")[0]
options.update(
{kv.split("=", 1)[0]: kv.split("=", 1)[1] for kv in execution.split(",")[1:]}
)
if execution_mode == "mp":
warnings.warn("Execution mode 'mp' is deprecated, use 'local'.")
execution_mode = "local"
cluster = None if execution_mode == "auto" else execution_mode
# use submitit to detect which executor is available
ex = submitit.AutoExecutor(log_dir, cluster=cluster)
ex.parameters['timeout_min'] = int(timeout_hour * 60)
if ex.cluster == "local":
# LocalExecutor doesn't respect task_parallelism
return functools.partial(custom_map_array, ex, task_parallelism)
if ex.cluster == "debug":
return debug_executor
# We are on slurm
if task_parallelism == -1:
task_parallelism = 500
ex.update_parameters(
name=name,
timeout_min=int(timeout_hour * 60),
mem_gb=mem_gb,
cpus_per_task=cpus,
slurm_array_parallelism=task_parallelism,
**options,
)
return functools.partial(map_array_and_wait, ex)
def map_array_and_wait(
ex: submitit.AutoExecutor, function: Callable[..., str], *args: Iterable
):
f_name = function.__name__
assert len(args) > 0, f"No arguments passed to {f_name}"
approx_length = _approx_length(*args)
print(f"Submitting {f_name} in a job array ({approx_length} jobs)")
jobs = ex.map_array(function, *args)
if not jobs:
return
failed_jobs = []
done = 0
total = len(jobs)
job_array_id = jobs[0].job_id.split("_")[0]
print(f"Started {f_name} in job array {job_array_id} ({len(jobs)} jobs).")
for job in submitit.helpers.as_completed(jobs):
done += 1
e = job.exception()
if not e:
print(f"Finished job {job.job_id} ({done} / {total}).", job.result())
continue
print(f"Failed job {job.job_id} ({done} / {total}):", e)
failed_jobs.append(job)
if failed_jobs:
n_failures = 10
message = f"{len(failed_jobs)} / {done} jobs failed while running {f_name}"
print(message)
for job in failed_jobs[:n_failures]:
print(f"Failed {job.job_id} -> {job.paths.stderr}")
if len(failed_jobs) > n_failures:
print(f"... ({len(failed_jobs) - n_failures} failed job skipped)")
raise Exception(message)
def debug_executor(function: Callable[..., Optional[str]], *args: Iterable) -> None:
logging.getLogger().setLevel(logging.DEBUG)
approx_length = _approx_length(*args)
for i, x in enumerate(zip(*args)):
try:
message = function(*x)
except Exception:
try:
import ipdb as pdb # type: ignore
except ImportError:
import pdb # type: ignore
import traceback
traceback.print_exc()
print("")
pdb.post_mortem()
sys.exit(1)
if message is not None:
print(message, f"({i + 1} / {approx_length})")
def _approx_length(*args: Iterable):
for a in args:
if isinstance(a, Sized):
return len(a)
return -1
def custom_map_array(
ex: submitit.AutoExecutor,
parallelism: int,
function: Callable[..., Optional[str]],
*args: Iterable,
) -> None:
f_name = function.__name__
assert len(args) > 0, f"No arguments passed to {f_name}"
jobs_args = list(zip(*args))
total = len(jobs_args)
if parallelism < 0:
parallelism = os.cpu_count() or 0
assert parallelism >= 0, f"Can't run any jobs with task_parallelism={parallelism}"
print(f"Submitting {total} jobs for {f_name}, with task_parallelism={parallelism}")
enqueued = 0
done = 0
running_jobs: List[submitit.Job] = []
failed_jobs: List[submitit.Job] = []
while done < len(jobs_args):
# Try to queue more job if we have some bandwidth.
if enqueued < total and len(running_jobs) < parallelism:
running_jobs.append(ex.submit(function, *jobs_args[enqueued]))
enqueued += 1
continue
# Else wait for some job to finish
if not running_jobs:
warnings.warn(
f"No more running jobs, yet we submitted only {enqueued} / {total} and finished {done} / {total}"
)
break
job = get_next_job(running_jobs)
running_jobs.remove(job)
done += 1
e = job.exception()
if not e:
print(f"Finished job {job.job_id} ({done} / {total}).", job.result())
continue
print(f"Failed job {job.job_id} ({done} / {total}):", e)
failed_jobs.append(job)
if failed_jobs:
n_failures = 10
message = f"{len(failed_jobs)} / {done} jobs failed while running {f_name}"
print(message)
for job in failed_jobs[:n_failures]:
print(f"Failed {job.job_id} -> {job.paths.stderr}")
if len(failed_jobs) > n_failures:
print(f"... ({len(failed_jobs) - n_failures} failed job skipped)")
raise Exception(message)
def get_next_job(
jobs: Sequence[submitit.Job], poll_frequency: float = 10
) -> submitit.Job:
"""
Waits for any of the job to finish and returns it.
jobs: list of jobs
poll_frequency: frequency in second at which we check job status
"""
start = time.time()
waiting = False
while True:
for job in jobs:
if job.done():
return job
if not waiting:
job_ids = [j.job_id for j in jobs[:4]]
suffix = "..." if len(jobs) > 4 else ""
print(
f"Waiting on {len(jobs)} running jobs. Job ids: {','.join(job_ids)}{suffix}"
)
waiting = True
time.sleep(poll_frequency)
| cc_net-main | cc_net/execution.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
import time
import warnings
from typing import Iterable, Iterator, Sequence, Sized, Tuple, Type
import numpy as np
HASH_TYPE: Type[np.uint64] = np.uint64
GETPY_WARNING = False
class AbstractDedupHashSet(Sized, Iterable[np.uint64]):
"""A dict-like that returns `True` for keys that have been added more than once.
The API is batched and expect np.array as input. This batching grants better
perf when using the C++ implementation.
"""
dtype: Type[np.uint64] = HASH_TYPE
def __repr__(self):
implementation = type(self).__name__
return f"[{implementation}, len: {len(self)}"
def __len__(self) -> int:
...
def __contains__(self, values: Sequence[np.uint64]) -> np.ndarray:
...
def __getitem__(self, values) -> np.ndarray:
...
def __setitem__(self, keys, values) -> None:
...
def items(self) -> Iterable[Tuple[np.uint64, np.uint8]]:
...
def keys(self) -> Iterable[np.uint64]:
...
def __iter__(self) -> Iterator[np.uint64]:
return iter(self.keys())
def add(self, h, contains=None):
"""Add the given keys. First time a key is added the value is set to 0,
then it's set to one."""
if not isinstance(h, np.ndarray):
h = np.array(h, dtype=HASH_TYPE)
if contains is None:
contains = self.__contains__(h)
self.__setitem__(h, contains)
return contains
def merge(self, keys, values):
contains = self.__contains__(keys)
self.__setitem__(keys, contains | values)
def dump(self, filename):
return self.dump_np(filename)
def load(self, filename):
return self.load_np(filename)
def dump_np(self, filename):
kv_type = np.dtype([("k", HASH_TYPE), ("v", np.uint8)])
items = np.fromiter(self.items(), dtype=kv_type, count=len(self))
with open(filename, "wb") as f:
np.save(f, items)
def load_np(self, filename):
items = np.load(str(filename))
keys = items["k"].copy()
values = items["v"].copy()
self.merge(keys, values)
def dump_np2(self, filename):
keys = np.fromiter(
(k for (k, v) in self.items()), dtype=HASH_TYPE, count=len(self)
)
with open(filename, "wb") as f:
np.save(f, keys)
values = np.fromiter(
(v for (k, v) in self.items()), dtype=np.uint8, count=len(self)
)
with open(str(filename) + ".val", "wb") as f:
np.save(f, values)
def load_np2(self, filename):
keys = np.load(filename)
values = np.load(str(filename) + ".val")
self.merge(keys, values)
class NaiveHashSet(dict, AbstractDedupHashSet):
"""Pure python implementation of AbstractDedupHashSet.
This implementation is quite fast, since Python dict are heavily optimized.
"""
def __init__(self, iterable=None):
super().__init__()
global GETPY_WARNING
if GETPY_WARNING:
warnings.warn(
"Module 'getpy' not found. Deduplication will take more RAM."
" Try `pip install cc_net[getpy]"
)
GETPY_WARNING = False
def __contains__(self, values):
"""Returns `True` if the object has been added at list once."""
contains_point = super().__contains__
return np.fromiter(
map(contains_point, values), count=len(values), dtype=np.uint8
)
def __getitem__(self, values):
"""Returns `True` if the object has been added at list twice."""
get_point = super().get
return np.fromiter(
map(lambda x: get_point(x, False), values),
count=len(values),
dtype=np.uint8,
)
def __setitem__(self, keys, values):
assert len(keys) == len(values)
for k, v in zip(keys, values):
dict.__setitem__(self, k, v)
try:
import getpy as gp # type: ignore
class _FlatHashSet(gp.Dict, AbstractDedupHashSet):
"""C++ backed implementation of AbstractDedupHashSet.
This implementation is slightly slower than the Python one but uses
3x less RAM.
See https://github.com/atom-moyer/getpy.
"""
def __init__(self):
super().__init__(HASH_TYPE, np.uint8, default_value=False)
def __contains__(self, h):
"""Returns `True` if the object has been added at list once."""
if not isinstance(h, np.ndarray):
h = np.array(h, dtype=HASH_TYPE)
c = gp.Dict.__contains__(self, h)
c.dtype = np.uint8
return c
def dump(self, filename):
return self.dump_gp(filename)
def load(self, filename):
return self.load_gp(filename)
def dump_gp(self, filename):
return gp.Dict.dump(self, str(filename))
def load_gp(self, filename):
"""Override gp.Dict.load, to correctly merge values instead of overwriting."""
other = gp.Dict(HASH_TYPE, np.uint8, default_value=False)
other.load(str(filename))
n = len(other)
keys = np.fromiter(
(k for (k, v) in other.items()), dtype=HASH_TYPE, count=n
)
values = np.fromiter(
(v for (k, v) in other.items()), dtype=np.uint8, count=n
)
self.merge(keys, values)
FlatHashSet: Type[AbstractDedupHashSet] = _FlatHashSet
except ImportError:
GETPY_WARNING = True
FlatHashSet = NaiveHashSet
def timeit(message, function, *args):
start = time.time()
function(*args)
end = time.time()
print(message, f"took {end - start:.0f}s")
def compare_load(*filenames):
assert filenames, "No file given"
def load_list():
hashes = []
for f in filenames:
h = FlatHashSet()
h.load(f)
print(f"Loaded {h} from {f}.")
hashes.append(h)
return hashes
def load_all(load, ext):
hashes = FlatHashSet()
for f in filenames:
load(hashes, f + ext)
def dump_all(hashes, dump, ext):
for h, f in zip(hashes, filenames):
dump(h, f + ext)
hashes = load_list()
dump_gp = getattr(FlatHashSet, "dump_gp")
if dump_gp is not None:
timeit("Dumping using gp.dump", dump_all, hashes, dump_gp, ".gp.test")
timeit("Dumping using dump_np", dump_all, hashes, FlatHashSet.dump_np, ".npy.test")
timeit(
"Dumping using dump_np2", dump_all, hashes, FlatHashSet.dump_np2, ".npy2.test"
)
load_gp = getattr(FlatHashSet, "load_gp")
if load_gp is not None:
timeit("Loading using gp.load", load_all, load_gp, ".gp.test")
timeit("Loading using load_np", load_all, FlatHashSet.load_np, ".npy.test")
timeit("Loading using load_np2", load_all, FlatHashSet.load_np2, ".npy2.test")
# Loading 10 shards:
# [dedup] Dumping using gp.dump took 52s
# [dedup] Dumping using dump_np took 270s
# [dedup] Dumping using dump_np2 took 483s
#
# [dedup] Loading using gp.load took 654s
# [dedup] Loading using load_np took 82s
# [dedup] Loading using load_np2 took 76s
if __name__ == "__main__":
compare_load(*sys.argv[1:])
| cc_net-main | cc_net/flat_hash_set.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import base64
import hashlib
import itertools
import urllib.parse
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Sequence, Set, Union
import numpy as np
from cc_net import jsonql
from cc_net.execution import get_executor
from cc_net.jsonql import mem_footprint_gb
HASH_SIZE = 4
HASH_TYPE = np.uint32
PUBLIC_FIELDS = ["url", "digest"]
COMPUTED_FIELDS = ["cc_segment", "language", "language_score", "bucket", "perplexity"]
DATA = Path(__file__).parent.parent / "data"
# This is similar to dedup methods but with use 32 bits hashes.
def _b2i(b: bytes) -> int:
return np.frombuffer(b[:HASH_SIZE], dtype=HASH_TYPE, count=1, offset=0).item(0)
def _str_hash(s: str) -> int:
h = hashlib.sha1(bytes(s, encoding="utf-8"))
return _b2i(h.digest())
def get_hashes(lines: Iterable[str]) -> List[bytes]:
h = HASH_SIZE
return [hashlib.sha1(bytes(l, encoding="utf-8")).digest()[:h] for l in lines]
def encode_hashes(hashes: Iterable[bytes]) -> str:
return base64.b64encode(b"".join(hashes)).decode("ascii")
def encode_as_hashes(lines: Iterable[str]) -> str:
return encode_hashes(get_hashes(lines))
def decode_hashes(compact: str) -> List[bytes]:
all_hashes = base64.b64decode(compact)
res = []
assert len(all_hashes) % HASH_SIZE == 0
for i in range(len(all_hashes) // HASH_SIZE):
chunk = all_hashes[i * HASH_SIZE : (i + 1) * HASH_SIZE]
res.append(chunk)
return res
def encode_line_ids(line_ids: Sequence[int]) -> str:
arr = np.array(line_ids, dtype="<u2")
return base64.b64encode(arr.tobytes()).decode("ascii")
def decode_line_ids(compact: str) -> List[int]:
ids_bytes = bytearray(base64.b64decode(compact))
return np.ndarray(len(ids_bytes) // 2, dtype="<i2", buffer=ids_bytes)
def get_doc_key(digest: str) -> int:
assert digest.startswith("sha1:")
h = base64.b32decode(digest[5:])
return _b2i(h[:HASH_SIZE])
class Minifier(jsonql.Transformer):
ready = True
def __init__(self):
self.fields = frozenset(COMPUTED_FIELDS + PUBLIC_FIELDS)
def do(self, doc: dict) -> Optional[dict]:
line_ids: List[int] = doc.pop("line_ids")
fields = self.fields
keys = list(doc.keys())
for k in keys:
if k not in fields:
doc.pop(k, None)
p = doc.get("perplexity", 0)
doc["line_ids"] = encode_line_ids(line_ids)
if p:
doc["perplexity"] = round(p, 1)
s = doc.get("language_score", 0)
if s:
doc["language_score"] = round(s, 2)
return doc
class MetadataFetcher(jsonql.Transformer):
"""Reads documents from CC snapshot and join precomputed metadata.
CC snapshots are split in segments. Each segment is 64Mb long.
The metadata must also be stored in segments of the same size and names.
"""
def __init__(self, folder: Union[Path, str]):
self.ready = True
self.metadata: Dict[int, dict] = {}
self._segments: Set[str] = set()
self.read_doc = 0
self.missed_doc = 0
self.missed_par = 0
self.processed_par = 0
if isinstance(folder, str):
# detect path passed as string
if urllib.parse.urlparse(folder).scheme == "":
folder = Path(folder)
assert folder.exists(), f"Metadata folder not found: {folder}"
self.folder = folder
self.segment: str = ""
self.segments_read_twice = 0
def meta_file(self, segment: str) -> str:
file_name = segment.split("/")[-1]
assert file_name.endswith(".warc.wet.gz") or file_name.endswith(".warc.wet")
if isinstance(self.folder, str):
return urllib.parse.urljoin(
self.folder, file_name.replace(".warc.wet", ".json")
)
meta_file = self.folder / file_name.replace(".warc.wet", ".json")
assert (
meta_file.exists()
), f"Couldn't find metadata file for segment {segment} at {meta_file}"
return str(meta_file)
def fetch_metadata(self, segment: str) -> None:
meta_file = self.meta_file(segment)
k = get_doc_key
self.metadata = {}
collision = 0
for m in jsonql.read_jsons(meta_file):
key = k(m["digest"])
if key in self.metadata:
collision += 1
self.metadata[key] = m
self.log(f"Loaded {len(self.metadata)} metadatas from {meta_file}")
if collision > 0:
self._logger.warning(f"Found {collision} collisions !")
self.segment = segment
if segment in self._segments:
self.log("Cache miss")
self.segments_read_twice += 1
self._segments.add(segment)
def do(self, doc: dict) -> Optional[dict]:
if self.segment != doc["cc_segment"]:
self.fetch_metadata(doc["cc_segment"])
digest = doc["digest"]
key = get_doc_key(digest)
if key not in self.metadata:
return None
metadata = self.metadata.pop(key)
return self.clean(metadata, doc)
def clean(self, metadata: dict, full_doc: dict) -> Optional[dict]:
line_ids = decode_line_ids(metadata.pop("line_ids"))
lines = full_doc["raw_content"].split("\n")
cleaned = []
for l in line_ids:
if l >= len(lines) or l < 0:
self.missed_par += 1
continue
cleaned.append(lines[l])
self.processed_par += len(line_ids)
if not cleaned:
self.missed_doc += 1
return None
full_doc["raw_content"] = "\n".join(cleaned)
full_doc["original_nlines"] = full_doc["nlines"]
full_doc["original_length"] = full_doc["length"]
full_doc["nlines"] = len(cleaned)
full_doc["length"] = len(full_doc["raw_content"])
for key, value in metadata.items():
full_doc[key] = value
return full_doc
def summary(self) -> List[str]:
summ = super().summary()
mem = mem_footprint_gb()
len_cache = len(self.metadata)
summ.append(
f"Read {self.read_doc:_}, stocking {len_cache:_} doc in {mem:.1f}g."
)
if self.missed_doc:
r = self.missed_doc / self.processed
summ.append(f"! Missed {self.missed_doc} documents ({r:.1%}) !")
if self.missed_par:
r = self.missed_par / self.processed
summ.append(f"! Missed {self.missed_par} paragraphs ({r:.1%}) !")
return summ
def _expand_files(files: List[Path]) -> List[Path]:
if len(files) == 1 and files[0].is_dir():
folder = files[0]
files = sorted(folder.glob("*.json.gz"))
print(f"Found {len(files)} files under {folder}/*.json.gz")
assert files, "No files found"
return files
def minify_file(file: Path, output: Path) -> str:
"""Minify the given file."""
jsonql.run_pipes(Minifier(), file=file, output=output)
return f"Minified {output}"
def minify(
files: List[Path], output_dir: Path, execution: str = "mp", parallelism: int = -1
):
"""Minify all the files in the given folder."""
files = _expand_files(files)
output_dir.mkdir(exist_ok=True)
with open(output_dir / "files.txt", "w") as o:
for f in files:
print(f.name, file=o)
outputs = [output_dir / f.name for f in files]
ex = get_executor(
"minify",
output_dir / "logs",
execution,
timeout_hour=2,
cpus=1,
task_parallelism=parallelism,
)
ex(minify_file, files, outputs)
def fetch_metadata_file(
file: Union[Path, str],
metadata_dir: Union[Path, str],
output: Path,
cache_dir: Path = None,
):
unminifier = MetadataFetcher(metadata_dir)
tmp = output.with_name("tmp." + output.name)
jsonql.run_pipes(unminifier, file=file, output=tmp)
tmp.rename(output)
return f"Fetched metadata for {file}. Results at {output}."
def fetch_metadata(
files: List[str],
metadata_dir: Union[Path, str],
output_dir: Path,
execution: str = "mp",
parallelism: int = -1,
cache_dir: Path = None,
):
if len(files) == 1 and Path(files[0]).is_dir():
folder = Path(files[0])
files = [str(f) for f in sorted(folder.glob("*.json.gz"))]
print(f"Found {len(files)} files under {folder}/*.json.gz")
assert len(files) > 0, "No files given."
output_dir.mkdir(exist_ok=True)
outputs = [output_dir / str(f).split("/")[-1] for f in files]
if cache_dir is None:
cache_dir = output_dir / "wet_cache"
cache_dir.mkdir(exist_ok=True)
if str(cache_dir) == "none":
cache_dir = None
files = [f for f, o in zip(files, outputs) if not o.exists()]
outputs = [o for o in outputs if not o.exists()]
if not files:
return
ex = get_executor(
"unminify",
output_dir / "logs",
execution,
timeout_hour=8,
cpus=1,
task_parallelism=parallelism,
mem_gb=32,
)
ex(fetch_metadata_file, files, outputs, itertools.repeat(cache_dir))
if __name__ == "__main__":
import func_argparse
func_argparse.main(minify_file, minify, fetch_metadata, fetch_metadata_file)
| cc_net-main | cc_net/minify.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import re
import unicodedata
UNICODE_PUNCT = {
",": ",",
"。": ".",
"、": ",",
"„": '"',
"”": '"',
"“": '"',
"«": '"',
"»": '"',
"1": '"',
"」": '"',
"「": '"',
"《": '"',
"》": '"',
"´": "'",
"∶": ":",
":": ":",
"?": "?",
"!": "!",
"(": "(",
")": ")",
";": ";",
"–": "-",
"—": " - ",
".": ". ",
"~": "~",
"’": "'",
"…": "...",
"━": "-",
"〈": "<",
"〉": ">",
"【": "[",
"】": "]",
"%": "%",
"►": "-",
}
UNICODE_PUNCT_RE = re.compile(f"[{''.join(UNICODE_PUNCT.keys())}]")
def replace_unicode_punct(text: str) -> str:
return "".join((UNICODE_PUNCT.get(c, c) for c in text))
def remove_unicode_punct(text: str) -> str:
"""More aggressive version of replace_unicode_punct but also faster."""
return UNICODE_PUNCT_RE.sub("", text)
def strip_accents(line: str) -> str:
"""Strips accents from a piece of text."""
nfd = unicodedata.normalize("NFD", line)
output = [c for c in nfd if unicodedata.category(c) != "Mn"]
if len(output) == line:
return line
return "".join(output)
# Build a regex matching all control characters.
NON_PRINTING_CHARS_RE = re.compile(
f"[{''.join(map(chr, list(range(0,32)) + list(range(127,160))))}]"
)
DIGIT_RE = re.compile(r"\d")
PUNCT_OR_NON_PRINTING_CHARS_RE = re.compile(
(UNICODE_PUNCT_RE.pattern + NON_PRINTING_CHARS_RE.pattern).replace("][", "")
)
def remove_non_printing_char(text: str) -> str:
return NON_PRINTING_CHARS_RE.sub("", text)
def normalize_spacing_for_tok(text: str, language: str = "en") -> str:
res = (
text.replace("\r", "")
# remove extra spaces
.replace("(", " (")
.replace(")", ") ")
.replace(" +", " ")
)
res = re.sub(r"\) ([\.\!\:\?\;\,])", r"\)\1", res)
res = res.replace("( ", "(").replace(" )", ")")
res = re.sub(r"(\d) \%", r"\1\%", res)
res = res.replace(" :", ":").replace(" ;", ";")
res = res.replace("`", "'").replace("''", ' " ')
res = (
res.replace("„", '"')
.replace("“", '"')
.replace("”", '"')
.replace("–", "-")
.replace("—", " - ")
.replace(" +", " ")
.replace("´", "'")
.replace("([a-z])‘([a-z])", r"\1'\2/")
.replace("([a-z])’([a-z])", r"\1'\2/")
.replace("‘", '"')
.replace("‚", '"')
.replace("’", '"')
.replace("''", '"')
.replace("´´", '"')
.replace("…", "...")
# French quotes
.replace(" « ", ' "')
.replace("« ", '"')
.replace("«", '"')
.replace(" » ", '" ')
.replace(" »", '"')
.replace("»", '"')
# handle pseudo-spaces
.replace(" %", "%")
.replace("nº ", "nº ")
.replace(" :", ":")
.replace(" ºC", " ºC")
.replace(" cm", " cm")
.replace(" ?", "?")
.replace(" !", "!")
.replace(" ;", ";")
.replace(", ", ", ")
.replace(" +", " ")
.replace(".", ". ")
)
# English "quotation," followed by comma, style
if language == "en":
res = re.sub(r"\"([,\.]+)", r"\1\"", res)
# Czech is confused
elif language == "cs" or language == "cz":
pass
# German/Spanish/French "quotation", followed by comma, style
else:
res = res.replace(',"', '",')
res = re.sub(
r"(\.+)\"(\s*[^<])", r"\"\1\2", res
) # don't fix period at end of sentence
if (
language == "de"
or language == "es"
or language == "cz"
or language == "cs"
or language == "fr"
):
res = re.sub(r"(\d) (\d)", r"\1,\2", res)
else:
res = re.sub(r"(\d) (\d)", r"\1.\2", res)
return res
def normalize(line: str, accent=True, case=True, numbers=True, punct=1) -> str:
line = line.strip()
if not line:
return line
if case:
line = line.lower()
if accent:
line = strip_accents(line)
if numbers:
line = DIGIT_RE.sub("0", line)
if punct == 1:
line = replace_unicode_punct(line)
elif punct == 2:
line = remove_unicode_punct(line)
line = remove_non_printing_char(line)
return line
def slow_normalize_for_dedup(line: str) -> str:
return normalize(line, accent=False, case=True, numbers=True, punct=2)
def normalize_for_dedup(line: str) -> str:
line = line.strip()
if not line:
return line
# case
line = line.lower()
# numbers
line = DIGIT_RE.sub("0", line)
line = PUNCT_OR_NON_PRINTING_CHARS_RE.sub("", line)
return line
| cc_net-main | cc_net/text_normalizer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
import subprocess
from pathlib import Path
from typing import List
import func_argparse
import numpy as np
from cc_net import jsonql
def get_index(file: Path) -> Path:
return file.parent / (file.name + ".index")
def _get_tmp(output: Path) -> Path:
return output.parent / (output.stem + ".tmp" + output.suffix)
def reshard(
inputs: List[Path],
output: Path,
tmp: Path = None,
free_original: bool = False,
rm_original: bool = False,
) -> Path:
"""Read the given files and concatenate them to the output file.
Can remove original files on completion, or just write dummy content into them to free disk.
"""
if tmp is None:
tmp = _get_tmp(output)
logging.info(f"Resharding {inputs} to {tmp}, will move later to {output}")
jsonql.run_pipes(file=inputs, output=tmp)
tmp.replace(output)
tmp_index = get_index(tmp)
if tmp_index.exists():
tmp_index.replace(get_index(output))
if not (free_original or rm_original):
return output
for _input in inputs:
if rm_original:
_input.unlink()
elif free_original:
# Overwrite the previous file.
# This frees up disk space and allows doit to properly track the success.
_input.write_text(f"Resharded into {output}")
if get_index(_input).is_file():
get_index(_input).unlink()
return output
def fast_reshard(
inputs: List[Path],
output: Path,
tmp: Path = None,
free_original: bool = False,
rm_original: bool = False,
) -> Path:
"""Same as reshard but don't re-compress the output.
This will lead to a bigger output file, especially if the shards are very small.
"""
if tmp is None:
tmp = _get_tmp(output)
with open(tmp, "wb") as o:
subprocess.run(["cat"] + [str(f) for f in inputs], stdout=o)
tmp.replace(output)
indexes_files = [get_index(i) for i in inputs]
existing_indexes = sum(i.exists() for i in indexes_files)
assert (
existing_indexes == len(indexes_files) or existing_indexes == 0
), "some indexes don't exist."
if existing_indexes > 0:
indexes = [np.load(idx) for idx in indexes_files]
for i in range(len(indexes) - 1):
indexes[i + 1] += indexes[i][-1]
with open(str(output) + ".index", "wb") as o:
np.save(o, np.concatenate(indexes))
if not (free_original or rm_original):
return output
for _input in inputs:
if rm_original:
_input.unlink()
elif free_original:
# Overwrite the previous file.
# This frees up disk space and allows doit to properly track the success.
_input.write_text(f"Resharded into {output}")
if get_index(_input).is_file():
get_index(_input).unlink()
return output
def determine_groups(
inputs: List[Path], target_size: int = 4 * 1024 ** 3
) -> List[List[Path]]:
if len(inputs) == 0:
return []
sample = inputs[:10]
typical_size = sum(s.stat().st_size for s in sample) / len(sample)
group_size = min(target_size // typical_size, len(inputs))
group_size = max(group_size, 1)
return jsonql.grouper(inputs, group_size)
if __name__ == "__main__":
func_argparse.single_main(reshard)
| cc_net-main | cc_net/regroup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import time
from pathlib import Path
from typing import Dict, List, Optional, Sequence, Tuple, Union
import kenlm # type: ignore
import numpy as np # type: ignore
import pandas as pd # type: ignore
import sentencepiece # type: ignore
from cc_net import jsonql, text_normalizer
LMDescriptor = Union[Dict[str, Path], Union[Path, str]]
def get_args():
parser = argparse.ArgumentParser(
description="Compute the score of each sentences of a document",
parents=[jsonql.io_parser()],
)
parser.add_argument("--models", type=str)
parser.add_argument("--sentences", action="store_true", default=False)
parser.add_argument(
"--languages", type=str, help="Ignore doc with another language"
)
parser.add_argument("--field", type=str, default=None)
parser.add_argument("--newline", type=str, default="\n")
return vars(parser.parse_args())
def pp(log_score, length):
return 10.0 ** (-log_score / length)
class SentencePiece(jsonql.Transformer):
# Sentence Pieces model have to be read back from disk.
warning_when_pickling = True
def __init__(
self,
model: Path,
field: str,
output_field: str = "tokenized",
normalize: bool = False,
):
super().__init__()
self.model = model
self.field = field
self.output_field = output_field
self.normalize = normalize
self.sp: sentencepiece.SentencePieceProcessor = None
def _prepare(self):
if self.sp is not None:
return
self.sp = sentencepiece.SentencePieceProcessor()
self.sp.load(str(self.model))
return self
def do(self, document: dict) -> dict:
text = document[self.field]
if self.normalize:
text = text_normalizer.normalize(text)
tokenized = self.sp.encode_as_pieces(text)
document[self.output_field] = " ".join(tokenized)
return document
class MultiSentencePiece(jsonql.Transformer):
warning_when_pickling = True
def __init__(
self,
models: Union[Path, Dict[str, Path]],
field: str,
output_field: str = "tokenized",
normalize: bool = False,
):
super().__init__()
self.field = field
self.output_field = output_field
self.normalize = normalize
self._prefetch: Sequence[str] = []
if isinstance(models, Path):
self.models = {
m.name.split(".")[0]: m for m in models.parent.glob(models.name)
}
else:
self.models = models
self._prefetch = list(models.keys())
self.sp: Dict[str, sentencepiece.SentencePieceProcessor] = {}
def _prepare(self) -> None:
for lang in self._prefetch:
assert (
self.get_sp(lang) is not None
), f"No model found for {lang} at {self.models.get(lang)}."
def get_sp(self, lang) -> Optional[sentencepiece.SentencePieceProcessor]:
sp = self.sp.get(lang)
if sp is not None:
return sp
if lang not in self.models:
return None
start_load = time.time()
self.log(f"Loading {self.models[lang]}...")
sp = sentencepiece.SentencePieceProcessor()
sp.load(str(self.models[lang]))
self.sp[lang] = sp
load_time = time.time() - start_load
self.log(f"Loaded {self.models[lang]} (took {load_time / 60:.1f}min)")
return sp
def do(self, document: dict) -> Optional[dict]:
text = document[self.field]
if self.normalize:
text = text_normalizer.normalize(text)
sp = self.get_sp(document.get("language"))
if sp is None:
return document
tokenized = sp.encode_as_pieces(text)
document[self.output_field] = " ".join(tokenized)
return document
class DocLM(jsonql.Transformer):
def __init__(
self,
models: Union[Path, Dict[str, Path]],
field: str,
output_field: str = "perplexity",
newline: str = "\n",
normalize: bool = True,
load_method: int = 2,
):
super().__init__()
self.field = field
self.output_field = output_field
self.newline = newline
self.normalize = normalize
self._prefetch: Sequence[str] = []
self.lm_config = kenlm.Config()
# This is the default settings
# POPULATE will mmap the models and populate the pages.
# Maybe that's not the best way when the models are on a network disk.
# TODO: try copying models file, try READ or PARALLEL_READ
self.lm_config.load_method = load_method
if isinstance(models, Path):
self.models = {
m.name.split(".")[0]: m for m in models.parent.glob(models.name)
}
else:
self.models = models
self._prefetch = list(models.keys())
self.lm: Dict[str, kenlm.Model] = {}
self.n_lines = 0
def _prepare(self) -> None:
for lang in self._prefetch:
assert (
self.get_lm(lang) is not None
), f"No model found for {lang} at {self.models.get(lang)}."
def get_lines(self, document: dict) -> List[str]:
lang = document.get("language")
if not lang:
return []
if lang not in self.models:
return []
content = document.get(self.field)
if not content:
return []
lines = content.split(self.newline)
self.n_lines += len(lines)
return lines
def get_lm(self, lang: Optional[str]) -> Optional[kenlm.Model]:
if lang is None:
return None
lm = self.lm.get(lang)
if lm is not None:
return lm
model = self.models.get(lang)
if model is None:
return None
start_load = time.time()
self.log(f"Loading {self.models[lang]}...")
lm = kenlm.Model(str(model), self.lm_config)
self.lm[lang] = lm
load_time = time.time() - start_load
self.log(f"Loaded {self.models[lang]} (took {load_time / 60:.1f}min)")
return lm
def do(self, document: dict) -> dict:
lines = self.get_lines(document)
model = self.get_lm(document.get("language"))
if not lines or not model:
return document
doc_log_score, doc_length = 0, 0
for line in lines:
if self.normalize:
line = text_normalizer.normalize(line)
log_score = model.score(line)
length = len(line.split()) + 1
doc_log_score += log_score
doc_length += length
document[self.output_field] = round(pp(doc_log_score, doc_length), 1)
return document
def summary(self):
delay = time.time() - self.start_time
h = delay / 3600
s = self.n_lines / delay
summ = super().summary()
summ.append(f"Processed {self.n_lines:_} lines in {h:.2}h ({s:.1} lines/s).")
return summ
class SentencesLM(DocLM):
"""Returns the score of each individual paragraph."""
def do(self, document: dict) -> Optional[str]: # type: ignore
lines = self.get_lines(document)
model = self.get_lm(document.get("language"))
if not lines or not model:
return None
sentences = []
for line in lines:
if self.normalize:
line = text_normalizer.normalize(line)
log_score = model.score(line)
length = len(line.split()) + 1
sentences.append(f"{pp(log_score, length)}\t{line}")
return "\n".join(sentences)
class PerplexityBucket(jsonql.Transformer):
def __init__(
self, cutoff_csv: Path, percentile_head: int = 30, percentile_tail: int = 60
):
super().__init__()
self.cutoff_csv = cutoff_csv
self.percentile_head = percentile_head
self.percentile_tail = percentile_tail
self.cutoffs: Dict[str, Tuple[float, float]] = {}
def _prepare(self) -> None:
cutoffs = pd.read_csv(self.cutoff_csv, index_col=0)
self.cutoffs = {
l: (cutoffs[l][self.percentile_head], cutoffs[l][self.percentile_tail])
for l in cutoffs.columns
}
def get_bucket(self, doc: dict) -> str:
perplexity = doc.get("perplexity", -1)
lang = doc.get("language")
if lang not in self.cutoffs or perplexity < 0:
return "all"
pp_head, pp_tail = self.cutoffs[lang]
if perplexity < pp_head:
return "head"
if perplexity < pp_tail:
return "middle"
return "tail"
def do(self, doc: dict) -> dict:
doc["bucket"] = self.get_bucket(doc)
return doc
class DropKeys(jsonql.Transformer):
def __init__(self, *keys):
super().__init__()
self.keys = keys
def do(self, document: dict) -> Optional[dict]:
if not document:
return None
for key in self.keys:
document.pop(key, None)
return document
class RemoveSmall(jsonql.Transformer):
def __init__(self, field, min_len):
super().__init__()
self.field = field
self.min_len = min_len
self.removed = 0
def do(self, document: dict) -> Optional[dict]:
if not document:
return None
content = document.get(self.field)
if not content or len(content) < self.min_len:
self.removed += 1
return None
return document
def summary(self):
r, n = self.removed, self.processed
ratio = r / n if n else 0
return [f"Removed {r} small documents out of {n} ({ratio:.1%})"]
def perplexity_to_bin(file: Path, output: Path, models, tok_field: str):
pp_field = "perplexity"
lm = DocLM(models, tok_field, output_field=pp_field)
stats: List[float] = []
max_stats = 1_000_000
batch_size = 100_000
i = 0
batch = []
with open(output, "wb") as o:
for doc in jsonql.read_jsons(file):
i += 1
pp = lm(doc)[pp_field]
if len(stats) < max_stats:
stats.append(pp)
batch.append(pp)
if len(batch) >= batch_size:
np.array(batch, dtype=np.float32).tofile(o)
batch = []
if len(batch) > 0:
np.array(batch, dtype=np.float32).tofile(o)
if __name__ == "__main__":
args = get_args()
output = Path(args["output"])
if output.suffix == ".bin":
perplexity_to_bin(args["file"], output, args["models"], args["field"])
else:
jsonql.run_pipe(DocLM, args)
| cc_net-main | cc_net/perplexity.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
| cc_net-main | cc_net/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import time
from typing import Dict, Optional
import sacremoses # type: ignore
from cc_net import jsonql, text_normalizer
class RobustTokenizer(jsonql.Transformer):
"""Moses tokenizer with the expected preprocessing."""
LANG_WITHOUT_ACCENT = {"en", "my"}
def __init__(self, lang: str):
super().__init__()
self.lang = lang
self.moses = sacremoses.MosesTokenizer(lang)
self.rm_accent = lang in self.LANG_WITHOUT_ACCENT
self.ready = True
def do(self, text: str):
text = text_normalizer.normalize(
text, accent=self.rm_accent, case=False, numbers=False, punct=True
)
text = text_normalizer.normalize_spacing_for_tok(text, language=self.lang)
return self.moses.tokenize(text, return_str=True, escape=False)
class DocTokenizer(jsonql.Transformer):
"""Tokenize the text found in `output_field and store the result in `output_field`."""
def __init__(
self,
field: str,
output_field: str = "tokenized",
language_field: str = "language",
):
super().__init__()
self.field = field
self.output_field = output_field
self.language_field = language_field
self.n_docs = 0
self.tokenizers: Dict[str, RobustTokenizer] = {}
def get_tokenizer(self, lang: str) -> Optional[RobustTokenizer]:
cache = self.tokenizers
if lang in cache:
return cache[lang]
if lang in ("th", "zh", "ja"):
# TODO find a tokenizer for those languages
return None
cache[lang] = RobustTokenizer(lang)
return cache[lang]
def do(self, document):
lang = document[self.language_field]
tok = self.get_tokenizer(lang)
if not tok:
return document
self.n_docs += 1
lines = document[self.field].split("\n")
tokenized = "\n".join(tok(l) for l in lines)
document[self.output_field] = tokenized
return document
def summary(self):
delay = (time.time() - self.start_time) / 3600
speed = self.n_docs / delay
return [
f"Tokenized {self.n_docs:_} documents in {delay:.2}h ({speed:.1} doc/s)."
]
| cc_net-main | cc_net/tokenizer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Tools to remove duplicate paragraphs across one or several shards.
"""
import argparse
import gc
import hashlib
import logging
import multiprocessing
import os
import tempfile
import time
from pathlib import Path
from typing import Iterable, List, Optional, Set, Union
import numpy as np
from cc_net import jsonql
from cc_net.flat_hash_set import HASH_TYPE, AbstractDedupHashSet, FlatHashSet
from cc_net.jsonql import mem_footprint_gb
from cc_net.text_normalizer import normalize_for_dedup
BYTE_ORDER = "little"
HASH_SIZE = HASH_TYPE(0).nbytes
DISABLE_MULTI_PROCESSING = False
FilesOrDir = Union[List[Path], Path]
def get_args():
parser = argparse.ArgumentParser(
description="Read a set of json files and allow to query them",
parents=[jsonql.io_parser()],
)
parser.add_argument("--field", type=str, default="raw_content")
parser.add_argument("--output_hashes", type=str)
parser.add_argument("--no_finalize", action="store_false", dest="finalize")
# parser.add_argument("--mem_gb", type=int)
parser.add_argument("--hashes", type=str)
return vars(parser.parse_args())
def _b2i(b: bytes) -> int:
return np.frombuffer(b[:HASH_SIZE], dtype=HASH_TYPE, count=1, offset=0).item(0)
def str_hash(s: str) -> int:
h = hashlib.sha1(bytes(s, encoding="utf-8"))
return _b2i(h.digest())
log = logging.getLogger(__name__).info
def run_par(processes):
# This is different from multiprocessing.map since it allows for kwargs.
processes = list(processes)
if len(processes) == 1 or DISABLE_MULTI_PROCESSING:
for f, args, kwargs in processes:
f(*args, **kwargs)
return
log(f"Starting {len(processes)} subprocess")
processes = [
multiprocessing.Process(target=f, args=a, kwargs=kw) for (f, a, kw) in processes
]
for p in processes:
p.start()
for p in processes:
p.join()
failed = 0
for p in processes:
if p.exitcode != 0:
log(f"Process failed with code {p.exitcode}: {p}")
failed += 1
assert failed == 0, f"{failed} processes failed..."
def split_file(file, n_splits):
for i in range(n_splits):
yield jsonql.SplitFile(file, i, n_splits)
def merge(hashes_1, hashes_2, output):
if isinstance(hashes_1, str):
h1 = FlatHashSet()
h1.load(hashes_1)
else:
h1 = hashes_1
if isinstance(hashes_2, str):
h2 = FlatHashSet()
h2.load(hashes_2)
else:
h2 = hashes_2
h2_np = np.fromiter(h2.keys(), dtype=FlatHashSet.dtype, count=len(h2))
dup = h1.__contains__(h2_np)
# Dups between h1 and h2 will be set to 1, keys unique to h2 are copied to
# h1 with their value.
h1[h2_np] = dup
if output:
h1.dump(output)
return h1
def merge_shard(hash_files, output):
h = FlatHashSet()
h.load(hash_files[0])
for hash_file in hash_files[1:]:
h = merge(h, hash_file, output=None)
print(f"Merged {hash_file}. We now have {len(h)} hashes.")
h.dump(output)
print(f"Saved {len(h)} hashes to {output}.")
def _dump_sentence_hashes(source: Path, output: Path, field: str):
treated = 0
started = time.time()
with open(output, "wb") as o:
for doc in jsonql.read_jsons(source):
content = doc.get(field)
if not content:
continue
h = compute_hashes(content)
if h is None:
continue
h.tofile(o)
treated += 1
if treated % 100_000 == 0:
delay = time.time() - started
log(
f"Computed {treated} documents hashes in {delay / 3600:.2f}h ({treated / delay} doc / s)"
)
def _remove_duplicate_hashes(duplicates, source, output):
batch_size = 100_000
n_lines, n_lines_kept = 0, 0
with open(source, "rb") as f, open(output, "wb") as o:
log(f"Opening {source} with mode rb")
log(f"Opening {output} with mode wb")
while True:
hashes = np.fromfile(f, dtype=HASH_TYPE, count=batch_size)
if hashes.size == 0:
break
keep = duplicates[hashes] < 1
kept = keep.sum()
hashes *= keep
hashes.tofile(o)
n_lines += hashes.size
n_lines_kept += kept
removed = n_lines - n_lines_kept
selectivity = n_lines_kept / n_lines if n_lines else 0
log(f"Removed {removed} duplicate hashes with selectivity: {selectivity:3.1%}")
def remove_duplicates_sharded(
files: List[Path],
outputs: List[Path],
hashes_dir: FilesOrDir,
field: str,
group_hashes: int = 1,
tmp_dir: Path = None,
min_len: int = 0,
):
"""Remove duplicates in several passes, when all hashes don't fit in RAM.
Note: The current implementation is not doing a 'perfect' deduplication.
If a hash appear exactly once in each shard of hashes it won't be detected
as a duplicate. This can be fixed if hashes are fully dedup beforehand.
"""
assert len(files) == len(outputs)
if isinstance(hashes_dir, list):
hashes_files = hashes_dir
else:
hashes_files = sorted(
h for h in Path(hashes_dir).iterdir() if h.suffix == ".bin"
)
assert len(hashes_files) > 0, f"no hashes files found in: {hashes_dir}"
if len(hashes_files) <= group_hashes:
log(f"All hashes can be done in one pass, using DuplicatesRemover on {files}")
rm_dups = DuplicatesRemover(field, hashes_files)
rm_dups._prepare()
run_par(
(jsonql.run_pipes, (rm_dups,), dict(file=f, output=o))
for f, o in zip(files, outputs)
)
return
log(f"Starting deduplicate_sharded on {files}.")
tmp_directory = tempfile.TemporaryDirectory(dir=str(tmp_dir) if tmp_dir else None)
def tmp_files(i):
return [
Path(tmp_directory.name) / (f.name.split(".")[0] + f".{i}.bin")
for f in files
]
last = tmp_files(0)
run_par((_dump_sentence_hashes, (f, tmp, field), {}) for f, tmp in zip(files, last))
if isinstance(hashes_dir, list):
hashes_files = hashes_dir
else:
hashes_files = sorted(
h for h in Path(hashes_dir).iterdir() if h.suffix == ".bin"
)
for i, group in enumerate(jsonql.grouper(hashes_files, group_hashes)):
hashes = FlatHashSet()
for h in group:
hashes.load(h)
log(f"Loaded {h}, up to {len(hashes)} hashes ({mem_footprint_gb()}GB)")
intermediates = tmp_files(i + 1)
# Remove hashes in parallel. Since modern OS have "copy-on-write" and
# `hashes` is read-only, we will only have one version of it in RAM.
run_par(
(_remove_duplicate_hashes, (hashes, f, tmp), {})
for f, tmp in zip(last, intermediates)
)
# Force hashes to be freed, before we start allocating a new one.
del hashes
gc.collect()
for tmp in last:
os.remove(tmp)
last = intermediates
def finalize(source, dedup_hashes, min_len):
n_chars, n_chars_kept = 0, 0
with open(dedup_hashes, "rb") as hashes:
for doc in jsonql.read_jsons(source):
content = doc.get(field)
if not content or len(content) < min_len:
continue
sentences = content.split("\n")
doc_hashes = np.fromfile(hashes, dtype=HASH_TYPE, count=len(sentences))
chars, kept_chars = finalize_doc(doc, field, doc_hashes)
n_chars += chars
n_chars_kept += kept_chars
yield doc
selectivity = n_chars_kept / n_chars if n_chars else 0
log(f"Kept {n_chars_kept} chars out of {n_chars} ({selectivity:.1%}).")
dedup_hashes = last
run_par(
[
(
jsonql.run_pipe,
(finalize,),
dict(kwargs=dict(dedup_hashes=h, min_len=min_len), file=f, output=o),
)
for h, f, o in zip(dedup_hashes, files, outputs)
]
)
tmp_directory.cleanup()
def compute_hashes(content) -> Optional[np.ndarray]:
if not content:
return None
lines = content.split("\n")
# save hashes as bytes but reinterpret them as uint64.
hashes = np.fromiter(
(
hashlib.sha1(bytes(normalize_for_dedup(l), encoding="utf-8")).digest()[
:HASH_SIZE
]
for l in lines
),
dtype=np.dtype((bytes, HASH_SIZE)),
count=len(lines),
)
return np.ndarray(dtype=HASH_TYPE, buffer=hashes.data, shape=hashes.shape)
def finalize_doc(doc, field, hashes=None):
content = doc.get(field)
lines = content.split("\n")
n_chars = len(content)
if "original_nlines" not in doc:
doc["original_nlines"] = doc.get("nlines", len(lines))
if "original_length" not in doc:
doc["original_length"] = doc.get("length", n_chars)
if hashes is None:
hashes = doc.pop(field + "_hash")
# Remove duplicates inside doc
seen: Set[int] = set()
original_line_ids = doc.get("line_ids", range(len(hashes)))
line_ids = []
new_lines = []
for l, line, h in zip(original_line_ids, lines, hashes):
if h not in seen and h != 0:
line_ids.append(l)
new_lines.append(line)
seen.add(h)
doc[field] = "\n".join(new_lines)
doc["nlines"] = len(line_ids)
n_chars_kept = len(doc[field])
doc["length"] = n_chars_kept
doc["line_ids"] = line_ids
return n_chars, n_chars_kept
class HashesCollector(jsonql.Transformer):
"""
Collect all hashes found of lines found in the `field` of the source documents.
"""
parallelisable = False
def __init__(
self, field: str, output: Path = None, hashes: AbstractDedupHashSet = None
):
super().__init__()
self.n_lines = 0
self.field = field
self.output = output
self.hashes = FlatHashSet() if hashes is None else hashes
self.num_hashes_end = 0
self.num_hashes_start = len(self.hashes)
def summary(self) -> List[str]:
summ = super().summary()
h = self.num_hashes_end if self.hashes is None else len(self.hashes)
h = (h - self.num_hashes_start) // 1000
max_mem = mem_footprint_gb()
n = self.n_lines // 1000
summ.append(
f"Found {h:_}k unique hashes over {n:_}k lines. Using {max_mem:.1f}GB of RAM."
)
return summ
def do(self, doc: dict) -> None:
doc_hashes = compute_hashes(doc.get(self.field))
if doc_hashes is None:
return
self.hashes.add(doc_hashes)
self.n_lines += doc_hashes.size
def close(self):
if self.output and self.hashes:
self.hashes.dump(self.output)
self.log(f"Saved {len(self.hashes)} hashes to {self.output}")
# Save the number of hashes.
self.num_hashes_end = len(self.hashes)
# Free up mem even if the transformer is kept somewhere else.
self.hashes = None # type: ignore
class DuplicatesRemover(jsonql.Transformer):
"""DuplicatesRemover"""
# The hashes can't be pickled so they will have to be read back from disk.
warn_when_pickling = True
def __init__(self, field: str, hashes_files: List[Path], collect: bool = False):
"""
Remove duplicates
"""
super().__init__()
self.field = field
self.collect = collect
self.hashes_files = hashes_files
self.duplicates: Optional[AbstractDedupHashSet] = None
self.n_lines, self.n_lines_kept = 0, 0
self.n_chars, self.n_chars_kept = 0, 0
def _prepare(self):
if self.duplicates is not None:
return
self.duplicates = FlatHashSet()
start = time.time()
for h in self.hashes_files:
shard_start = time.time()
self.duplicates.load(str(h))
delay = time.time() - shard_start
self.log(
f"Loaded hashes from {h} ({mem_footprint_gb():.3f}GB total, took {delay / 60:.1}m)"
)
delay = time.time() - start
self.log(
f"Loaded {len(self.duplicates):_d} hashes from {len(self.hashes_files)} files. ({mem_footprint_gb():.1f}GB total, took {delay / 60:.1}m)"
)
def do(self, doc: dict) -> Optional[dict]:
content = doc.get(self.field)
if not content:
return None
doc_hashes = compute_hashes(content)
assert self.duplicates is not None
seen = (
self.duplicates.add(doc_hashes)
if self.collect
else self.duplicates[doc_hashes]
)
keep = seen < True
kept = keep.sum()
if kept == 0:
return None
doc_hashes = doc_hashes * keep
self.n_lines += keep.size
self.n_lines_kept += kept
chars, kept_chars = finalize_doc(doc, self.field, hashes=doc_hashes)
self.n_chars += chars
self.n_chars_kept += kept_chars
return doc
def summary(self) -> List[str]:
summ = super().summary()
end_time = time.time()
n_lines_kept, n_lines, n_docs = self.n_lines_kept, self.n_lines, self.processed
speed = n_docs / (end_time - self.start_time)
summ.append(
f"Processed {self.n_lines} lines in {n_docs} docs. [{speed:.1f} doc/s]"
)
selectivity = self.n_lines_kept / self.n_lines if n_lines else 0
summ.append(f"Kept {n_lines_kept} lines out of {n_lines} ({selectivity:.1%}).")
n_chars_kept, n_chars = self.n_chars_kept, self.n_chars
selectivity = n_chars_kept / n_chars if n_chars else 0
summ.append(f"Kept {n_chars_kept} chars out of {n_chars} ({selectivity:.1%}).")
return summ
def deduplicate(
file: jsonql.ReadableFileLike, field: str = "raw_content"
) -> Iterable[dict]:
"""Remove duplicates of the given file (but keep the first occurence)."""
dup_remover = DuplicatesRemover(field, [], collect=True)
return dup_remover.map(jsonql.read_jsons(file))
def deduplicate_two_pass(
file: jsonql.FileDescriptor, field: str = "raw_content"
) -> Iterable[dict]:
"""Remove duplicates of the given file (even removing the first occurence).
This is what is done in the paper, and in mine.py
"""
try:
if isinstance(file, Path):
hash_file: Path = file.with_suffix(".bin")
else:
hash_file = jsonql._tmp(Path("hashes.bin"))
jsonql.run_pipes(
jsonql.JsonReader(), HashesCollector(field, output=hash_file), file=file
)
dup_remover = DuplicatesRemover(field, [hash_file])
return dup_remover.map(jsonql.read_jsons(file))
finally:
if hash_file.exists():
hash_file.unlink()
| cc_net-main | cc_net/dedup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import contextlib
import functools
import logging
import re
import tempfile
import time
import urllib.request
from pathlib import Path
from typing import ContextManager, Iterable, Iterator, List, Optional, Sequence
from urllib.parse import urlparse
import func_argparse
from bs4 import BeautifulSoup # type: ignore
from cc_net import jsonql
WET_URL_ROOT = "https://data.commoncrawl.org"
WET_URL_MIRROR_ROOTS = ["s3://commoncrawl"]
logger = logging.getLogger(__name__)
def cc_wet_paths_url(dump_id: str) -> str:
return "/".join([WET_URL_ROOT, "crawl-data", "CC-MAIN-" + dump_id, "wet.paths.gz"])
@functools.lru_cache()
def cc_segments(dump_id: str, cache_dir: Path = None) -> List[str]:
wet_paths = cc_wet_paths_url(dump_id)
cache_dir = cache_dir or jsonql._tmp_dir()
wet_paths_cache = cache_dir / f"wet_{dump_id}.paths.gz"
f = jsonql.open_remote_file(wet_paths, cache=wet_paths_cache)
return [segment.strip() for segment in f]
def list_dumps() -> List[str]:
home_page = BeautifulSoup(
urllib.request.urlopen("http://index.commoncrawl.org/"), features="html.parser"
)
dumps = [a.get("href").strip("/") for a in home_page.findAll("a")]
dumps = [a[8:] for a in dumps if re.match(r"^CC-MAIN-\d\d\d\d-\d\d$", a)]
return sorted(dumps)
def ls():
for dump in list_dumps():
print(dump, "->", cc_wet_paths_url(dump))
def parse_doc(headers: List[str], doc: List[str]) -> Optional[dict]:
"""Headers format is:
WARC/1.0
WARC-Type: conversion
WARC-Target-URI: [url]
WARC-Date: [crawldate: 2019-02-15T19:15:59Z]
WARC-Record-ID: <urn:uuid:8865156e-d5f1-4734-9c68-4b46eaf2bb7e>
WARC-Refers-To: <urn:uuid:340152e2-65cf-4143-b522-8ce4e2d069d7>
WARC-Block-Digest: sha1:S3DTWCONT2L6ORTGCY2KXEZ37LNBB7V2
Content-Type: text/plain
Content-Length: 7743
"""
if not headers or not doc:
return None
try:
warc_type = headers[1].split()[1]
if warc_type != "conversion":
return None
url = headers[2].split()[1]
date = headers[3].split()[1]
digest = headers[6].split()[1]
length = int((headers[8] if headers[8].startswith("Content-Length") else headers[9]).split()[1])
except Exception as e:
logger.warning("Can't parse header:", e, headers)
return None
# Docs are separated by two empty lines.
last = None
if not doc[-1] and not doc[-2]:
last = -2
title, doc = doc[0], doc[1:last]
return {
"url": url,
"date_download": date,
"digest": digest,
"length": length,
"nlines": len(doc),
"source_domain": urlparse(url).netloc,
"title": title,
"raw_content": "\n".join(doc),
}
def group_by_docs(warc_lines: Iterable[str]) -> Iterable[dict]:
doc: List[str] = []
headers, read_headers = [], True
for warc in warc_lines:
warc = warc.strip()
if read_headers:
headers.append(warc)
read_headers = warc != ""
continue
if warc == "WARC/1.0":
# We reached the beginning of the new doc.
parsed = parse_doc(headers, doc)
if parsed is not None:
yield parsed
headers, doc, read_headers = [warc], [], True
continue
doc.append(warc)
# Return the last document
if doc:
parsed = parse_doc(headers, doc)
if parsed is not None:
yield parsed
def parse_warc_file(lines: Iterable[str], min_len: int = 1) -> Iterator[dict]:
n_doc = 0
n_ok = 0
for doc in group_by_docs(lines):
n_doc += 1
if not doc or len(doc["raw_content"]) < min_len:
continue
n_ok += 1
yield doc
if n_doc > 0:
logger.info(f"Kept {n_ok:_d} documents over {n_doc:_d} ({n_ok / n_doc:.1%}).")
else:
logger.info(f"Found no documents")
def dl(
dump: str,
shard: int,
num_shards: int,
output: Path = None,
num_segments_per_shard: int = 0,
):
"""Download a shard of the common crawl, and export it to json.
Arguments:
output: filename of the output file
dump: CC dump id
shard: id of the shard
num_shards: total number of shards
num_segments_per_shard: manual control of the number of segment per shard.
"""
reader = CCShardReader(dump, shard, num_shards, num_segments_per_shard)
jsonql.run_pipes(inputs=reader, output=output)
logger.info(f"Done. {output} is ready.")
class CCSegmentsReader(Iterable[dict]):
def __init__(
self, segments: Sequence[str], min_len: int = 0, cache_dir: Path = None
):
self._segments = segments
self.min_len = min_len
if cache_dir is not None:
cache_dir = Path(cache_dir)
cache_dir.mkdir(exist_ok=True)
self.cache_dir = cache_dir
self.retrieved_segments = 0
def segment_url(self, segment: str):
return "/".join((WET_URL_ROOT, segment))
@property
def segments(self) -> Sequence[str]:
return self._segments
def open_segment(self, segment: str) -> Iterable[str]:
url = "/".join((WET_URL_ROOT, segment))
mirror_urls = ["/".join((root, segment)) for root in WET_URL_MIRROR_ROOTS]
file: Optional[Path] = None
if self.cache_dir:
file = self.cache_dir / segment.split("/")[-1]
if not file or not file.exists():
self.retrieved_segments += 1
return jsonql.open_remote_file(url, cache=file, mirror_urls=mirror_urls)
def __iter__(self) -> Iterator[dict]:
n = len(self.segments)
for i, segment in enumerate(self.segments):
start = time.time()
# TODO: start downloading the next segment in the background
for doc in parse_warc_file(self.open_segment(segment), self.min_len):
doc["cc_segment"] = segment
yield doc
if i + 1 >= n:
continue
end = time.time()
delay = (end - start) / 3600 * (n - 1 - i)
logger.info(
f"Parsed {i + 1} / {n} files. Estimated remaining time: {delay:.1f}h"
)
class CCShardReader(CCSegmentsReader):
def __init__(
self,
dump: str,
shard: int,
num_shards: int = -1,
num_segments_per_shard: int = 40,
min_len: int = 300,
cache_dir: Path = None,
):
"""Downloads a shard of Common Crawl, and yields dict.
Arguments:
dump: CC dump id
shard: id of the shard
num_shards: total number of shards
num_segments_per_shard: if set will limit the number of files by shard.
Useful for testing.
"""
super().__init__([], min_len=min_len, cache_dir=cache_dir)
self.dump = dump
self.shard = shard
assert num_shards > 0 or num_segments_per_shard > 0
self.num_shards = num_shards
self.num_segments_per_shard = num_segments_per_shard
@property
def segments(self) -> Sequence[str]:
# Delaying the initialization allows to delay the looking up of the WET files
if self._segments:
return self._segments
segments = cc_segments(self.dump, self.cache_dir)
n = len(segments)
if self.num_shards < 0:
self.num_shards = n // self.num_segments_per_shard
i_min = (self.shard * n) // self.num_shards
i_max = ((self.shard + 1) * n) // self.num_shards
if self.num_segments_per_shard > 0:
i_max = min(i_max, i_min + self.num_segments_per_shard)
self._segments = segments[i_min:i_max]
return self._segments
def _tmp(prefix: str = None, suffix: str = None, dir: Path = None) -> Path:
_, tmp_path = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
return Path(tmp_path)
@contextlib.contextmanager
def timer(name: str = "-"):
start = time.time()
yield None
delay = time.time() - start
print(f"{name} took {delay:.1f}s")
def benchmark(tmp_path: Path):
segments = [
"crawl-data/CC-MAIN-2019-09/segments/1550249406966.99/wet/CC-MAIN-20190222220601-20190223002601-00441.warc.wet.gz"
]
seg_file = tmp_path / "CC-MAIN-20190222220601-20190223002601-00441.warc.wet.gz"
with timer("from network"):
list(CCSegmentsReader(segments))
with timer("from network, with caching"):
list(CCSegmentsReader(segments, cache_dir=tmp_path))
assert seg_file.exists()
with timer("from disk"):
CCSegmentsReader(segments, cache_dir=tmp_path)
seg_file.unlink()
if __name__ == "__main__":
func_argparse.main(ls, dl)
| cc_net-main | cc_net/process_wet_file.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.