python_code | repo_name | file_path
---|---|---|
"""
download pretrained weights to ./weights
wget https://dl.fbaipublicfiles.com/dino/dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth
wget https://dl.fbaipublicfiles.com/dino/dino_deitsmall8_300ep_pretrain/dino_deitsmall8_300ep_pretrain.pth
"""
import sys
sys.path.append("maskcut")
import numpy as np
import PIL.Image as Image
import torch
from scipy import ndimage
from colormap import random_color
import dino
from third_party.TokenCut.unsupervised_saliency_detection import metric
from crf import densecrf
from maskcut import maskcut
from cog import BasePredictor, Input, Path
class Predictor(BasePredictor):
def setup(self):
"""Load the model into memory to make running multiple predictions efficient"""
# DINO pre-trained model
vit_features = "k"
self.patch_size = 8
# adapted dino.ViTFeat to load from local pretrained_path
self.backbone_base = dino.ViTFeat(
"weights/dino_vitbase8_pretrain.pth",
768,
"base",
vit_features,
self.patch_size,
)
self.backbone_small = dino.ViTFeat(
"weights/dino_deitsmall8_300ep_pretrain.pth",
384,
"small",
vit_features,
self.patch_size,
)
self.backbone_base.eval()
self.backbone_base.cuda()
self.backbone_small.eval()
self.backbone_small.cuda()
def predict(
self,
image: Path = Input(
description="Input image",
),
model: str = Input(
description="Choose the model architecture",
default="base",
choices=["small", "base"]
),
n_pseudo_masks: int = Input(
description="The maximum number of pseudo-masks per image",
default=3,
),
tau: float = Input(
description="Threshold used for producing binary graph",
default=0.15,
),
) -> Path:
"""Run a single prediction on the model"""
backbone = self.backbone_base if model == "base" else self.backbone_small
# MaskCut hyperparameters
fixed_size = 480
# get pseudo-masks with MaskCut
bipartitions, _, I_new = maskcut(
str(image),
backbone,
self.patch_size,
tau,
N=n_pseudo_masks,
fixed_size=fixed_size,
cpu=False,
)
I = Image.open(str(image)).convert("RGB")
width, height = I.size
pseudo_mask_list = []
for idx, bipartition in enumerate(bipartitions):
# post-process pseudo-masks with CRF
pseudo_mask = densecrf(np.array(I_new), bipartition)
pseudo_mask = ndimage.binary_fill_holes(pseudo_mask >= 0.5)
# filter out masks whose pseudo-mask changes significantly after the CRF
mask1 = torch.from_numpy(bipartition).cuda()
mask2 = torch.from_numpy(pseudo_mask).cuda()
if metric.IoU(mask1, mask2) < 0.5:
pseudo_mask = pseudo_mask * -1
# construct binary pseudo-masks
pseudo_mask[pseudo_mask < 0] = 0
pseudo_mask = Image.fromarray(np.uint8(pseudo_mask * 255))
pseudo_mask = np.asarray(pseudo_mask.resize((width, height)))
pseudo_mask = pseudo_mask.astype(np.uint8)
upper = np.max(pseudo_mask)
lower = np.min(pseudo_mask)
thresh = upper / 2.0
pseudo_mask[pseudo_mask > thresh] = upper
pseudo_mask[pseudo_mask <= thresh] = lower
pseudo_mask_list.append(pseudo_mask)
out = np.array(I)
for pseudo_mask in pseudo_mask_list:
out = vis_mask(out, pseudo_mask, random_color(rgb=True))
output_path = "/tmp/out.png"
out.save(str(output_path))
return Path(output_path)
def vis_mask(input, mask, mask_color):
fg = mask > 0.5
rgb = np.copy(input)
rgb[fg] = (rgb[fg] * 0.3 + np.array(mask_color) * 0.7).astype(np.uint8)
return Image.fromarray(rgb)
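# Hypothetical local smoke test (not part of the original Cog entry point); assumes the
# DINO weights from the module docstring are in ./weights and a CUDA GPU is available.
# "examples/demo.jpg" is a placeholder path.
if __name__ == "__main__":
    predictor = Predictor()
    predictor.setup()
    result = predictor.predict(
        image=Path("examples/demo.jpg"),
        model="base",
        n_pseudo_masks=3,
        tau=0.15,
    )
    print(f"Saved visualization to {result}")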
| CutLER-main | maskcut/predict.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
"""
A script to run multinode training with submitit.
"""
import sys
sys.path.append('./')
sys.path.append('./MaskCut')
sys.path.append('./third_party')
import argparse
import os
import uuid
from pathlib import Path
import maskcut_with_submitit as main_func
import submitit
import copy
def parse_args():
parent_parser = main_func.get_args_parser()
parser = argparse.ArgumentParser("Submitit for MaskCut", parents=[parent_parser])
parser.add_argument("--ngpus", default=1, type=int, help="Number of gpus to request on each node")
parser.add_argument("--nodes", default=1, type=int, help="Number of nodes to request")
parser.add_argument("--timeout", default=1400, type=int, help="Duration of the job")
parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.")
parser.add_argument("--partition", default="learnfair", type=str, help="Partition where to submit")
parser.add_argument("--use_volta32", action='store_true', help="Big models? Use this")
parser.add_argument('--comment', default="", type=str,
help='Comment to pass to scheduler, e.g. priority message')
# Remove the following if the main file already has them
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--gpu', default=0, type=int)
parser.add_argument('--rank', default=0, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--tolerance', default=1, type=int, help='tolerance for finding contours')
return parser.parse_args()
def get_shared_folder() -> Path:
user = os.getenv("USER")
if Path("/checkpoint/").is_dir():
p = Path(f"/checkpoint/{user}/experiments/maskcut/")
p.mkdir(exist_ok=True)
return p
raise RuntimeError("No shared folder available")
def get_init_file():
# Init file must not exist, but its parent dir must exist.
os.makedirs(str(get_shared_folder()), exist_ok=True)
init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init"
if init_file.exists():
os.remove(str(init_file))
return init_file
# Use a for loop to build the job array and submit all jobs in a single array
class Trainer(object):
def __init__(self, args):
self.args = args
def __call__(self):
self._setup_gpu_args()
main_func.main(self.args)
def checkpoint(self):
import os
import submitit
from pathlib import Path
self.args.dist_url = get_init_file().as_uri()
checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth")
if os.path.exists(checkpoint_file):
self.args.resume = checkpoint_file
print("Requeuing ", self.args)
empty_trainer = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _setup_gpu_args(self):
import submitit
from pathlib import Path
job_env = submitit.JobEnvironment()
self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id)))
self.args.gpu = job_env.local_rank
self.args.rank = job_env.global_rank
self.args.world_size = job_env.num_tasks
print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
def main():
args = parse_args()
if args.job_dir == "":
args.job_dir = get_shared_folder() / "%j"
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
num_gpus_per_node = args.ngpus
nodes = args.nodes
timeout_min = args.timeout
partition = args.partition
kwargs = {}
if args.use_volta32:
kwargs['slurm_constraint'] = 'volta32gb'
if args.comment:
kwargs['slurm_comment'] = args.comment
executor.update_parameters(
mem_gb=40 * num_gpus_per_node, # 40
gpus_per_node=num_gpus_per_node,
tasks_per_node=num_gpus_per_node, # one task per GPU
cpus_per_task=8, # default 8
nodes=nodes,
timeout_min=timeout_min, # max is 60 * 72
# Below are cluster dependent parameters
slurm_partition=partition,
slurm_signal_delay_s=120,
**kwargs
)
executor.update_parameters(name="MaskCut")
# Since it is often necessary to submit over 100 jobs simultaneously,
# submitting them as a job array is more efficient.
args.dist_url = get_init_file().as_uri()
args.output_dir = args.job_dir
print(args.output_dir)
# list_folders = list(range(0, 500))
end_idx = (1000 - args.job_index) // args.num_folder_per_job + 1
list_folders = list(range(args.job_index, end_idx))
jobs = []
args_list = []
for folder_index in list_folders:
args_copy = copy.deepcopy(args)
args_copy.job_index = folder_index
args_list.append(args_copy)
with executor.batch():
for args in args_list:
trainer = Trainer(args)
job = executor.submit(trainer)
jobs.append(job)
for job in jobs:
print("Submitted job_id:", job.job_id)
if __name__ == "__main__":
main()
| CutLER-main | maskcut/run_with_submitit_maskcut_array.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# merge all ImageNet annotation files as a single one.
import os
import json
import argparse
if __name__ == "__main__":
# load model arguments
parser = argparse.ArgumentParser(description='Merge json files')
parser.add_argument('--base-dir', type=str,
default='annotations/',
help='Dir to the generated annotation files with MaskCut')
parser.add_argument('--save-path', type=str, default="imagenet_train_fixsize480_tau0.15_N3.json",
help='Path to save the merged annotation file')
# following arguments should be consistent with maskcut.py or maskcut_with_submitit.py (if using submitit)
parser.add_argument('--num-folder-per-job', type=int, default=1,
help='Number of folders per json file')
parser.add_argument('--fixed-size', type=int, default=480,
help='rescale the input images to a fixed size')
parser.add_argument('--tau', type=float, default=0.15, help='threshold used for producing binary graph')
parser.add_argument('--N', type=int, default=3, help='the maximum number of pseudo-masks per image')
args = parser.parse_args()
base_name = 'imagenet_train_fixsize{}_tau{}_N{}'.format(args.fixed_size, args.tau, args.N)
start_idx = 0
every_k = args.num_folder_per_job
missed_folders = []
tobe_merged_ann_dicts = []
# check if pseudo-masks for all 1000 ImageNet-1K folders are available.
while start_idx < 1000:
end_idx = start_idx + every_k
filename = "{}_{}_{}.json".format(base_name, start_idx, end_idx)
tobe_merged = os.path.join(args.base_dir, filename)
if not os.path.isfile(tobe_merged):
end_idx = start_idx + 1
tobe_merged_ = "{}_{}_{}.json".format(base_name, start_idx, end_idx)
if not os.path.isfile(tobe_merged_):
missed_folders.append(start_idx)
start_idx += 1
continue
else:
tobe_merged = tobe_merged_
start_idx += 1
else:
start_idx += every_k
tobe_merged_ann_dict = json.load(open(tobe_merged))
tobe_merged_ann_dicts.append(tobe_merged_ann_dict)
print("Warning: these folders are not found: ", missed_folders)
# filter out repeated image info
for idx, ann_dict in enumerate(tobe_merged_ann_dicts):
images = []
images_ids = []
for image in ann_dict['images']:
if image['id'] in images_ids:
continue
else:
images.append(image)
images_ids.append(image['id'])
ann_dict['images'] = images
# re-generate image_id and segment_id, and combine annotation info and image info
# from all annotation files
base_ann_dict = tobe_merged_ann_dicts[0]
image_id = base_ann_dict['images'][-1]['id'] + 1
segment_id = base_ann_dict['annotations'][-1]['id'] + 1
segment_id_list = [ann['id'] for ann in base_ann_dict['annotations']]
for tobe_merged_ann_dict in tobe_merged_ann_dicts[1:]:
file_name_and_id = {}
for i, image in enumerate(tobe_merged_ann_dict['images']):
file_name_and_id[str(image['id'])] = image_id
image['id'] = image_id
base_ann_dict['images'].append(image)
image_id = image_id + 1
for i, annotation_info in enumerate(tobe_merged_ann_dict['annotations']):
annotation_info["image_id"] = file_name_and_id[str(annotation_info["image_id"])]
annotation_info["id"] = segment_id
annotation_info["iscrowd"] = 0
segment_id_list.append(segment_id)
base_ann_dict['annotations'].append(annotation_info)
segment_id = segment_id + 1
segment_id = 1
for ann in base_ann_dict['annotations']:
ann["id"] = segment_id
segment_id += 1
# save the final json file.
anns = [ann['id'] for ann in base_ann_dict['annotations']]
anns_image_id = [ann['image_id'] for ann in base_ann_dict['annotations']]
json.dump(base_ann_dict, open(args.save_path, 'w'))
print("Done: {} images; {} anns.".format(len(base_ann_dict['images']), len(base_ann_dict['annotations'])))
| CutLER-main | maskcut/merge_jsons.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
import os
import sys
sys.path.append('../')
import argparse
import numpy as np
from tqdm import tqdm
import re
import datetime
import PIL
import PIL.Image as Image
import torch
import torch.nn.functional as F
from torchvision import transforms
from pycocotools import mask
import pycocotools.mask as mask_util
from scipy import ndimage
from scipy.linalg import eigh
import json
import dino
# modified by Xudong Wang based on third_party/TokenCut
from third_party.TokenCut.unsupervised_saliency_detection import utils, metric
from third_party.TokenCut.unsupervised_saliency_detection.object_discovery import detect_box
# bilateral_solver code is modified from https://github.com/poolio/bilateral_solver/blob/master/notebooks/bilateral_solver.ipynb
# from third_party.TokenCut.unsupervised_saliency_detection.bilateral_solver import BilateralSolver, BilateralGrid
# crf code is modified from https://github.com/lucasb-eyer/pydensecrf/blob/master/pydensecrf/tests/test_dcrf.py
from crf import densecrf
# Image transformation applied to all images
ToTensor = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(
(0.485, 0.456, 0.406),
(0.229, 0.224, 0.225)),])
def get_affinity_matrix(feats, tau, eps=1e-5):
# get affinity matrix via measuring patch-wise cosine similarity
feats = F.normalize(feats, p=2, dim=0)
A = (feats.transpose(0,1) @ feats).cpu().numpy()
# convert the affinity matrix to a binary one.
A = A > tau
A = np.where(A.astype(float) == 0, eps, A)
d_i = np.sum(A, axis=1)
D = np.diag(d_i)
return A, D
def second_smallest_eigenvector(A, D):
# get the second smallest eigenvector from affinity matrix
_, eigenvectors = eigh(D-A, D, subset_by_index=[1,2])
eigenvec = np.copy(eigenvectors[:, 0])
second_smallest_vec = eigenvectors[:, 0]
return eigenvec, second_smallest_vec
def get_salient_areas(second_smallest_vec):
# get the area corresponding to salient objects.
avg = np.sum(second_smallest_vec) / len(second_smallest_vec)
bipartition = second_smallest_vec > avg
return bipartition
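# Tiny worked sketch (illustrative only, not called by the pipeline): chaining the three
# helpers above on random DINO-like patch features shows the flow
# affinity matrix -> second-smallest generalized eigenvector -> foreground bipartition.
def _demo_ncut_bipartition(feat_dim=384, grid=20, tau=0.15):
    feats = torch.randn(feat_dim, grid * grid)       # (feature dim, num patches)
    A, D = get_affinity_matrix(feats, tau)
    _, second_smallest_vec = second_smallest_eigenvector(A, D)
    bipartition = get_salient_areas(second_smallest_vec)
    print(A.shape, int(bipartition.sum()), "patches marked salient")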
def check_num_fg_corners(bipartition, dims):
# check number of corners belonging to the foreground
bipartition_ = bipartition.reshape(dims)
top_l, top_r, bottom_l, bottom_r = bipartition_[0][0], bipartition_[0][-1], bipartition_[-1][0], bipartition_[-1][-1]
nc = int(top_l) + int(top_r) + int(bottom_l) + int(bottom_r)
return nc
def get_masked_affinity_matrix(painting, feats, mask, ps):
# mask out affinity matrix based on the painting matrix
dim, num_patch = feats.size()[0], feats.size()[1]
painting = painting + mask.unsqueeze(0)
painting[painting > 0] = 1
painting[painting <= 0] = 0
feats = feats.clone().view(dim, ps, ps)
feats = ((1 - painting) * feats).view(dim, num_patch)
return feats, painting
def maskcut_forward(feats, dims, scales, init_image_size, tau=0, N=3, cpu=False):
"""
Implementation of MaskCut.
Inputs
feats: the pixel/patch features of an image
dims: dimension of the map from which the features are used
scales: from image to map scale
init_image_size: size of the image
tau: threshold for graph construction
N: number of pseudo-masks per image.
"""
bipartitions = []
eigvecs = []
for i in range(N):
if i == 0:
painting = torch.from_numpy(np.zeros(dims))
if not cpu: painting = painting.cuda()
else:
feats, painting = get_masked_affinity_matrix(painting, feats, current_mask, ps)
# construct the affinity matrix
A, D = get_affinity_matrix(feats, tau)
# get the second smallest eigenvector
eigenvec, second_smallest_vec = second_smallest_eigenvector(A, D)
# get salient area
bipartition = get_salient_areas(second_smallest_vec)
# check if we should reverse the partition based on:
# 1) peak of the 2nd smallest eigvec 2) object centric bias
seed = np.argmax(np.abs(second_smallest_vec))
nc = check_num_fg_corners(bipartition, dims)
if nc >= 3:
reverse = True
else:
reverse = bipartition[seed] != 1
if reverse:
# reverse bipartition, eigenvector and get new seed
eigenvec = eigenvec * -1
bipartition = np.logical_not(bipartition)
seed = np.argmax(eigenvec)
else:
seed = np.argmax(second_smallest_vec)
# get pixels corresponding to the seed
bipartition = bipartition.reshape(dims).astype(float)
_, _, _, cc = detect_box(bipartition, seed, dims, scales=scales, initial_im_size=init_image_size)
pseudo_mask = np.zeros(dims)
pseudo_mask[cc[0],cc[1]] = 1
pseudo_mask = torch.from_numpy(pseudo_mask)
if not cpu: pseudo_mask = pseudo_mask.to('cuda')
ps = pseudo_mask.shape[0]
# check if the extra mask is heavily overlapped with the previous one or is too small.
if i >= 1:
ratio = torch.sum(pseudo_mask) / pseudo_mask.size()[0] / pseudo_mask.size()[1]
if metric.IoU(current_mask, pseudo_mask) > 0.5 or ratio <= 0.01:
pseudo_mask = np.zeros(dims)
pseudo_mask = torch.from_numpy(pseudo_mask)
if not cpu: pseudo_mask = pseudo_mask.to('cuda')
current_mask = pseudo_mask
# mask out foreground areas in previous stages
masked_out = 0 if len(bipartitions) == 0 else np.sum(bipartitions, axis=0)
bipartition = F.interpolate(pseudo_mask.unsqueeze(0).unsqueeze(0), size=init_image_size, mode='nearest').squeeze()
bipartition_masked = bipartition.cpu().numpy() - masked_out
bipartition_masked[bipartition_masked <= 0] = 0
bipartitions.append(bipartition_masked)
# upsample the eigenvec
eigvec = second_smallest_vec.reshape(dims)
eigvec = torch.from_numpy(eigvec)
if not cpu: eigvec = eigvec.to('cuda')
eigvec = F.interpolate(eigvec.unsqueeze(0).unsqueeze(0), size=init_image_size, mode='nearest').squeeze()
eigvecs.append(eigvec.cpu().numpy())
return seed, bipartitions, eigvecs
def maskcut(img_path, backbone, patch_size, tau, N=1, fixed_size=480, cpu=False):
I = Image.open(img_path).convert('RGB')
bipartitions, eigvecs = [], []
I_new = I.resize((int(fixed_size), int(fixed_size)), PIL.Image.LANCZOS)
I_resize, w, h, feat_w, feat_h = utils.resize_pil(I_new, patch_size)
tensor = ToTensor(I_resize).unsqueeze(0)
if not cpu: tensor = tensor.cuda()
feat = backbone(tensor)[0]
_, bipartition, eigvec = maskcut_forward(feat, [feat_h, feat_w], [patch_size, patch_size], [h,w], tau, N=N, cpu=cpu)
bipartitions += bipartition
eigvecs += eigvec
return bipartitions, eigvecs, I_new
def resize_binary_mask(array, new_size):
image = Image.fromarray(array.astype(np.uint8)*255)
image = image.resize(new_size)
return np.asarray(image).astype(np.bool_)
def close_contour(contour):
if not np.array_equal(contour[0], contour[-1]):
contour = np.vstack((contour, contour[0]))
return contour
def create_image_info(image_id, file_name, image_size,
date_captured=datetime.datetime.utcnow().isoformat(' '),
license_id=1, coco_url="", flickr_url=""):
"""Return image_info in COCO style
Args:
image_id: the image ID
file_name: the file name of each image
image_size: image size in the format of (width, height)
date_captured: the date this image info is created
license_id: license of this image
coco_url: url to COCO images if there is any
flickr_url: url to flickr if there is any
"""
image_info = {
"id": image_id,
"file_name": file_name,
"width": image_size[0],
"height": image_size[1],
"date_captured": date_captured,
"license": license_id,
"coco_url": coco_url,
"flickr_url": flickr_url
}
return image_info
def create_annotation_info(annotation_id, image_id, category_info, binary_mask,
image_size=None, bounding_box=None):
"""Return annotation info in COCO style
Args:
annotation_id: the annotation ID
image_id: the image ID
category_info: the information on categories
binary_mask: a 2D binary numpy array where '1's represent the object
file_name: the file name of each image
image_size: image size in the format of (width, height)
bounding_box: the bounding box for detection task. If bounding_box is not provided,
we will generate one according to the binary mask.
"""
upper = np.max(binary_mask)
lower = np.min(binary_mask)
thresh = upper / 2.0
binary_mask[binary_mask > thresh] = upper
binary_mask[binary_mask <= thresh] = lower
if image_size is not None:
binary_mask = resize_binary_mask(binary_mask.astype(np.uint8), image_size)
binary_mask_encoded = mask.encode(np.asfortranarray(binary_mask.astype(np.uint8)))
area = mask.area(binary_mask_encoded)
if area < 1:
return None
if bounding_box is None:
bounding_box = mask.toBbox(binary_mask_encoded)
rle = mask_util.encode(np.array(binary_mask[...,None], order="F", dtype="uint8"))[0]
rle['counts'] = rle['counts'].decode('ascii')
segmentation = rle
annotation_info = {
"id": annotation_id,
"image_id": image_id,
"category_id": category_info["id"],
"iscrowd": 0,
"area": area.tolist(),
"bbox": bounding_box.tolist(),
"segmentation": segmentation,
"width": binary_mask.shape[1],
"height": binary_mask.shape[0],
}
return annotation_info
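# Toy illustration (hypothetical helper, not used by the script): mirrors the RLE
# encoding performed in create_annotation_info on an 8x8 mask with a 4x4 foreground square.
def _demo_rle_encoding():
    toy_mask = np.zeros((8, 8), dtype=np.uint8)
    toy_mask[2:6, 3:7] = 1
    encoded = mask.encode(np.asfortranarray(toy_mask))
    print("area:", mask.area(encoded))        # 16
    print("bbox:", mask.toBbox(encoded))      # [x, y, w, h] = [3., 2., 4., 4.]
    rle = mask_util.encode(np.array(toy_mask[..., None], order="F", dtype="uint8"))[0]
    print("counts:", rle["counts"].decode("ascii"))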
# necessary info used for COCO-style annotations
INFO = {
"description": "ImageNet-1K: pseudo-masks with MaskCut",
"url": "https://github.com/facebookresearch/CutLER",
"version": "1.0",
"year": 2023,
"contributor": "Xudong Wang",
"date_created": datetime.datetime.utcnow().isoformat(' ')
}
LICENSES = [
{
"id": 1,
"name": "Apache License",
"url": "https://github.com/facebookresearch/CutLER/blob/main/LICENSE"
}
]
# only one class, i.e. foreground
CATEGORIES = [
{
'id': 1,
'name': 'fg',
'supercategory': 'fg',
},
]
convert = lambda text: int(text) if text.isdigit() else text.lower()
natrual_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
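# Illustration (comment only): natural sorting orders numeric chunks numerically,
# e.g. sorted(["img10", "img2"], key=natrual_key) gives ["img2", "img10"],
# whereas a plain lexicographic sort would put "img10" first.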
output = {
"info": INFO,
"licenses": LICENSES,
"categories": CATEGORIES,
"images": [],
"annotations": []}
category_info = {
"is_crowd": 0,
"id": 1
}
if __name__ == "__main__":
parser = argparse.ArgumentParser('MaskCut script')
# default arguments
parser.add_argument('--out-dir', type=str, help='output directory')
parser.add_argument('--vit-arch', type=str, default='small', choices=['base', 'small'], help='which architecture')
parser.add_argument('--vit-feat', type=str, default='k', choices=['k', 'q', 'v', 'kqv'], help='which features')
parser.add_argument('--patch-size', type=int, default=16, choices=[16, 8], help='patch size')
parser.add_argument('--nb-vis', type=int, default=20, choices=[1, 200], help='nb of visualization')
parser.add_argument('--img-path', type=str, default=None, help='single image visualization')
# additional arguments
parser.add_argument('--dataset-path', type=str, default="imagenet/train/", help='path to the dataset')
parser.add_argument('--tau', type=float, default=0.2, help='threshold used for producing binary graph')
parser.add_argument('--num-folder-per-job', type=int, default=1, help='the number of folders each job processes')
parser.add_argument('--job-index', type=int, default=0, help='the index of the job (for imagenet: in the range of 0 to 1000/args.num_folder_per_job-1)')
parser.add_argument('--fixed_size', type=int, default=480, help='rescale the input images to a fixed size')
parser.add_argument('--pretrain_path', type=str, default=None, help='path to pretrained model')
parser.add_argument('--N', type=int, default=3, help='the maximum number of pseudo-masks per image')
parser.add_argument('--cpu', action='store_true', help='use cpu')
args = parser.parse_args()
if args.pretrain_path is not None:
url = args.pretrain_path
if args.vit_arch == 'base' and args.patch_size == 8:
if args.pretrain_path is None:
url = "https://dl.fbaipublicfiles.com/dino/dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth"
feat_dim = 768
elif args.vit_arch == 'small' and args.patch_size == 8:
if args.pretrain_path is None:
url = "https://dl.fbaipublicfiles.com/dino/dino_deitsmall8_300ep_pretrain/dino_deitsmall8_300ep_pretrain.pth"
feat_dim = 384
backbone = dino.ViTFeat(url, feat_dim, args.vit_arch, args.vit_feat, args.patch_size)
msg = 'Load {} pre-trained feature...'.format(args.vit_arch)
print (msg)
backbone.eval()
if not args.cpu:
backbone.cuda()
img_folders = os.listdir(args.dataset_path)
if args.out_dir is not None and not os.path.exists(args.out_dir) :
os.mkdir(args.out_dir)
start_idx = max(args.job_index*args.num_folder_per_job, 0)
end_idx = min((args.job_index+1)*args.num_folder_per_job, len(img_folders))
image_id, segmentation_id = 1, 1
image_names = []
for img_folder in img_folders[start_idx:end_idx]:
args.img_dir = os.path.join(args.dataset_path, img_folder)
img_list = sorted(os.listdir(args.img_dir))
for img_name in tqdm(img_list) :
# get image path
img_path = os.path.join(args.img_dir, img_name)
# get pseudo-masks for each image using MaskCut
try:
bipartitions, _, I_new = maskcut(img_path, backbone, args.patch_size, \
args.tau, N=args.N, fixed_size=args.fixed_size, cpu=args.cpu)
except Exception:
print(f'Skipping {img_name}')
continue
I = Image.open(img_path).convert('RGB')
width, height = I.size
for idx, bipartition in enumerate(bipartitions):
# post-process pseudo-masks with CRF
pseudo_mask = densecrf(np.array(I_new), bipartition)
pseudo_mask = ndimage.binary_fill_holes(pseudo_mask>=0.5)
# filter out masks whose pseudo-mask changes significantly after the CRF
mask1 = torch.from_numpy(bipartition)
mask2 = torch.from_numpy(pseudo_mask)
if not args.cpu:
mask1 = mask1.cuda()
mask2 = mask2.cuda()
if metric.IoU(mask1, mask2) < 0.5:
pseudo_mask = pseudo_mask * -1
# construct binary pseudo-masks
pseudo_mask[pseudo_mask < 0] = 0
pseudo_mask = Image.fromarray(np.uint8(pseudo_mask*255))
pseudo_mask = np.asarray(pseudo_mask.resize((width, height)))
# create coco-style image info
if img_name not in image_names:
image_info = create_image_info(
image_id, "{}/{}".format(img_folder, img_name), (height, width, 3))
output["images"].append(image_info)
image_names.append(img_name)
# create coco-style annotation info
annotation_info = create_annotation_info(
segmentation_id, image_id, category_info, pseudo_mask.astype(np.uint8), None)
if annotation_info is not None:
output["annotations"].append(annotation_info)
segmentation_id += 1
image_id += 1
# save annotations
if len(img_folders) == args.num_folder_per_job and args.job_index == 0:
json_name = '{}/imagenet_train_fixsize{}_tau{}_N{}.json'.format(args.out_dir, args.fixed_size, args.tau, args.N)
else:
json_name = '{}/imagenet_train_fixsize{}_tau{}_N{}_{}_{}.json'.format(args.out_dir, args.fixed_size, args.tau, args.N, start_idx, end_idx)
with open(json_name, 'w') as output_json_file:
json.dump(output, output_json_file)
print(f'dumping {json_name}')
print("Done: {} images; {} anns.".format(len(output['images']), len(output['annotations']))) | CutLER-main | maskcut/maskcut.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
"""
Copied from Dino repo. https://github.com/facebookresearch/dino
Mostly copy-paste from timm library.
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
import math
import warnings
from functools import partial
import torch
import torch.nn as nn
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def drop_path(x, drop_prob: float = 0., training: bool = False):
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
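# Brief sketch (illustrative only): with drop_prob=0.5 in training mode, roughly half of
# the samples in a batch are zeroed out and the survivors are rescaled by 1/keep_prob,
# so the expected activation magnitude is preserved.
def _demo_drop_path():
    x = torch.ones(8, 4)
    out = drop_path(x, drop_prob=0.5, training=True)
    print(out[:, 0])  # each entry is either 0.0 (dropped sample) or 2.0 (kept, rescaled)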
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, return_attention=False):
y, attn = self.attn(self.norm1(x))
if return_attention:
return attn
x = x + self.drop_path(y)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
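# Minimal sketch (standalone illustration, not part of the original module): a Block maps
# a token sequence to a same-shaped sequence; with return_attention=True it instead
# returns the (batch, heads, tokens, tokens) attention map used by get_last_selfattention.
def _demo_block():
    blk = Block(dim=384, num_heads=6)
    tokens = torch.randn(2, 1 + 28 * 28, 384)   # [CLS] token + 28x28 patch tokens
    out = blk(tokens)
    attn = blk(tokens, return_attention=True)
    print(out.shape, attn.shape)                # (2, 785, 384) and (2, 6, 785, 785)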
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
num_patches = (img_size // patch_size) * (img_size // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, C, H, W = x.shape
x = self.proj(x).flatten(2).transpose(1, 2)
return x
class VisionTransformer(nn.Module):
""" Vision Transformer """
def __init__(self, img_size=[224], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs):
super().__init__()
self.num_features = self.embed_dim = embed_dim
self.patch_embed = PatchEmbed(
img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Classifier head
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def interpolate_pos_encoding(self, x, w, h):
npatch = x.shape[1] - 1
N = self.pos_embed.shape[1] - 1
if npatch == N and w == h:
return self.pos_embed
class_pos_embed = self.pos_embed[:, 0]
patch_pos_embed = self.pos_embed[:, 1:]
dim = x.shape[-1]
w0 = w // self.patch_embed.patch_size
h0 = h // self.patch_embed.patch_size
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
w0, h0 = w0 + 0.1, h0 + 0.1
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
mode='bicubic',
)
assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
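# Worked example (numbers for illustration): a ViT-B/8 checkpoint pretrained at 224x224
# stores a 28x28 = 784-token patch grid (N) in pos_embed. A 480x480 input yields a
# 60x60 = 3600-token grid, so the stored grid is resized bicubically with a per-side
# scale_factor of about 60.1 / 28 ~ 2.15 before being re-flattened and re-attached to
# the [CLS] positional embedding.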
def prepare_tokens(self, x):
B, nc, w, h = x.shape
x = self.patch_embed(x) # patch linear embedding
# add the [CLS] token to the embed patch tokens
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# add positional encoding to each token
x = x + self.interpolate_pos_encoding(x, w, h)
return self.pos_drop(x)
def forward(self, x):
x = self.prepare_tokens(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x[:, 0]
def get_last_selfattention(self, x):
x = self.prepare_tokens(x)
for i, blk in enumerate(self.blocks):
if i < len(self.blocks) - 1:
x = blk(x)
else:
# return attention of the last block
return blk(x, return_attention=True)
def get_intermediate_layers(self, x, n=1):
x = self.prepare_tokens(x)
# we return the output tokens from the `n` last blocks
output = []
for i, blk in enumerate(self.blocks):
x = blk(x)
if len(self.blocks) - i <= n:
output.append(self.norm(x))
return output
def vit_small(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def vit_base(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
class ViTFeat(nn.Module):
""" Vision Transformer """
def __init__(self, pretrained_pth, feat_dim, vit_arch = 'base', vit_feat = 'k', patch_size=16):
super().__init__()
if vit_arch == 'base' :
self.model = vit_base(patch_size=patch_size, num_classes=0)
else :
self.model = vit_small(patch_size=patch_size, num_classes=0)
self.feat_dim = feat_dim
self.vit_feat = vit_feat
self.patch_size = patch_size
# state_dict = torch.load(pretrained_pth, map_location="cpu")
state_dict = torch.hub.load_state_dict_from_url(pretrained_pth)
self.model.load_state_dict(state_dict, strict=True)
print('Loading weight from {}'.format(pretrained_pth))
def forward(self, img) :
feat_out = {}
def hook_fn_forward_qkv(module, input, output):
feat_out["qkv"] = output
self.model._modules["blocks"][-1]._modules["attn"]._modules["qkv"].register_forward_hook(hook_fn_forward_qkv)
# Forward pass in the model
with torch.no_grad() :
h, w = img.shape[2], img.shape[3]
feat_h, feat_w = h // self.patch_size, w // self.patch_size
attentions = self.model.get_last_selfattention(img)
bs, nb_head, nb_token = attentions.shape[0], attentions.shape[1], attentions.shape[2]
qkv = (
feat_out["qkv"]
.reshape(bs, nb_token, 3, nb_head, -1)
.permute(2, 0, 3, 1, 4)
)
q, k, v = qkv[0], qkv[1], qkv[2]
k = k.transpose(1, 2).reshape(bs, nb_token, -1)
q = q.transpose(1, 2).reshape(bs, nb_token, -1)
v = v.transpose(1, 2).reshape(bs, nb_token, -1)
# Modality selection
if self.vit_feat == "k":
feats = k[:, 1:].transpose(1, 2).reshape(bs, self.feat_dim, feat_h * feat_w)
elif self.vit_feat == "q":
feats = q[:, 1:].transpose(1, 2).reshape(bs, self.feat_dim, feat_h * feat_w)
elif self.vit_feat == "v":
feats = v[:, 1:].transpose(1, 2).reshape(bs, self.feat_dim, feat_h * feat_w)
elif self.vit_feat == "kqv":
k = k[:, 1:].transpose(1, 2).reshape(bs, self.feat_dim, feat_h * feat_w)
q = q[:, 1:].transpose(1, 2).reshape(bs, self.feat_dim, feat_h * feat_w)
v = v[:, 1:].transpose(1, 2).reshape(bs, self.feat_dim, feat_h * feat_w)
feats = torch.cat([k, q, v], dim=1)
return feats
if __name__ == "__main__":
vit_arch = 'base'
vit_feat = 'k'
# ViTFeat expects (pretrained_pth, feat_dim, vit_arch, vit_feat, patch_size)
model = ViTFeat("https://dl.fbaipublicfiles.com/dino/dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth", 768, vit_arch, vit_feat, patch_size=8)
img = torch.cuda.FloatTensor(4, 3, 224, 224)
model.cuda()
# Forward pass in the model
feat = model(img)
print (feat.shape)
| CutLER-main | maskcut/dino.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
import os
import sys
sys.path.append('../')
import argparse
import numpy as np
import PIL.Image as Image
import torch
from torchvision import transforms
from scipy import ndimage
from detectron2.utils.colormap import random_color
import dino # model
from third_party.TokenCut.unsupervised_saliency_detection import metric
from crf import densecrf
from maskcut import maskcut
# Image transformation applied to all images
ToTensor = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(
(0.485, 0.456, 0.406),
(0.229, 0.224, 0.225)),])
def vis_mask(input, mask, mask_color) :
fg = mask > 0.5
rgb = np.copy(input)
rgb[fg] = (rgb[fg] * 0.3 + np.array(mask_color) * 0.7).astype(np.uint8)
return Image.fromarray(rgb)
if __name__ == "__main__":
parser = argparse.ArgumentParser('MaskCut Demo')
# default arguments
parser.add_argument('--out-dir', type=str, help='output directory')
parser.add_argument('--vit-arch', type=str, default='small', choices=['base', 'small'], help='which architecture')
parser.add_argument('--vit-feat', type=str, default='k', choices=['k', 'q', 'v', 'kqv'], help='which features')
parser.add_argument('--patch-size', type=int, default=8, choices=[16, 8], help='patch size')
parser.add_argument('--img-path', type=str, default=None, help='single image visualization')
parser.add_argument('--tau', type=float, default=0.15, help='threshold used for producing binary graph')
# additional arguments
parser.add_argument('--fixed_size', type=int, default=480, help='rescale the input images to a fixed size')
parser.add_argument('--pretrain_path', type=str, default=None, help='path to pretrained model')
parser.add_argument('--N', type=int, default=3, help='the maximum number of pseudo-masks per image')
parser.add_argument('--cpu', action='store_true', help='use cpu')
parser.add_argument('--output_path', type=str, default='', help='path to save outputs')
args = parser.parse_args()
print (args)
if args.pretrain_path is not None:
url = args.pretrain_path
if args.vit_arch == 'base' and args.patch_size == 8:
if args.pretrain_path is None:
url = "https://dl.fbaipublicfiles.com/dino/dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth"
feat_dim = 768
elif args.vit_arch == 'small' and args.patch_size == 8:
if args.pretrain_path is None:
url = "https://dl.fbaipublicfiles.com/dino/dino_deitsmall8_300ep_pretrain/dino_deitsmall8_300ep_pretrain.pth"
feat_dim = 384
backbone = dino.ViTFeat(url, feat_dim, args.vit_arch, args.vit_feat, args.patch_size)
msg = 'Load {} pre-trained feature...'.format(args.vit_arch)
print (msg)
backbone.eval()
if not args.cpu:
backbone.cuda()
bipartitions, _, I_new = maskcut(args.img_path, backbone, args.patch_size, args.tau, \
N=args.N, fixed_size=args.fixed_size, cpu=args.cpu)
I = Image.open(args.img_path).convert('RGB')
width, height = I.size
pseudo_mask_list = []
for idx, bipartition in enumerate(bipartitions):
# post-process pseudo-masks with CRF
pseudo_mask = densecrf(np.array(I_new), bipartition)
pseudo_mask = ndimage.binary_fill_holes(pseudo_mask>=0.5)
# filter out masks whose pseudo-mask changes significantly after the CRF
if not args.cpu:
mask1 = torch.from_numpy(bipartition).cuda()
mask2 = torch.from_numpy(pseudo_mask).cuda()
else:
mask1 = torch.from_numpy(bipartition)
mask2 = torch.from_numpy(pseudo_mask)
if metric.IoU(mask1, mask2) < 0.5:
pseudo_mask = pseudo_mask * -1
# construct binary pseudo-masks
pseudo_mask[pseudo_mask < 0] = 0
pseudo_mask = Image.fromarray(np.uint8(pseudo_mask*255))
pseudo_mask = np.asarray(pseudo_mask.resize((width, height)))
pseudo_mask = pseudo_mask.astype(np.uint8)
upper = np.max(pseudo_mask)
lower = np.min(pseudo_mask)
thresh = upper / 2.0
pseudo_mask[pseudo_mask > thresh] = upper
pseudo_mask[pseudo_mask <= thresh] = lower
pseudo_mask_list.append(pseudo_mask)
input = np.array(I)
for pseudo_mask in pseudo_mask_list:
input = vis_mask(input, pseudo_mask, random_color(rgb=True))
input.save(os.path.join(args.output_path, "demo.jpg"))
| CutLER-main | maskcut/demo.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pipenv install twine --dev
import io
import os
import sys
from distutils.util import convert_path
from shutil import rmtree
from setuptools import Command, find_packages, setup
main_ns = {}
ver_path = convert_path("manifest/version.py")
with open(ver_path) as ver_file:
exec(ver_file.read(), main_ns)
# Package meta-data.
NAME = "manifest-ml"
DESCRIPTION = "Manifest for Prompting Foundation Models."
URL = "https://github.com/HazyResearch/manifest"
EMAIL = "[email protected]"
AUTHOR = "Laurel Orr"
REQUIRES_PYTHON = ">=3.8.0"
VERSION = main_ns["__version__"]
# What packages are required for this module to be executed?
REQUIRED = [
"numpy>=1.20.0",
"pydantic>=1.9.0",
"redis>=4.3.1",
"requests>=2.27.1",
"aiohttp>=3.8.0",
"sqlitedict>=2.0.0",
"tenacity>=8.2.0",
"tiktoken>=0.3.0",
"xxhash>=3.0.0",
]
# What packages are optional?
EXTRAS = {
"api": [
"accelerate>=0.10.0",
"deepspeed>=0.7.0",
"diffusers>=0.6.0",
"Flask>=2.1.2",
"sentence_transformers>=2.2.0",
"torch>=1.8.0",
"transformers>=4.29.0,<4.31.0",
"tokenizers>=0.13.3",
],
"app": [
"fastapi>=0.70.0",
"uvicorn>=0.18.0",
],
"diffusers": [
"pillow>=9.0.0",
],
"gcp": [
"pg8000",
"cloud-sql-python-connector[pg8000]>=1.0.0",
"sqlalchemy",
],
"dev": [
"autopep8>=1.6.0",
"black>=22.3.0",
"isort>=5.9.3",
"flake8>=4.0.0",
"flake8-docstrings>=1.6.0",
"mypy>=0.950",
"pep8-naming>=0.12.1",
"docformatter>=1.4",
"pytest>=7.0.0",
"pytest-cov>=3.0.0",
"python-dotenv>=0.20.0",
"sphinx-rtd-theme>=0.5.1",
"nbsphinx>=0.8.0",
"recommonmark>=0.7.1",
"pre-commit>=2.14.0",
"types-redis>=4.2.6",
"types-requests>=2.27.29",
"types-PyYAML>=6.0.7",
"types-protobuf>=3.19.21",
"types-python-dateutil>=2.8.16",
"types-setuptools>=57.4.17",
"types-pillow>=9.0.0",
"types-xxhash>=3.0.0",
"sphinx-autobuild",
"twine",
],
}
EXTRAS["all"] = list(set(sum(EXTRAS.values(), [])))
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
with io.open(os.path.join(here, "README.md"), encoding="utf-8") as f:
long_description = "\n" + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
with open(os.path.join(here, project_slug, "__version__.py")) as f:
exec(f.read(), about)
else:
about["__version__"] = VERSION
class UploadCommand(Command):
"""Support setup.py upload."""
description = "Build and publish the package."
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print("\033[1m{0}\033[0m".format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status("Removing previous builds…")
rmtree(os.path.join(here, "dist"))
rmtree(os.path.join(here, "build"))
except OSError:
pass
self.status("Building Source and Wheel (universal) distribution…")
os.system("{0} setup.py sdist bdist_wheel --universal".format(sys.executable))
self.status("Uploading the package to PyPI via Twine…")
os.system("twine upload dist/*")
self.status("Pushing git tags…")
os.system("git tag v{0}".format(about["__version__"]))
os.system("git push --tags")
sys.exit()
# Where the magic happens:
setup(
name=NAME,
version=about["__version__"],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type="text/markdown",
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
# If your package is a single module, use this instead of 'packages':
# py_modules=['mypackage'],
# entry_points={
# 'console_scripts': ['mycli=mymodule:cli'],
# },
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
license="Apache 2.0",
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.10",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
# $ setup.py publish support.
cmdclass={
"upload": UploadCommand,
},
)
| manifest-main | setup.py |
"""Web application for Manifest."""
| manifest-main | web_app/__init__.py |
"""Pydantic models."""
from typing import List, Optional, Union
from pydantic import BaseModel
class ManifestCreate(BaseModel):
"""Create manifest Pydantic."""
# Prompt params
prompt: str
n: int = 1
max_tokens: int = 132
temperature: Optional[float] = None
top_k: Optional[int] = None
top_p: Optional[float] = None
# Manifest client params
client_name: str = "openai"
client_connection: Optional[str] = None
engine: str = "text-davinci-003"
cache_name: str = "noop"
cache_connection: Optional[str] = None
class ManifestResponse(BaseModel):
"""Manifest response Pydantic."""
response: Union[str, List[str]]
cached: bool
request_params: dict
| manifest-main | web_app/schemas.py |
"""Manifest as an app service."""
from typing import Any, Dict, cast
from fastapi import APIRouter, FastAPI, HTTPException
from manifest import Manifest
from manifest.response import Response as ManifestResponse
from web_app import schemas
app = FastAPI()
api_router = APIRouter()
@app.get("/")
async def root() -> Dict:
"""Root endpoint."""
return {"message": "Hello to the Manifest App"}
@api_router.post("/prompt/", status_code=201, response_model=schemas.ManifestResponse)
def prompt_manifest(*, manifest_in: schemas.ManifestCreate) -> Dict:
"""Prompt a manifest session and query."""
manifest = Manifest(
client_name=manifest_in.client_name,
client_connection=manifest_in.client_connection,
engine=manifest_in.engine,
cache_name=manifest_in.cache_name,
cache_connection=manifest_in.cache_connection,
)
manifest_prompt_args: Dict[str, Any] = {
"n": manifest_in.n,
"max_tokens": manifest_in.max_tokens,
}
if manifest_in.temperature:
manifest_prompt_args["temperature"] = manifest_in.temperature
if manifest_in.top_k:
manifest_prompt_args["top_k"] = manifest_in.top_k
if manifest_in.top_p:
manifest_prompt_args["top_p"] = manifest_in.top_p
try:
response = manifest.run(
prompt=manifest_in.prompt, return_response=True, **manifest_prompt_args
)
response = cast(ManifestResponse, response)
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
return {
"response": response.get_response(),
"cached": response.is_cached(),
"request_params": response.get_request_obj(),
}
app.include_router(api_router)
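# Hypothetical in-process smoke test (assumes valid credentials for the chosen client,
# e.g. an OpenAI API key in the environment): exercises the endpoint with FastAPI's
# TestClient and the no-op cache.
if __name__ == "__main__":
    from fastapi.testclient import TestClient
    test_client = TestClient(app)
    payload = {"prompt": "Say hello", "client_name": "openai", "cache_name": "noop"}
    resp = test_client.post("/prompt/", json=payload)
    print(resp.status_code, resp.json())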
| manifest-main | web_app/main.py |
__version__ = "0.1.9"
| manifest-main | manifest/version.py |
"""Manifest class."""
import asyncio
import copy
import logging
from typing import (
Any,
Dict,
Generator,
Iterator,
List,
Optional,
Tuple,
Type,
Union,
cast,
)
import numpy as np
from manifest.caches.noop import NoopCache
from manifest.caches.postgres import PostgresCache
from manifest.caches.redis import RedisCache
from manifest.caches.sqlite import SQLiteCache
from manifest.clients.client import Client
from manifest.clients.huggingface import HuggingFaceClient
from manifest.connections.client_pool import (
CLIENT_CONSTRUCTORS,
ClientConnection,
ClientConnectionPool,
)
from manifest.request import LMChatRequest, LMScoreRequest, Request
from manifest.response import ModelChoices, Response, Usage, Usages
logging.getLogger("openai").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
CACHE_CONSTRUCTORS = {
"redis": RedisCache,
"sqlite": SQLiteCache,
"noop": NoopCache,
"postgres": PostgresCache,
}
class Manifest:
"""Manifest session object."""
def __init__(
self,
client_name: Optional[str] = None,
client_connection: Optional[str] = None,
client_pool: Optional[List[ClientConnection]] = None,
client_pool_schedule: str = "round_robin",
cache_name: str = "noop",
cache_connection: Optional[str] = None,
stop_token: str = "",
**kwargs: Any,
):
"""
Initialize manifest.
Args:
client_name: name of client.
client_connection: connection string for client.
client_pool: list of client connections for multi-client.
client_pool_schedule: schedule for client pool.
cache_name: name of cache.
cache_connection: connection string for cache.
stop_token: stop token prompt generation.
Can be overridden in run
Remaining kwargs sent to client and cache.
"""
if not client_name and not client_pool:
raise ValueError(
"Must specify client_name or client_pool. "
f"Choices are {list(CLIENT_CONSTRUCTORS.keys())}"
)
if client_name and client_pool:
raise ValueError("Cannot specify both client_name and client_pool")
if client_name:
client_pool = [
ClientConnection(
client_name=client_name,
client_connection=client_connection,
# Remove engine from kwargs
engine=kwargs.pop("engine", None),
)
]
self.client_pool = ClientConnectionPool(
client_pool, client_pool_schedule, client_args=kwargs
)
if cache_name not in CACHE_CONSTRUCTORS:
raise ValueError(
f"Unknown cache name: {cache_name}. "
f"Choices are {list(CACHE_CONSTRUCTORS.keys())}"
)
# Must pass kwargs as a dict so the client's "pop" methods can remove used arguments
self.cache = CACHE_CONSTRUCTORS[cache_name]( # type: ignore
cache_connection, self.client_pool.request_type, cache_args=kwargs
)
if len(kwargs) > 0:
raise ValueError(f"{list(kwargs.items())} arguments are not recognized.")
self.stop_token = stop_token
def close(self) -> None:
"""Close the client and cache."""
self.client_pool.close()
self.cache.close()
def _validate_kwargs(self, kwargs: Dict, request_params: Request) -> None:
"""Validate kwargs.
Args:
kwargs: kwargs to validate.
request_params: request object to validate against.
"""
# Check for invalid kwargs
non_request_kwargs = [
(k, v) for k, v in kwargs.items() if k not in request_params.__dict__
]
if len(non_request_kwargs) > 0:
raise ValueError(
f"{list(non_request_kwargs)} arguments are not recognized."
)
# Warn for valid but unused kwargs
request_unused_kwargs = [
(k, v) for k, v in kwargs.items() if k not in non_request_kwargs
]
if len(request_unused_kwargs) > 0:
logger.warning(f"{list(request_unused_kwargs)} arguments are unused.")
return
def _split_cached_requests(
self,
request: Request,
client: Client,
overwrite_cache: bool,
) -> Tuple[Dict[int, Response], Request]:
"""Split a request into cached responses and Requests to run.
Args:
request: request object.
overwrite_cache: whether to overwrite cache.
Returns:
cached_idx_to_response: dict of cached responses.
new_request: request object with only prompts to run.
"""
cached_idx_to_response: Dict[int, Response] = {}
new_request = copy.deepcopy(request)
if not overwrite_cache:
if isinstance(new_request.prompt, list) and not isinstance(
request, LMChatRequest
):
new_request.prompt = []
for idx, prompt_str in enumerate(request.prompt):
single_request = copy.deepcopy(request)
single_request.prompt = prompt_str
possible_response = self.cache.get(
client.get_cache_key(single_request)
)
if possible_response:
cached_idx_to_response[idx] = possible_response
else:
new_request.prompt.append(prompt_str)
# Chat or single string requests are not broken down into
# subprompts for caching.
elif (isinstance(new_request.prompt, str)) or (
isinstance(new_request.prompt, list)
and isinstance(request, LMChatRequest)
):
possible_response = self.cache.get(client.get_cache_key(new_request))
if possible_response:
cached_idx_to_response[0] = possible_response
new_request.prompt = None
else:
raise ValueError(
f"Invalid prompt type: {type(new_request.prompt)}"
f" with request type: {type(request)}"
)
return cached_idx_to_response, new_request
def _stitch_responses_and_cache(
self,
request: Request,
client: Client,
response: Union[Response, None],
cached_idx_to_response: Dict[int, Response],
) -> Response:
"""Stich together the cached and uncached responses."""
# We stitch the responses (the choices) here from both the new request the
# cached entries.
all_model_choices = []
all_usages = []
all_input_prompts: List[Union[str, List[str], List[Dict]]] = []
response_idx = 0
number_prompts = len(cached_idx_to_response)
single_completion_output = False
if response:
if isinstance(response.get_request_obj().prompt, str):
single_completion_output = True
number_prompts += 1
elif isinstance(response.get_request_obj().prompt, list) and not isinstance(
request, LMChatRequest
):
number_prompts += len(response.get_request_obj().prompt)
elif isinstance(response.get_request_obj().prompt, list) and isinstance(
request, LMChatRequest
):
assert len(cached_idx_to_response) <= 1
number_prompts += 1
else:
raise ValueError(
f"Invalid prompt type: {type(response.get_request_obj().prompt)}"
f" with request type: {type(request)}"
)
response_type = None
request_type: Type[Request] = None
for idx in range(number_prompts):
if idx in cached_idx_to_response:
cached_res = cached_idx_to_response[idx]
response_type = cached_res._response_type
request_type = cached_res._request_type
all_input_prompts.append(cached_res.get_request_obj().prompt)
if request.n == 1:
assert (
len(cached_res.get_response_obj().choices) == 1
), "cached response should have only one choice"
all_model_choices.extend(cached_res.get_response_obj().choices)
if cached_res.get_usage_obj().usages:
all_usages.extend(cached_res.get_usage_obj().usages)
else:
assert response is not None, "response should not be None"
response = cast(Response, response)
response_type = response._response_type
request_type = response._request_type
# the choices list in the response is a flat one.
# length is request.n * num_prompts
current_choices = response.get_response_obj().choices[
response_idx * request.n : (response_idx + 1) * request.n
]
all_model_choices.extend(current_choices)
if isinstance(
response.get_request_obj().prompt, list
) and not isinstance(request, LMChatRequest):
prompt: Union[
str, List[str], List[Dict]
] = response.get_request_obj().prompt[response_idx]
# Chat request
elif isinstance(response.get_request_obj().prompt, list) and isinstance(
request, LMChatRequest
):
# We will only have response_idx == 0 here as we can only
# support single chat requests.
assert request.n == 1
assert number_prompts <= 1
prompt = response.get_request_obj().prompt
else:
prompt = str(response.get_request_obj().prompt)
usages: Optional[List[Usage]] = None
if response.get_usage_obj().usages:
usages = response.get_usage_obj().usages[
response_idx * request.n : (response_idx + 1) * request.n
]
all_usages.extend(usages)
all_input_prompts.append(prompt)
# set cache
new_request = copy.deepcopy(request)
new_request.prompt = prompt # type: ignore
cache_key = client.get_cache_key(new_request)
new_response = copy.deepcopy(response)
new_response._response.choices = current_choices
new_response._usages = Usages(usages=(usages or []))
self.cache.set(cache_key, new_response.to_dict(drop_request=True))
response_idx += 1
new_request = copy.deepcopy(request)
new_request.prompt = (
all_input_prompts # type: ignore
if len(all_input_prompts) > 1 or not single_completion_output
else all_input_prompts[0]
)
response_obj = Response(
response=ModelChoices(choices=all_model_choices),
cached=len(cached_idx_to_response) > 0,
request=new_request,
usages=Usages(usages=all_usages),
response_type=response_type,
request_type=request_type,
)
return response_obj
def run(
self,
prompt: Union[str, List[str], List[Dict[str, str]]],
overwrite_cache: bool = False,
stop_token: Optional[str] = None,
return_response: bool = False,
stream: bool = False,
**kwargs: Any,
) -> Union[
str,
List[str],
np.ndarray,
List[np.ndarray],
Response,
Iterator[str],
Iterator[Response],
]:
"""
Run the prompt.
Orchestrates between the standard run and chat run and batch run.
Args:
prompt: prompt(s) to run.
overwrite_cache: whether to overwrite cache.
stop_token: stop token for prompt generation.
Default is self.stop_token.
"" for no stop token.
return_response: whether to return Response object.
stream: whether to stream the prompt. Only supported
for single string prompts and LMs.
Returns:
response from prompt.
"""
if not isinstance(prompt, list) and not isinstance(prompt, str):
raise ValueError(
f"Invalid prompt type: {type(prompt)}. "
"Prompt must be a string or list of strings "
"or list of dicts."
)
if isinstance(prompt, list) and not prompt:
raise ValueError("Prompt cannot be empty list")
# Get the client to run
client = self.client_pool.get_next_client()
if stream:
if not client.supports_streaming_inference():
raise ValueError(
f"Client {client} does not support streaming inference."
)
if not isinstance(prompt, str):
raise ValueError(
"Stream is only supported for single string prompts. "
"It will soon be supported for chat dictionary prompts, too."
)
return self._run_stream(
prompt=cast(str, prompt),
client=client,
overwrite_cache=overwrite_cache,
stop_token=stop_token,
return_response=return_response,
**kwargs,
)
if isinstance(prompt, list) and isinstance(prompt[0], dict):
if not client.IS_CHAT:
raise ValueError(
f"Client {client} does not support dict chat prompt. "
"Please use a chat model."
)
if stop_token:
logger.warning(
"stop_token is not supported for chat prompt. "
"Ignoring stop_token."
)
return self._run_chat(
prompt=cast(List[Dict[str, str]], prompt),
client=client,
overwrite_cache=overwrite_cache,
return_response=return_response,
**kwargs,
)
return self._run(
prompt=cast(Union[str, List[str]], prompt),
client=client,
overwrite_cache=overwrite_cache,
stop_token=stop_token,
return_response=return_response,
**kwargs,
)
def _run(
self,
prompt: Union[str, List[str]],
client: Client,
overwrite_cache: bool = False,
stop_token: Optional[str] = None,
return_response: bool = False,
**kwargs: Any,
) -> Union[str, List[str], np.ndarray, List[np.ndarray], Response]:
"""
Run the prompt.
Args:
prompt: prompt(s) to run.
client: client to run.
overwrite_cache: whether to overwrite cache.
stop_token: stop token for prompt generation.
Default is self.stop_token.
"" for no stop token.
return_response: whether to return Response object.
Returns:
response from prompt.
"""
is_batch = isinstance(prompt, list)
stop_token = stop_token if stop_token is not None else self.stop_token
        # Must pass kwargs as a dict so the client's "pop" methods can remove used arguments
request_params = client.get_request(prompt, kwargs)
# Avoid nested list of results - enforce n = 1 for batch
if is_batch and request_params.n > 1:
raise ValueError("Batch mode does not support n > 1.")
self._validate_kwargs(kwargs, request_params)
cached_idx_to_response, request_params = self._split_cached_requests(
request_params, client, overwrite_cache
)
        # If the prompt is non-empty (not None and not an empty list), run a new request
if request_params.prompt:
# Start timing metrics
self.client_pool.start_timer()
response = client.run_request(request_params)
self.client_pool.end_timer()
else:
# Nothing to run
response = None
final_response = self._stitch_responses_and_cache(
request=request_params,
client=client,
response=response,
cached_idx_to_response=cached_idx_to_response,
)
# Extract text results
if return_response:
return final_response
else:
return final_response.get_response(stop_token, is_batch)
def _run_chat(
self,
prompt: List[Dict[str, str]],
client: Client,
overwrite_cache: bool = False,
return_response: bool = False,
**kwargs: Any,
) -> Union[str, Response]:
"""
Run the prompt.
Args:
prompt: prompt dictionary to run.
client: client to run.
overwrite_cache: whether to overwrite cache.
return_response: whether to return Response object.
Returns:
response from prompt.
"""
is_batch = False
# Get a request for an empty prompt to handle all kwargs
request_params = client.get_request("", kwargs)
# Add prompt and cast as chat request
request_params_dict = request_params.to_dict()
request_params_dict["prompt"] = prompt
request_params_as_chat = LMChatRequest(**request_params_dict)
# Avoid nested list of results - enforce n = 1 for batch
if request_params_as_chat.n > 1:
raise ValueError("Chat mode does not support n > 1.")
self._validate_kwargs(kwargs, request_params_as_chat)
cached_idx_to_response, request_params_as_chat = self._split_cached_requests( # type: ignore # noqa: E501
request_params_as_chat, client, overwrite_cache
)
        # If the prompt is non-empty (not None and not an empty list), run a new request
if request_params_as_chat.prompt:
# Start timing metrics
self.client_pool.start_timer()
response = client.run_chat_request(request_params_as_chat)
self.client_pool.end_timer()
else:
# Nothing to run
response = None
final_response = self._stitch_responses_and_cache(
request=request_params_as_chat,
client=client,
response=response,
cached_idx_to_response=cached_idx_to_response,
)
# Extract text results
if return_response:
return final_response
else:
return cast(str, final_response.get_response("", is_batch))
def _run_stream(
self,
prompt: str,
client: Client,
overwrite_cache: bool = False,
stop_token: Optional[str] = None,
return_response: bool = False,
**kwargs: Any,
) -> Union[Generator[str, None, None], Generator[Response, None, None]]:
"""
Run the prompt in a stream.
Args:
prompt: prompt(s) to run.
client: client to run.
overwrite_cache: whether to overwrite cache.
stop_token: stop token for prompt generation.
Default is self.stop_token.
"" for no stop token.
return_response: whether to return Response object.
Returns:
response from prompt.
"""
is_batch = False
stop_token = stop_token if stop_token is not None else self.stop_token
        # Must pass kwargs as a dict so the client's "pop" methods can remove used arguments
request_params = client.get_request(prompt, kwargs)
# Avoid nested list of results - enforce n = 1 for batch
if request_params.n > 1:
raise ValueError("Stream mode does not support n > 1.")
self._validate_kwargs(kwargs, request_params)
cached_idx_to_response, request_params = self._split_cached_requests(
request_params, client, overwrite_cache
)
if request_params.prompt:
            # Because we are streaming, we should have either a cached response
            # or a prompt to run
assert len(cached_idx_to_response) == 0
response_iter = client.run_streaming_request(request_params)
is_cached = False
else:
assert len(cached_idx_to_response) == 1
response_iter = cached_idx_to_response[0].as_iter()
is_cached = True
saved_responses = []
# Start timing metrics
self.client_pool.start_timer()
for response_token in response_iter:
saved_responses.append(response_token)
if return_response:
yield response_token
else:
yield cast(
Union[str, Response], response_token.get_response("", is_batch)
)
self.client_pool.end_timer()
if not is_cached:
final_response = Response.union_all(
saved_responses, as_single_lmchoice=True
)
self._stitch_responses_and_cache(
request=request_params,
client=client,
response=final_response,
cached_idx_to_response=cached_idx_to_response,
)
async def arun_batch(
self,
prompts: List[str],
overwrite_cache: bool = False,
stop_token: Optional[str] = None,
return_response: bool = False,
chunk_size: int = -1,
verbose: bool = False,
**kwargs: Any,
) -> Union[List[str], List[np.ndarray], Response]:
"""
Run a batch of prompts with async.
If the client pool is a single client, all prompts will be sent
        to one client and batch_size (which is passed in as kwargs) will
determine how the prompts are split.
If the client pool is a pool of clients, the prompts will be split
into chunks and sent to the clients. Each client will split the
        chunk into batches of batch_size prompts to send to the model.
Args:
prompts: prompts to run.
overwrite_cache: whether to overwrite cache.
stop_token: stop token for prompt generation.
Default is self.stop_token.
"" for no stop token.
return_response: whether to return Response object.
chunk_size: number of prompts to send to a client in chunks.
For each chunk, the client will split the chunk into
                batches of batch_size prompts to send to the model.
For a single manifest client, there is no impact to
setting chunk_size. For a client pool, chunk_size
can be used to distribute the load across the clients.
verbose: whether to print progress of async tasks.
Returns:
response from prompt.
"""
if not isinstance(prompts, list):
raise ValueError("Prompts must be a list of strings.")
if not prompts:
raise ValueError("Prompts must not be empty.")
if not isinstance(prompts[0], str):
raise ValueError("Prompts must be a list of strings.")
# Split the prompts into chunks for connection pool
prompt_chunks: List[Tuple[Client, List[str]]] = []
if chunk_size > 0:
for i in range(0, len(prompts), chunk_size):
prompt_chunks.append(
(self.client_pool.get_next_client(), prompts[i : i + chunk_size])
)
else:
prompt_chunks = [(self.client_pool.get_next_client(), prompts)]
# Run the chunks
tasks = []
for client, chunk in prompt_chunks:
tasks.append(
asyncio.create_task(
self._arun_batch_client(
prompts=chunk,
client=client,
overwrite_cache=overwrite_cache,
verbose=verbose,
**kwargs,
)
)
)
logger.info(f"Running {len(tasks)} tasks across all clients.")
responses = await asyncio.gather(*tasks)
final_response = Response.union_all(responses)
stop_token = stop_token if stop_token is not None else self.stop_token
# Extract text results
if return_response:
return final_response
else:
return cast(
Union[List[str], List[np.ndarray]],
final_response.get_response(stop_token, True),
)
async def _arun_batch_client(
self,
prompts: List[str],
client: Client,
overwrite_cache: bool = False,
verbose: bool = False,
**kwargs: Any,
) -> Response:
"""
Run a batch of prompts with async for single client.
Args:
prompts: prompts to run.
client: client to run.
overwrite_cache: whether to overwrite cache.
verbose: whether to print progress of async tasks.
Returns:
response from prompt.
"""
        # Must pass kwargs as a dict so the client's "pop" methods can remove used arguments
request_params = client.get_request(prompts, kwargs)
# Avoid nested list of results - enforce n = 1 for batch
if request_params.n > 1:
raise ValueError("Batch mode does not support n > 1.")
self._validate_kwargs(kwargs, request_params)
cached_idx_to_response, request_params = self._split_cached_requests(
request_params, client, overwrite_cache
)
        # If the prompt is non-empty (not None and not an empty list), run a new request
if request_params.prompt:
self.client_pool.start_timer()
response = await client.arun_batch_request(request_params, verbose=verbose)
self.client_pool.end_timer()
else:
# Nothing to run
response = None
final_response = self._stitch_responses_and_cache(
request=request_params,
client=client,
response=response,
cached_idx_to_response=cached_idx_to_response,
)
return final_response
def score_prompt(
self,
prompt: Union[str, List[str]],
overwrite_cache: bool = False,
**kwargs: Any,
) -> Dict:
"""
Score the prompt via forward pass of the model - no sampling or generation.
Returns the response object with logits of the prompt.
Args:
prompt: prompt(s) to run.
overwrite_cache: whether to overwrite cache.
Returns:
response from prompt.
"""
client = self.client_pool.get_next_client()
        # Must pass kwargs as a dict so the client's "pop" methods can remove used arguments
request_params = client.get_request(prompt, kwargs)
request_params_as_score = LMScoreRequest(**request_params.to_dict())
if request_params_as_score.n > 1:
raise ValueError("Sequence scoring does not support n > 1.")
self._validate_kwargs(kwargs, request_params_as_score)
cached_idx_to_response, request_params_as_score = self._split_cached_requests( # type: ignore # noqa: E501
request_params_as_score, client, overwrite_cache
)
        # If the prompt is non-empty (not None and not an empty list), run a new request
if request_params_as_score.prompt:
try:
response = cast(HuggingFaceClient, client).run_score_prompt_request(
request_params_as_score
)
except AttributeError:
raise ValueError("`score_prompt` only supported for HF models.")
else:
# Nothing to run
response = None
final_response = self._stitch_responses_and_cache(
request=request_params_as_score,
client=client,
response=response,
cached_idx_to_response=cached_idx_to_response,
)
return final_response.to_dict()
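

# Hedged usage sketch (illustrative only). The constructor arguments below are
# assumptions and are not defined in this part of the file; running this also
# requires a configured client (e.g. an API key), so it is left as comments:
#
#   manifest = Manifest(client_name="openai", cache_name="sqlite")
#   text = manifest.run("What is the capital of France?", max_tokens=16)
#   texts = asyncio.run(manifest.arun_batch(["Q1", "Q2"], chunk_size=1))
#
# run() dispatches to _run_stream, _run_chat, or _run based on the prompt type
# and the stream flag; arun_batch() fans prompt chunks out across the client
# pool and unions the per-chunk responses.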
| manifest-main | manifest/manifest.py |
"""Request object."""
from typing import Any, Dict, List, Optional, Tuple, Union
from pydantic import BaseModel
# Used when unioning requests after async connection pool
ENGINE_SEP = "::"
NOT_CACHE_KEYS = {"client_timeout", "batch_size"}
# The below should match those in Request.
DEFAULT_REQUEST_KEYS = {
"client_timeout": ("client_timeout", 60), # seconds
"batch_size": ("batch_size", 8),
"run_id": ("run_id", None),
}
class Request(BaseModel):
"""Request object."""
# Prompt
prompt: Union[str, List[str]] = ""
# Engine
engine: str = "text-ada-001"
# Number completions
n: int = 1
# Timeout
client_timeout: int = 60
# Run id used to repeat run with same parameters
run_id: Optional[str] = None
# Batch size for async batch run
batch_size: int = 8
def to_dict(
self, allowable_keys: Dict[str, Tuple[str, Any]] = None, add_prompt: bool = True
) -> Dict[str, Any]:
"""
Convert request to a dictionary.
Handles parameter renaming but does not fill in default values.
It will drop any None values.
Add prompt ensures the prompt is always in the output dictionary.
"""
if allowable_keys:
include_keys = set(allowable_keys.keys())
if add_prompt and "prompt":
include_keys.add("prompt")
else:
allowable_keys = {}
include_keys = None
request_dict = {
allowable_keys.get(k, (k, None))[0]: v
for k, v in self.dict(include=include_keys).items()
if v is not None
}
return request_dict
class LMRequest(Request):
"""Language Model Request object."""
# Temperature for generation
temperature: float = 0.7
# Max tokens for generation
max_tokens: int = 100
# Nucleus sampling taking top_p probability mass tokens
top_p: float = 1.0
# Top k sampling taking top_k highest probability tokens
top_k: int = 50
# Logprobs return value
logprobs: Optional[int] = None
# Stop sequences
stop_sequences: Optional[List[str]] = None
# Number beams beam search (HF)
num_beams: int = 1
# Whether to sample or do greedy (HF)
do_sample: bool = False
# Penalize repetition (HF)
repetition_penalty: float = 1.0
# Length penalty (HF)
length_penalty: float = 1.0
    # Penalize presence
presence_penalty: float = 0
# Penalize frequency
frequency_penalty: float = 0
class LMChatRequest(LMRequest):
"""Language Model Chat Request object."""
    prompt: List[Dict[str, str]] = []  # type: ignore
class LMScoreRequest(LMRequest):
"""Language Model Score Request object."""
pass
class EmbeddingRequest(Request):
"""Embedding Request object."""
pass
class DiffusionRequest(Request):
"""Diffusion Model Request object."""
# Number of steps
num_inference_steps: int = 50
# Height of image
height: int = 512
# Width of image
width: int = 512
# Guidance scale
guidance_scale: float = 7.5
# Eta
eta: float = 0.0
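

# Minimal illustrative sketch of how clients use to_dict with a PARAMS-style
# mapping of user key -> (client key, default). The mapping and values below
# are made up for demonstration.
if __name__ == "__main__":
    _params = {
        "engine": ("model", "text-ada-001"),
        "max_tokens": ("max_tokens", 100),
    }
    _request = LMRequest(prompt="say hello", max_tokens=50)
    # Keys are renamed per the mapping, None values are dropped, and the
    # prompt is always included.
    assert _request.to_dict(_params) == {
        "model": "text-ada-001",
        "max_tokens": 50,
        "prompt": "say hello",
    }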
| manifest-main | manifest/request.py |
"""Manifest init."""
from manifest.manifest import Manifest
from manifest.request import Request
from manifest.response import Response
__all__ = ["Manifest", "Response", "Request"]
| manifest-main | manifest/__init__.py |
"""Client response."""
import copy
import json
from typing import Any, Dict, Generator, List, Optional, Type, Union, cast
import numpy as np
from pydantic import BaseModel
from manifest.request import (
ENGINE_SEP,
DiffusionRequest,
EmbeddingRequest,
LMChatRequest,
LMRequest,
LMScoreRequest,
Request,
)
RESPONSE_CONSTRUCTORS: Dict[Type[Request], Dict[str, Union[str, Type[Request]]]] = {
LMRequest: {"response_type": "text", "request_type": LMRequest},
LMChatRequest: {"response_type": "text", "request_type": LMChatRequest},
LMScoreRequest: {"response_type": "text", "request_type": LMScoreRequest},
EmbeddingRequest: {"response_type": "array", "request_type": EmbeddingRequest},
DiffusionRequest: {"response_type": "array", "request_type": DiffusionRequest},
}
class NumpyArrayEncoder(json.JSONEncoder):
"""Numpy array encoder."""
def default(self, obj: Any) -> str:
"""Encode numpy array."""
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
class Usage(BaseModel):
"""Prompt usage class."""
completion_tokens: int = 0
prompt_tokens: int = 0
total_tokens: int = 0
class Usages(BaseModel):
"""Prompt usage class."""
usages: List[Usage]
class LMModelChoice(BaseModel):
"""Model single completion."""
text: str
token_logprobs: Optional[List[Optional[float]]] = None
tokens: Optional[List[str]] = None
class ArrayModelChoice(BaseModel):
"""Model single completion."""
array: np.ndarray
token_logprobs: Optional[List[float]] = None
class Config:
"""Pydantic config class."""
arbitrary_types_allowed = True
class ModelChoices(BaseModel):
"""Model choices."""
choices: List[Union[LMModelChoice, ArrayModelChoice]]
class Response:
"""Response class."""
def __init__(
self,
response: ModelChoices,
cached: bool,
request: Request,
response_type: str,
request_type: Type[Request],
usages: Optional[Usages] = None,
):
"""
Initialize response.
Args:
response: response dict.
usages: usage dict.
cached: whether response is cached.
request: request.
response_type: response type.
request_type: request type.
"""
self._item_dtype = None
self._response_type = response_type
if self._response_type not in {"array", "text"}:
raise ValueError(f"Invalid response type {self._response_type}")
self._request_type = request_type
self._response = response
self._usages = usages or Usages(usages=[])
self._cached = cached
self._request = request
if self._response.choices:
if response_type == "array":
if not isinstance(self._response.choices[0], ArrayModelChoice):
raise ValueError(
"response_type is array but response is "
f"{self._response.choices[0].__class__}"
)
self._item_dtype = str(
cast(ArrayModelChoice, self._response.choices[0]).array.dtype
)
else:
if not isinstance(self._response.choices[0], LMModelChoice):
raise ValueError(
"response_type is text but response is "
f"{self._response.choices[0].__class__}"
)
def is_cached(self) -> bool:
"""Check if response is cached."""
return self._cached
def get_request_obj(self) -> Request:
"""Get request parameters."""
return self._request
def get_response_obj(self) -> ModelChoices:
"""Get response object."""
return self._response
def get_usage_obj(self) -> Usages:
"""Get usage object."""
return self._usages
def get_json_response(self) -> Dict:
"""Get response dict without parsing."""
return self._response.dict()
def get_response(
self, stop_token: str = "", is_batch: bool = False
) -> Union[str, List[str], np.ndarray, List[np.ndarray]]:
"""
Get all results from response.
Args:
stop_token: stop token for string generation
is_batch: whether response is batched
"""
process_result = lambda x: x.split(stop_token)[0] if stop_token else x
extracted_items = [
choice.text if isinstance(choice, LMModelChoice) else choice.array
for choice in self._response.choices
]
if len(extracted_items) == 0:
return None
if isinstance(extracted_items[0], str):
processed_results = list(map(process_result, extracted_items))
else:
processed_results = extracted_items
if len(processed_results) == 1 and not is_batch:
return processed_results[0]
else:
return processed_results
@classmethod
def union_all(
cls, responses: List["Response"], as_single_lmchoice: bool = False
) -> "Response":
"""Union a list of response.
Args:
responses: list of responses to union.
as_single_lmchoice: if True, will concatenate all responses into a single
model choice. Useful for merging streaming responses. Only valid
for LMRequest responses.
"""
if not responses:
raise ValueError("Response list is empty.")
if len(responses) == 1:
return responses[0]
first_response = responses[0]
request_type = first_response._request_type
response_type = first_response._response_type
request = first_response.get_request_obj()
if as_single_lmchoice and response_type != "text":
raise ValueError("as_single_lmchoice=True only works for text responses.")
        # Make sure all responses have the same request and response types
if not all(
[
(r._request_type == request_type)
and (r._response_type == response_type)
for r in responses
]
):
raise ValueError("All responses must have the same keys.")
# Get all the prompts and model choices
all_prompts = []
all_choices = []
all_usages: List[Usage] = []
all_engines = []
for res in responses:
all_engines.extend(res.get_request_obj().engine.split(ENGINE_SEP))
res_prompt = res.get_request_obj().prompt
if isinstance(res_prompt, str):
res_prompt = [res_prompt]
all_prompts.extend(res_prompt)
all_choices.extend(res.get_response_obj().choices)
if res.get_usage_obj().usages:
all_usages.extend(res.get_usage_obj().usages)
else:
# Add empty usages if not present
all_usages.extend([Usage()] * len(res_prompt))
new_request = copy.deepcopy(request)
new_request.engine = ENGINE_SEP.join(sorted(set(all_engines)))
if as_single_lmchoice:
if len(set(all_prompts)) != 1:
raise ValueError("Prompts must be the same for as_single_lmchoice=True")
all_choices_txt = cast(List[LMModelChoice], all_choices) # type: ignore
single_prompt = all_prompts[0]
single_text = "".join([choice.text for choice in all_choices_txt])
single_logprobs = [
logprob
for choice in all_choices_txt
for logprob in choice.token_logprobs or []
]
single_tokens = [
token for choice in all_choices_txt for token in choice.tokens or []
]
single_usage = Usage(
completion_tokens=sum(usg.completion_tokens for usg in all_usages),
prompt_tokens=sum(usg.prompt_tokens for usg in all_usages),
total_tokens=sum(usg.total_tokens for usg in all_usages),
)
new_choices = [
LMModelChoice(
text=single_text,
token_logprobs=single_logprobs,
tokens=single_tokens,
)
]
new_responses = ModelChoices(choices=new_choices) # type: ignore
new_usages = Usages(usages=[single_usage])
new_request.prompt = single_prompt
response_obj = cls(
response=new_responses,
cached=any(res.is_cached() for res in responses),
request=new_request,
usages=new_usages,
request_type=request_type,
response_type=response_type,
)
return response_obj
else:
new_request.prompt = all_prompts
new_response = ModelChoices(choices=all_choices)
new_usages = Usages(usages=all_usages)
response_obj = cls(
response=new_response,
cached=any(res.is_cached() for res in responses),
request=new_request,
usages=new_usages,
request_type=request_type,
response_type=response_type,
)
return response_obj
# Return a token by token iterator over the response
def as_iter(self) -> Generator["Response", None, None]:
"""Return a token by token iterator over the response.
Will return iterator of responses with one token each.
"""
if self._response_type not in {"text"}:
raise ValueError(
f"Invalid response type {self._response_type} for as_iter()"
)
if not self._response.choices:
raise ValueError("No choices in response.")
if len(self._response.choices) > 1:
raise ValueError(
"Response has more than one choice. as_iter() "
"should be over single choice responses."
)
if not isinstance(self._response.choices[0], LMModelChoice):
raise ValueError(
"response_type is text but response is "
f"{self._response.choices[0].__class__}"
)
choice = cast(LMModelChoice, self._response.choices[0])
# If tokens, return iterator of tokens
if choice.tokens:
for token, logprob in zip(choice.tokens, choice.token_logprobs):
yield Response(
response=ModelChoices(
choices=[
LMModelChoice(
text=token, token_logprobs=[logprob], tokens=[token]
)
]
),
cached=self._cached,
request=self._request,
usages=self._usages,
request_type=self._request_type,
response_type=self._response_type,
)
# Otherwise, do it by words
else:
for i, word in enumerate(choice.text.split(" ")):
word = " " + word if i > 0 else word
yield Response(
response=ModelChoices(
choices=[
LMModelChoice(text=word, token_logprobs=None, tokens=None)
]
),
cached=self._cached,
request=self._request,
usages=self._usages,
request_type=self._request_type,
response_type=self._response_type,
)
def serialize(self) -> str:
"""
Serialize response to string.
Returns:
serialized response.
"""
return json.dumps(self.to_dict(), sort_keys=True, cls=NumpyArrayEncoder)
@classmethod
def deserialize(cls, value: str) -> "Response":
"""
Deserialize string to response.
Args:
value: serialized response.
Returns:
serialized response.
"""
deserialized = json.loads(value)
return cls.from_dict(deserialized)
def to_dict(self, drop_request: bool = False) -> Dict:
"""
Get dictionary representation of response.
Returns:
dictionary representation of response.
"""
to_return = {
"response": self._response.dict(),
"usages": self._usages.dict(),
"cached": self._cached,
"request": self._request.dict(),
"response_type": self._response_type,
"request_type": str(self._request_type.__name__),
"item_dtype": self._item_dtype,
}
if drop_request:
to_return.pop("request")
return to_return
@classmethod
def from_dict(
cls, response_dict: Dict, request_dict: Optional[Dict] = None
) -> "Response":
"""
Create response from dictionary.
Args:
            response_dict: dictionary representation of response.
request_dict: dictionary representation of request which
will override what is in response_dict.
Returns:
response.
"""
if "request" not in response_dict and request_dict is None:
raise ValueError(
"Request dictionary must be provided if "
"request is not in response dictionary."
)
item_dtype = response_dict["item_dtype"]
response_type = response_dict["response_type"]
if response_dict["request_type"] == "LMRequest":
request_type: Type[Request] = LMRequest
elif response_dict["request_type"] == "LMChatRequest":
request_type = LMChatRequest
elif response_dict["request_type"] == "LMScoreRequest":
request_type = LMScoreRequest
elif response_dict["request_type"] == "EmbeddingRequest":
request_type = EmbeddingRequest
elif response_dict["request_type"] == "DiffusionRequest":
request_type = DiffusionRequest
choices: List[Union[LMModelChoice, ArrayModelChoice]] = []
if item_dtype and response_type == "array":
for choice in response_dict["response"]["choices"]:
choice["array"] = np.array(choice["array"]).astype(item_dtype)
choices.append(ArrayModelChoice(**choice))
else:
for choice in response_dict["response"]["choices"]:
choices.append(LMModelChoice(**choice))
response = ModelChoices(choices=choices)
return cls(
response=response,
usages=Usages(**response_dict["usages"]),
cached=response_dict["cached"],
request=request_type(**(request_dict or response_dict["request"])),
response_type=response_type,
request_type=request_type,
)
def __str__(self) -> str:
"""
Get string representation of response.
Returns:
string representation of response.
"""
return self.serialize()
def __repr__(self) -> str:
"""
Get string representation of response.
Returns:
string representation of response.
"""
return str(self)
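

# Minimal illustrative round-trip sketch: build a text Response and pass it
# through serialize()/deserialize(). The values below are made up for
# demonstration.
if __name__ == "__main__":
    _response = Response(
        response=ModelChoices(choices=[LMModelChoice(text="hello world")]),
        cached=False,
        request=LMRequest(prompt="say hello"),
        usages=Usages(usages=[Usage(total_tokens=3)]),
        response_type="text",
        request_type=LMRequest,
    )
    _round_trip = Response.deserialize(_response.serialize())
    # The deserialized object reproduces the generated text.
    assert _round_trip.get_response() == "hello world"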
| manifest-main | manifest/response.py |
"""OpenAI client."""
import copy
import logging
import os
from typing import Any, Dict, List, Optional
import numpy as np
import tiktoken
from manifest.clients.openai import OpenAIClient
from manifest.request import EmbeddingRequest
logger = logging.getLogger(__name__)
OPENAI_EMBEDDING_ENGINES = {
"text-embedding-ada-002",
}
class OpenAIEmbeddingClient(OpenAIClient):
"""OpenAI client."""
# User param -> (client param, default value)
PARAMS = {
"engine": ("model", "text-embedding-ada-002"),
}
REQUEST_CLS = EmbeddingRequest
NAME = "openaiembedding"
def connect(
self,
connection_str: Optional[str] = None,
client_args: Dict[str, Any] = {},
) -> None:
"""
Connect to the OpenAI server.
connection_str is passed as default OPENAI_API_KEY if variable not set.
Args:
connection_str: connection string.
client_args: client arguments.
"""
self.api_key = connection_str or os.environ.get("OPENAI_API_KEY")
if self.api_key is None:
raise ValueError(
"OpenAI API key not set. Set OPENAI_API_KEY environment "
"variable or pass through `client_connection`."
)
self.host = "https://api.openai.com/v1"
for key in self.PARAMS:
setattr(self, key, client_args.pop(key, self.PARAMS[key][1]))
if getattr(self, "engine") not in OPENAI_EMBEDDING_ENGINES:
raise ValueError(
f"Invalid engine {getattr(self, 'engine')}. "
f"Must be {OPENAI_EMBEDDING_ENGINES}."
)
def get_generation_url(self) -> str:
"""Get generation URL."""
return self.host + "/embeddings"
def supports_batch_inference(self) -> bool:
"""Return whether the client supports batch inference."""
return True
def get_model_params(self) -> Dict:
"""
Get model params.
By getting model params from the server, we can add to request
and make sure cache keys are unique to model.
Returns:
model params.
"""
return {"model_name": self.NAME, "engine": getattr(self, "engine")}
def supports_streaming_inference(self) -> bool:
"""Return whether the client supports streaming inference.
Override in child client class.
"""
return False
def postprocess_response(self, response: Dict, request: Dict) -> Dict[str, Any]:
"""
Format response to dict.
Args:
response: response
request: request
Return:
response as dict
"""
if "data" not in response:
raise ValueError(f"Invalid response: {response}")
if "usage" in response:
# Handle splitting the usages for batch requests
if len(response["data"]) == 1:
if isinstance(response["usage"], list):
response["usage"] = response["usage"][0]
response["usage"] = [response["usage"]]
else:
# Try to split usage
split_usage = self.split_usage(request, response["data"])
if split_usage:
response["usage"] = split_usage
return response
def _format_request_for_embedding(self, request_params: Dict[str, Any]) -> Dict:
"""Format request params for embedding.
Args:
request_params: request params.
Returns:
formatted request params.
"""
# Format for embedding model
request_params = copy.deepcopy(request_params)
prompt = request_params.pop("prompt")
if isinstance(prompt, str):
prompt_list = [prompt]
else:
prompt_list = prompt
request_params["input"] = prompt_list
return request_params
def _format_request_from_embedding(self, response_dict: Dict[str, Any]) -> Dict:
"""Format response from embedding for standard response.
Args:
response_dict: response.
Return:
formatted response.
"""
new_choices = []
response_dict = copy.deepcopy(response_dict)
for res in response_dict.pop("data"):
new_choices.append({"array": np.array(res["embedding"])})
response_dict["choices"] = new_choices
return response_dict
def _run_completion(
self, request_params: Dict[str, Any], retry_timeout: int
) -> Dict:
"""Execute completion request.
Args:
request_params: request params.
retry_timeout: retry timeout.
Returns:
response as dict.
"""
# Format for embedding model
request_params = self._format_request_for_embedding(request_params)
response_dict = super()._run_completion(request_params, retry_timeout)
# Reformat for text model
response_dict = self._format_request_from_embedding(response_dict)
return response_dict
async def _arun_completion(
self, request_params: Dict[str, Any], retry_timeout: int
) -> Dict:
"""Async execute completion request.
Args:
request_params: request params.
retry_timeout: retry timeout.
Returns:
response as dict.
"""
# Format for embedding model
request_params = self._format_request_for_embedding(request_params)
response_dict = await super()._arun_completion(request_params, retry_timeout)
# Reformat for text model
response_dict = self._format_request_from_embedding(response_dict)
return response_dict
def split_usage(self, request: Dict, choices: List[str]) -> List[Dict[str, int]]:
"""Split usage into list of usages for each prompt."""
try:
encoding = tiktoken.encoding_for_model(getattr(self, "engine"))
except Exception:
return []
prompt = request["input"]
if isinstance(prompt, str):
prompts = [prompt]
else:
prompts = prompt
assert len(prompts) == len(choices)
usages = []
for pmt in prompts:
pmt_tokens = len(encoding.encode(pmt))
# No completion tokens for embedding models
chc_tokens = 0
usage = {
"prompt_tokens": pmt_tokens,
"completion_tokens": chc_tokens,
"total_tokens": pmt_tokens + chc_tokens,
}
usages.append(usage)
return usages
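

# Illustrative request/response reshaping performed by this client (values are
# made up): a standard request {"prompt": ["a", "b"], "model": "text-embedding-ada-002"}
# is sent to /embeddings as {"input": ["a", "b"], "model": "text-embedding-ada-002"},
# and each returned {"embedding": [...]} entry is converted back into an
# {"array": np.ndarray} choice so it matches the standard response shape used
# by the text clients.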
| manifest-main | manifest/clients/openai_embedding.py |
"""Azure client."""
import logging
import os
from typing import Any, Dict, Optional, Type
from manifest.clients.openai import OPENAI_ENGINES, OpenAIClient
from manifest.request import LMRequest, Request
logger = logging.getLogger(__name__)
# Azure deployment name can only use letters and numbers, no spaces. Hyphens ("-") and
# underscores ("_") may be used, except as ending characters. We create this mapping to
# handle the difference between Azure and OpenAI model names.
AZURE_DEPLOYMENT_NAME_MAPPING = {
"gpt-3.5-turbo": "gpt-35-turbo",
"gpt-3.5-turbo-0301": "gpt-35-turbo-0301",
}
OPENAI_DEPLOYMENT_NAME_MAPPING = {
"gpt-35-turbo": "gpt-3.5-turbo",
"gpt-35-turbo-0301": "gpt-3.5-turbo-0301",
}
class AzureClient(OpenAIClient):
"""Azure client."""
PARAMS = OpenAIClient.PARAMS
REQUEST_CLS: Type[Request] = LMRequest
NAME = "azureopenai"
def connect(
self,
connection_str: Optional[str] = None,
client_args: Dict[str, Any] = {},
) -> None:
"""
Connect to the AzureOpenAI server.
connection_str is passed as default AZURE_OPENAI_KEY if variable not set.
Args:
connection_str: connection string.
client_args: client arguments.
"""
self.api_key, self.host = None, None
if connection_str:
connection_parts = connection_str.split("::")
if len(connection_parts) == 1:
self.api_key = connection_parts[0]
elif len(connection_parts) == 2:
self.api_key, self.host = connection_parts
else:
raise ValueError(
"Invalid connection string. "
"Must be either AZURE_OPENAI_KEY or "
"AZURE_OPENAI_KEY::AZURE_OPENAI_ENDPOINT"
)
self.api_key = self.api_key or os.environ.get("AZURE_OPENAI_KEY")
if self.api_key is None:
raise ValueError(
"AzureOpenAI API key not set. Set AZURE_OPENAI_KEY environment "
"variable or pass through `client_connection`."
)
self.host = self.host or os.environ.get("AZURE_OPENAI_ENDPOINT")
if self.host is None:
raise ValueError(
"Azure Service URL not set "
"(e.g. https://openai-azure-service.openai.azure.com/)."
" Set AZURE_OPENAI_ENDPOINT or pass through `client_connection`."
" as AZURE_OPENAI_KEY::AZURE_OPENAI_ENDPOINT"
)
self.host = self.host.rstrip("/")
for key in self.PARAMS:
setattr(self, key, client_args.pop(key, self.PARAMS[key][1]))
if getattr(self, "engine") not in OPENAI_ENGINES:
raise ValueError(
f"Invalid engine {getattr(self, 'engine')}. Must be {OPENAI_ENGINES}."
)
def get_generation_url(self) -> str:
"""Get generation URL."""
engine = getattr(self, "engine")
deployment_name = AZURE_DEPLOYMENT_NAME_MAPPING.get(engine, engine)
return (
self.host
+ "/openai/deployments/"
+ deployment_name
+ "/completions?api-version=2023-05-15"
)
def get_generation_header(self) -> Dict[str, str]:
"""
Get generation header.
Returns:
header.
"""
return {"api-key": f"{self.api_key}"}
def get_model_params(self) -> Dict:
"""
Get model params.
By getting model params from the server, we can add to request
and make sure cache keys are unique to model.
Returns:
model params.
"""
# IMPORTANT!!!
        # Azure models are the same as OpenAI models, so we want to unify their
        # caches. Make sure we return the OpenAI name here.
return {"model_name": OpenAIClient.NAME, "engine": getattr(self, "engine")}
| manifest-main | manifest/clients/azureopenai.py |
"""Hugging Face client."""
import logging
from functools import lru_cache
from typing import Any, Dict, Optional, Tuple
import numpy as np
import requests
from manifest.clients.client import Client
from manifest.request import EmbeddingRequest
logger = logging.getLogger(__name__)
class HuggingFaceEmbeddingClient(Client):
"""HuggingFaceEmbedding client."""
# User param -> (client param, default value)
PARAMS: Dict[str, Tuple[str, Any]] = {}
REQUEST_CLS = EmbeddingRequest
NAME = "huggingfaceembedding"
def connect(
self,
connection_str: Optional[str] = None,
client_args: Dict[str, Any] = {},
) -> None:
"""
Connect to the HuggingFace url.
        Args:
connection_str: connection string.
client_args: client arguments.
"""
if not connection_str:
raise ValueError("Must provide connection string")
self.host = connection_str.rstrip("/")
for key in self.PARAMS:
setattr(self, key, client_args.pop(key, self.PARAMS[key][1]))
def close(self) -> None:
"""Close the client."""
pass
def get_generation_url(self) -> str:
"""Get generation URL."""
return self.host + "/embed"
def get_generation_header(self) -> Dict[str, str]:
"""
Get generation header.
Returns:
header.
"""
return {}
def supports_batch_inference(self) -> bool:
"""Return whether the client supports batch inference."""
return True
def supports_streaming_inference(self) -> bool:
"""Return whether the client supports streaming inference.
Override in child client class.
"""
return False
@lru_cache(maxsize=1)
def get_model_params(self) -> Dict:
"""
Get model params.
By getting model params from the server, we can add to request
and make sure cache keys are unique to model.
Returns:
model params.
"""
res = requests.post(self.host + "/params").json()
res["client_name"] = self.NAME
return res
def postprocess_response(self, response: Dict, request: Dict) -> Dict[str, Any]:
"""
Format response to dict.
Args:
response: response
request: request
Return:
response as dict
"""
# Convert array to np.array
for choice in response["choices"]:
choice["array"] = np.array(choice["array"])
return response
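

# Illustrative only: the hosted embedding server is expected to expose /embed
# for generation and /params for model metadata; returned "array" fields arrive
# as plain lists and are converted to numpy arrays in postprocess_response.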
| manifest-main | manifest/clients/huggingface_embedding.py |
"""TOMA client."""
import base64
import io
import logging
from typing import Any, Dict
import numpy as np
from PIL import Image
from manifest.clients.toma import TOMAClient
from manifest.request import DiffusionRequest
logger = logging.getLogger(__name__)
# Engines are dynamically instantiated from API
# but a few example engines are listed below.
TOMA_ENGINES = {
"StableDiffusion",
}
class TOMADiffuserClient(TOMAClient):
"""TOMADiffuser client."""
# User param -> (client param, default value)
PARAMS = {
"engine": ("model", "StableDiffusion"),
"num_inference_steps": ("steps", 50),
"height": ("height", 512),
"width": ("width", 512),
"n": ("n", 1),
"guidance_scale": ("guidance_scale", 7.5),
}
REQUEST_CLS = DiffusionRequest # type: ignore
NAME = "tomadiffuser"
def get_model_params(self) -> Dict:
"""
Get model params.
By getting model params from the server, we can add to request
and make sure cache keys are unique to model.
Returns:
model params.
"""
return {"model_name": self.NAME, "engine": getattr(self, "engine")}
def postprocess_response(self, response: Dict, request: Dict) -> Dict[str, Any]:
"""
Format response to dict.
Args:
response: response
request: request
Return:
response as dict
"""
return {
"model": getattr(self, "engine"),
"choices": [
{
"array": np.array(
Image.open(
io.BytesIO(
base64.decodebytes(bytes(item["image_base64"], "utf-8"))
)
)
),
}
for item in response["output"]["choices"]
],
}
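

# Illustrative only: each returned choice carries an "image_base64" string,
# which is base64-decoded, opened as a PIL image, and converted to a numpy
# array so diffusion outputs share the same {"array": ...} choice shape used
# by the other array clients.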
| manifest-main | manifest/clients/toma_diffuser.py |
"""OpenAIChat client."""
import copy
import logging
import os
from typing import Any, Dict, Optional
from manifest.clients.openai import OpenAIClient
from manifest.request import LMRequest
logger = logging.getLogger(__name__)
# List from https://platform.openai.com/docs/models/model-endpoint-compatibility
OPENAICHAT_ENGINES = {"gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4", "gpt-4-32k"}
class OpenAIChatClient(OpenAIClient):
"""OpenAI Chat client."""
# User param -> (client param, default value)
PARAMS = {
"engine": ("model", "gpt-3.5-turbo"),
"temperature": ("temperature", 1.0),
"max_tokens": ("max_tokens", 10),
"n": ("n", 1),
"top_p": ("top_p", 1.0),
"stop_sequences": ("stop", None), # OpenAI doesn't like empty lists
"presence_penalty": ("presence_penalty", 0.0),
"frequency_penalty": ("frequency_penalty", 0.0),
"batch_size": ("batch_size", 1),
}
REQUEST_CLS = LMRequest
NAME = "openaichat"
IS_CHAT = True
def connect(
self,
connection_str: Optional[str] = None,
client_args: Dict[str, Any] = {},
) -> None:
"""
Connect to the OpenAI server.
connection_str is passed as default OPENAI_API_KEY if variable not set.
Args:
connection_str: connection string.
client_args: client arguments.
"""
self.api_key = connection_str or os.environ.get("OPENAI_API_KEY")
if self.api_key is None:
raise ValueError(
"OpenAI API key not set. Set OPENAI_API_KEY environment "
"variable or pass through `client_connection`."
)
self.host = "https://api.openai.com/v1"
for key in self.PARAMS:
setattr(self, key, client_args.pop(key, self.PARAMS[key][1]))
if getattr(self, "engine") not in OPENAICHAT_ENGINES:
raise ValueError(
f"Invalid engine {getattr(self, 'engine')}. "
f"Must be {OPENAICHAT_ENGINES}."
)
def get_generation_url(self) -> str:
"""Get generation URL."""
return self.host + "/chat/completions"
def supports_batch_inference(self) -> bool:
"""Return whether the client supports batch inference."""
return False
def get_model_params(self) -> Dict:
"""
Get model params.
By getting model params from the server, we can add to request
and make sure cache keys are unique to model.
Returns:
model params.
"""
return {"model_name": self.NAME, "engine": getattr(self, "engine")}
def preprocess_request_params(self, request: Dict[str, Any]) -> Dict[str, Any]:
"""
Preprocess request params.
Args:
request: request params.
Returns:
request params.
"""
# Format for chat model
request = copy.deepcopy(request)
prompt = request.pop("prompt")
if isinstance(prompt, str):
messages = [{"role": "user", "content": prompt}]
elif isinstance(prompt, list) and isinstance(prompt[0], str):
prompt_list = prompt
messages = [{"role": "user", "content": prompt} for prompt in prompt_list]
elif isinstance(prompt, list) and isinstance(prompt[0], dict):
for pmt_dict in prompt:
if "role" not in pmt_dict or "content" not in pmt_dict:
raise ValueError(
"Prompt must be list of dicts with 'role' and 'content' "
f"keys. Got {prompt}."
)
messages = prompt
else:
raise ValueError(
"Prompt must be string, list of strings, or list of dicts."
f"Got {prompt}"
)
request["messages"] = messages
return super().preprocess_request_params(request)
def postprocess_response(self, response: Dict, request: Dict) -> Dict[str, Any]:
"""
Postprocess and validate response as dict.
Args:
response: response
request: request
Return:
response as dict
"""
new_choices = []
response = copy.deepcopy(response)
for message in response["choices"]:
if "delta" in message:
# This is a streaming response
if "content" in message["delta"]:
new_choices.append({"text": message["delta"]["content"]})
else:
new_choices.append({"text": message["message"]["content"]})
response["choices"] = new_choices
return super().postprocess_response(response, request)
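

# Illustrative prompt-to-messages conversion performed by
# preprocess_request_params (values are made up):
#   "hi"                                  -> [{"role": "user", "content": "hi"}]
#   ["hi", "bye"]                         -> one user message per string
#   [{"role": "system", "content": "x"},
#    {"role": "user", "content": "y"}]    -> passed through unchanged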
| manifest-main | manifest/clients/openai_chat.py |
"""TOMA client."""
import logging
import os
from datetime import datetime
from typing import Any, Dict, Optional
import requests
from manifest.clients.client import Client
from manifest.request import LMRequest
logger = logging.getLogger(__name__)
# Engines are dynamically instantiated from API
# but a few example engines are listed below.
TOMA_ENGINES = {
"Together-gpt-JT-6B-v1",
}
class TOMAClient(Client):
"""TOMA client."""
# User param -> (client param, default value)
PARAMS = {
"engine": ("model", "Together-gpt-JT-6B-v1"),
"temperature": ("temperature", 0.1),
"max_tokens": ("max_tokens", 32),
        # n is deprecated with the new API but will come back online soon
# "n": ("n", 1),
"top_p": ("top_p", 0.9),
"top_k": ("top_k", 40),
"stop_sequences": ("stop", []),
}
REQUEST_CLS = LMRequest
NAME = "toma"
def connect(
self,
connection_str: Optional[str] = None,
client_args: Dict[str, Any] = {},
) -> None:
"""
Connect to the TOMA url.
        Args:
connection_str: connection string.
client_args: client arguments.
"""
self.host = os.environ.get("TOMA_URL", None)
if not self.host:
raise ValueError("TOMA_URL environment variable not set.")
# self.api_key = os.environ.get("TOMA_API_KEY", connection_str)
# if self.api_key is None:
# raise ValueError(
# "TOMA API key not set. Set TOMA_API_KEY environment "
# "variable or pass through `client_connection`."
# )
for key in self.PARAMS:
setattr(self, key, client_args.pop(key, self.PARAMS[key][1]))
# Not functioning yet in new TOMA API. Will come back online soon.
"""
model_heartbeats = self.get_model_heartbeats()
if getattr(self, "engine") not in model_heartbeats.keys():
raise ValueError(
f"Invalid engine {getattr(self, 'engine')}. "
f"Must be {model_heartbeats.keys()}."
)
model_heartbeat_threshold = 120
logger.info(f"TOMA model heartbeats\n {json.dumps(model_heartbeats)}")
if (
model_heartbeats[getattr(self, "engine")]["last_ping"]
> model_heartbeat_threshold
):
logger.warning(
f"Model {getattr(self, 'engine')} has not been pinged in "
f"{model_heartbeats[getattr(self, 'engine')]} seconds."
)
if model_heartbeats[getattr(self, "engine")]["expected_runtime"] > getattr(
self, "client_timeout"
):
logger.warning(
f"Model {getattr(self, 'engine')} has expected runtime "
f"{model_heartbeats[getattr(self, 'engine')]['expected_runtime']} "
f"and may take longer than {getattr(self, 'client_timeout')} "
"seconds to respond. Increase client_timeout "
"to avoid timeout."
)
"""
def close(self) -> None:
"""Close the client."""
pass
def get_generation_url(self) -> str:
"""Get generation URL."""
return self.host + "/inference"
def get_generation_header(self) -> Dict[str, str]:
"""
Get generation header.
Returns:
header.
"""
return {}
def supports_batch_inference(self) -> bool:
"""Return whether the client supports batch inference."""
return False
def supports_streaming_inference(self) -> bool:
"""Return whether the client supports streaming inference.
Override in child client class.
"""
return False
def get_model_params(self) -> Dict:
"""
Get model params.
By getting model params from the server, we can add to request
and make sure cache keys are unique to model.
Returns:
model params.
"""
return {"model_name": self.NAME, "engine": getattr(self, "engine")}
def get_model_heartbeats(self) -> Dict[str, Dict]:
"""
Get TOMA models and their last ping time.
        Some TOMA models are not loaded and will not respond.
Returns:
model name to time since last ping (sec).
"""
res = requests.get(self.host + "/model_statuses").json()
heartbeats = {}
for mod in res:
mod_time = datetime.fromisoformat(mod["last_heartbeat"])
now = datetime.now(mod_time.tzinfo)
heartbeats[mod["name"]] = {
"last_ping": (now - mod_time).total_seconds(),
"expected_runtime": mod["expected_runtime"],
}
return heartbeats
def postprocess_response(self, response: Dict, request: Dict) -> Dict[str, Any]:
"""
Format response to dict.
Args:
response: response
request: request
Return:
response as dict
"""
return {
"model": getattr(self, "engine"),
"choices": [
{
"text": item["text"],
# "token_logprobs": [],
}
for item in response["output"]["choices"]
],
}
| manifest-main | manifest/clients/toma.py |
"""Client class."""
import asyncio
import copy
import json
import logging
import math
from abc import ABC, abstractmethod
from typing import Any, Dict, Generator, List, Optional, Tuple, Union, cast
import aiohttp
import requests
import tqdm.asyncio
from tenacity import RetryCallState, retry, stop_after_attempt, wait_random_exponential
from manifest.request import (
DEFAULT_REQUEST_KEYS,
NOT_CACHE_KEYS,
LMChatRequest,
LMRequest,
LMScoreRequest,
Request,
)
from manifest.response import (
RESPONSE_CONSTRUCTORS,
ArrayModelChoice,
LMModelChoice,
ModelChoices,
Response,
Usage,
Usages,
)
logger = logging.getLogger(__name__)
ATTEMPTS_BEFORE_STOP = 20
ATTEMPTS_TIMEOUT = 120
# http_status mainly for Azure and e.code mainly for OpenAI usage
# e.http_status == 408 occurs when Azure times out
# e.code == 429 is a rate limit error
# e.code == 500 or 502 occurs on a server error
API_ERROR_CODE = {408, 429, 500, 502}
def retry_if_ratelimit(retry_base: RetryCallState) -> bool:
"""Return whether to retry if ratelimited."""
try:
if isinstance(retry_base.outcome.exception(), requests.exceptions.HTTPError):
exception = cast(
requests.exceptions.HTTPError, retry_base.outcome.exception()
)
# 500 is a server error, 429 is a rate limit error
if exception.response.status_code in API_ERROR_CODE: # type: ignore
return True
except Exception:
pass
return False
def return_error_response(retry_state: RetryCallState) -> dict:
"""Return error response if all retries failed."""
request_params = retry_state.args[1]
number_of_prompts = (
len(request_params["prompt"])
if "prompt" in request_params
else len(request_params["messages"])
)
return {
"choices": [],
"usage": {
"total_tokens": 0,
"prompt_tokens": 0,
"completion_tokens": 0,
},
"errors": [str(retry_state.outcome.exception())] * number_of_prompts,
}
class Client(ABC):
"""Client class."""
# Must be overridden by child class
PARAMS: Dict[str, Tuple[str, Any]] = {}
REQUEST_CLS = Request
NAME: str = None
IS_CHAT: bool = False
def __init__(
self, connection_str: Optional[str] = None, client_args: Dict[str, Any] = {}
):
"""
Initialize client.
kwargs are passed to client as default parameters.
For clients like OpenAI that do not require a connection,
the connection_str can be None.
Args:
connection_str: connection string for client.
client_args: client arguments.
"""
self.connect(connection_str, client_args)
@abstractmethod
def connect(
self, connection_str: Optional[str], client_args: Dict[str, Any]
) -> None:
"""
Connect to client.
Override in child client class.
Args:
connection_str: connection string.
"""
raise NotImplementedError()
@abstractmethod
def close(self) -> None:
"""Close the client.
Override in child client class.
"""
raise NotImplementedError()
@abstractmethod
def get_generation_url(self) -> str:
"""Get generation URL.
Override in child client class.
"""
raise NotImplementedError()
@abstractmethod
def get_generation_header(self) -> Dict[str, str]:
"""
Get generation header.
Override in child client class.
Returns:
header.
"""
raise NotImplementedError()
@abstractmethod
def supports_batch_inference(self) -> bool:
"""Return whether the client supports batch inference.
Override in child client class.
"""
raise NotImplementedError()
@abstractmethod
def supports_streaming_inference(self) -> bool:
"""Return whether the client supports streaming inference.
Override in child client class.
"""
raise NotImplementedError()
@abstractmethod
def get_model_params(self) -> Dict:
"""
Get model params.
By getting model params from the server, we can add to request
and make sure cache keys are unique to model.
Override in child client class.
Returns:
model params.
"""
raise NotImplementedError()
def get_tokenizer(self, model: str) -> Tuple[Any, int]:
"""Get tokenizer for model.
Override in child client class. Return None, -1 if not supported
or no prompt truncation required.
Returns:
tokenizer: tokenizer with encoder and decode
max_length: max length of model
"""
return None, -1
def get_model_inputs(self) -> List:
"""
Get allowable model inputs.
Returns:
model inputs.
"""
return list(self.PARAMS.keys())
def split_usage(self, request: Dict, choices: List[str]) -> List[Dict[str, int]]:
"""Split usage into list of usages for each prompt."""
# TODO: add this in using default tokenizer
return []
def preprocess_request_params(self, request: Dict[str, Any]) -> Dict[str, Any]:
"""
Preprocess request params.
Override in child client class to reformat requests to model.
Args:
request: request params.
Returns:
request params.
"""
return request
def postprocess_response(
self, response: Dict[str, Any], request: Dict[str, Any]
) -> Dict[str, Any]:
"""
Postprocess and validate response as dict.
Override in child client class to reform model responses.
Args:
response: response
request: request
Return:
response as dict
"""
if "choices" not in response:
raise ValueError(f"Invalid response: {response}")
if "usage" in response:
# Handle splitting the usages for batch requests
if len(response["choices"]) == 1:
if isinstance(response["usage"], list):
response["usage"] = response["usage"][0]
response["usage"] = [response["usage"]]
else:
# Try to split usage
split_usage = self.split_usage(request, response["choices"])
if split_usage:
response["usage"] = split_usage
return response
def get_request(
self, prompt: Union[str, List[str]], request_args: Dict[str, Any]
) -> Request:
"""
Parse model kwargs to request.
Args:
prompt: prompt.
request_args: request arguments.
Returns:
request.
"""
params = {"prompt": prompt}
# Adds default values from self.PARAMS if not in request_args
for key in self.PARAMS:
params[key] = request_args.pop(key, getattr(self, key))
# Allows for overriding DEFAULT_REQUEST_KEYS even if they are not
# in self.PARAMS. Note that DEFAULT_REQUEST_KEYS match the default
# values in Request.
for key in DEFAULT_REQUEST_KEYS:
if key not in params and key in request_args:
params[key] = request_args.pop(key)
return self.REQUEST_CLS(**params) # type: ignore
def _get_request_params(self, request: Request) -> Dict[str, Any]:
"""Get request params.
Add default keys that we need for requests such as batch_size.
We drop these before sending to the model.
"""
params_to_add = DEFAULT_REQUEST_KEYS.copy()
# This will override DEFAULT_REQUEST_KEYS with those in PARAMS
params_to_add.update(self.PARAMS)
# to_dict will handle parameter renaming but not any
# default value handling - that is done in get_request()
request_params = request.to_dict(params_to_add)
return request_params
def get_cache_key(self, request: Request) -> Dict[str, Any]:
"""Get cache key for request.
Skip keys that are not cache keys such as batch_size.
"""
request_params = self._get_request_params(request)
for key in NOT_CACHE_KEYS:
request_params.pop(key, None)
# Make sure to add model params and request class
request_params.update(self.get_model_params())
request_params["request_cls"] = request.__class__.__name__
return request_params
def _split_requests(
self, request_params: Dict[str, Any], batch_size: int, key: str = "prompt"
) -> List[Dict[str, Any]]:
"""Split request into batch_sized request.
Args:
request_params: request params.
batch_size: batch size for requests.
key: key to batch over
Returns:
list of request params.
"""
data = copy.deepcopy(request_params[key])
data_size = len(request_params[key])
request_params_list = []
for i in range(0, data_size, batch_size):
params = copy.deepcopy(request_params)
params[key] = data[i] if batch_size == 1 else data[i : i + batch_size]
request_params_list.append(params)
return request_params_list
def _get_model_choices(self, response: Dict) -> ModelChoices:
"""Format response to ModelChoices."""
# Array or text response
response_type = RESPONSE_CONSTRUCTORS[self.REQUEST_CLS]["response_type"]
if response_type == "array":
choices: List[Union[LMModelChoice, ArrayModelChoice]] = [
ArrayModelChoice(**choice) for choice in response["choices"]
]
else:
choices = [LMModelChoice(**choice) for choice in response["choices"]]
return ModelChoices(choices=choices)
def _stitch_responses(self, request: Request, responses: List[Dict]) -> Response:
"""Stitch responses together.
Useful for batch requests.
"""
choices = []
usages = []
for res_dict in responses:
choices.extend(res_dict["choices"])
if "usage" in res_dict:
usages.extend(res_dict["usage"])
final_response_dict = {"choices": choices}
final_usages = None
if usages:
final_usages = Usages(usages=[Usage(**usage) for usage in usages])
# TODO: Add usage based on tokenizer
return Response(
self._get_model_choices(final_response_dict),
cached=False,
request=request,
usages=final_usages,
**RESPONSE_CONSTRUCTORS[self.REQUEST_CLS], # type: ignore
)
def _verify_request_lengths(
self, request: Dict[str, Any], model: str, max_tokens: int
) -> None:
"""Verify that the request length is not too long."""
encoder, max_length = self.get_tokenizer(model)
if not encoder or max_length < 0:
return
if isinstance(request["prompt"], str):
prompts = [request["prompt"]]
else:
prompts = request["prompt"]
for i in range(len(prompts)):
prompt = prompts[i]
encoded_prompt = encoder.encode(prompt)
if len(encoded_prompt) + max_tokens > max_length:
logger.warning(
f"Prompt {prompt} is too long for model {model}. "
"Truncating prompt from left."
)
# -20 to be safe
prompt = encoder.decode(
encoded_prompt[-int(max_length - max_tokens - 20) :]
)
prompts[i] = prompt
if isinstance(request["prompt"], str):
request["prompt"] = prompts[0]
else:
request["prompt"] = prompts
@retry(
reraise=True,
retry=retry_if_ratelimit,
wait=wait_random_exponential(min=1, max=ATTEMPTS_TIMEOUT),
stop=stop_after_attempt(ATTEMPTS_BEFORE_STOP),
)
def _run_completion(
self, request_params: Dict[str, Any], retry_timeout: int
) -> Dict:
"""Execute completion request.
Args:
request_params: request params.
retry_timeout: retry timeout.
Returns:
response as dict.
"""
request_params = self.preprocess_request_params(request_params)
post_str = self.get_generation_url()
res = requests.post(
post_str,
headers=self.get_generation_header(),
json=request_params,
timeout=retry_timeout,
)
try:
res.raise_for_status()
except requests.exceptions.HTTPError:
logger.error(res.json())
raise requests.exceptions.HTTPError(res.json())
return self.postprocess_response(res.json(), request_params)
@retry(
reraise=True,
retry=retry_if_ratelimit,
wait=wait_random_exponential(min=1, max=ATTEMPTS_TIMEOUT),
stop=stop_after_attempt(ATTEMPTS_BEFORE_STOP),
)
async def _arun_completion(
self, request_params: Dict[str, Any], retry_timeout: int
) -> Dict:
"""Async execute completion request.
Args:
request_params: request params.
retry_timeout: retry timeout.
Returns:
response as dict.
"""
request_params = self.preprocess_request_params(request_params)
post_str = self.get_generation_url()
async with aiohttp.ClientSession(timeout=retry_timeout) as session:
async with session.post(
post_str,
headers=self.get_generation_header(),
json=request_params,
timeout=retry_timeout,
) as res:
res.raise_for_status()
res_json = await res.json(content_type=None)
return self.postprocess_response(res_json, request_params)
@retry(
reraise=True,
retry=retry_if_ratelimit,
wait=wait_random_exponential(min=1, max=ATTEMPTS_TIMEOUT),
stop=stop_after_attempt(ATTEMPTS_BEFORE_STOP),
)
def _run_streaming_completion(
self, request_params: Dict[str, Any], retry_timeout: int
) -> Generator[Dict, None, None]:
"""Execute completion request streaming.
Args:
request_params: request params.
retry_timeout: retry timeout.
Returns:
response as dict.
"""
request_params = self.preprocess_request_params(request_params)
request_params["stream"] = True
post_str = self.get_generation_url()
res_iter = requests.post(
post_str,
headers=self.get_generation_header(),
json=request_params,
timeout=retry_timeout,
stream=True,
)
for res_token in res_iter.iter_lines():
if res_token:
decoded_res_token = res_token.decode("utf-8")
decoded_res_token = decoded_res_token.replace("data: ", "")
if decoded_res_token == "[DONE]":
break
try:
decoded_res_token_dct = json.loads(decoded_res_token)
postprocess_res_token_dct = self.postprocess_response(
decoded_res_token_dct, request_params
)
# If nothing is returned, skip
if (
not postprocess_res_token_dct
or not postprocess_res_token_dct["choices"]
):
continue
yield postprocess_res_token_dct
except Exception as e:
raise e
def run_request(self, request: Request) -> Response:
"""
Run request.
Args:
request: request.
Returns:
response.
"""
# Make everything list for consistency
if isinstance(request.prompt, list):
prompt_list = request.prompt
else:
prompt_list = [request.prompt]
request_params = self._get_request_params(request)
# Set the params as a list. Do not set the request
# object itself as the cache will then store it as a
# list which is inconsistent with the request input.
request_params["prompt"] = prompt_list
# If batch_size is not set, set it to 1
batch_size = request_params.pop("batch_size") or 1
if not self.supports_batch_inference() and batch_size != 1:
logger.warning(
f"{self.__class__.__name__} does not support batch inference."
" Setting batch size to 1"
)
batch_size = 1
# Take the default keys we need and drop the rest as they
# are not part of the model request.
retry_timeout = request_params.pop("client_timeout")
for key in DEFAULT_REQUEST_KEYS:
request_params.pop(key, None)
        # Make sure requests are within the max request length
# If no tokenizer is set or not LM request, this
# will do nothing
if isinstance(request, LMRequest):
self._verify_request_lengths(
request_params, model=request.engine, max_tokens=request.max_tokens
)
# Batch requests
num_batches = len(prompt_list) // batch_size
if len(prompt_list) % batch_size != 0:
batch_size = int(math.ceil(len(prompt_list) / (num_batches + 1)))
request_batches = self._split_requests(request_params, batch_size)
response_dicts = [
self._run_completion(batch, retry_timeout) for batch in request_batches
]
# Flatten responses
return self._stitch_responses(request, response_dicts)
async def arun_batch_request(
self, request: Request, verbose: bool = False
) -> Response:
"""
        Run async batch request.
        Args:
            request: request.
Returns:
response.
"""
required_batch_size = None
if not self.supports_batch_inference():
required_batch_size = 1
if not isinstance(request.prompt, list):
raise AssertionError(
"request.prompt must be a list for async batch inference."
)
request_params = self._get_request_params(request)
# Take the default keys we need and drop the rest as they
# are not part of the model request.
retry_timeout = request_params.pop("client_timeout")
batch_size = request_params.pop("batch_size")
batch_size = required_batch_size or batch_size
for key in DEFAULT_REQUEST_KEYS:
request_params.pop(key, None)
        # Make sure requests are within the max request length
# If no tokenizer is set or not LM request, this
# will do nothing
if isinstance(request, LMRequest):
self._verify_request_lengths(
request_params, model=request.engine, max_tokens=request.max_tokens
)
# Batch requests
num_batches = len(request.prompt) // batch_size
if len(request.prompt) % batch_size != 0:
batch_size = int(math.ceil(len(request.prompt) / (num_batches + 1)))
request_batches = self._split_requests(request_params, batch_size)
all_tasks = [
asyncio.create_task(self._arun_completion(batch, retry_timeout))
for batch in request_batches
]
responses = await tqdm.asyncio.tqdm.gather(*all_tasks, disable=not verbose)
# Flatten responses
return self._stitch_responses(request, responses)
def run_chat_request(
self,
request: LMChatRequest,
) -> Response:
"""
Get the response from chat model.
Args:
request: request.
Returns:
response.
"""
request_params = self._get_request_params(request)
# Take the default keys we need and drop the rest as they
# are not part of the model request.
retry_timeout = request_params.pop("client_timeout")
for key in DEFAULT_REQUEST_KEYS:
request_params.pop(key, None)
        # Make sure requests are within the max request length
# If no tokenizer is set or not LM request, this
# will do nothing
self._verify_request_lengths(
request_params, model=request.engine, max_tokens=request.max_tokens
)
response_dict = self._run_completion(request_params, retry_timeout)
usages = None
if "usage" in response_dict:
usages = [Usage(**usage) for usage in response_dict["usage"]]
return Response(
response=self._get_model_choices(response_dict),
cached=False,
request=request,
usages=Usages(usages=usages) if usages else None,
**RESPONSE_CONSTRUCTORS[LMChatRequest], # type: ignore
)
def run_streaming_request(
self, request: Request
) -> Generator[Response, None, None]:
"""
Run streaming request.
Args:
request: request.
Returns:
response.
"""
if not isinstance(request.prompt, str):
raise ValueError("Streaming requests must have a single prompt.")
if not self.supports_streaming_inference():
raise ValueError(
f"{self.__class__.__name__} does not support streaming inference."
)
request_params = self._get_request_params(request)
# Take the default keys we need and drop the rest as they
# are not part of the model request.
retry_timeout = request_params.pop("client_timeout")
for key in DEFAULT_REQUEST_KEYS:
request_params.pop(key, None)
        # Make sure requests are within the max request length
# If no tokenizer is set or not LM request, this
# will do nothing
if isinstance(request, LMRequest):
self._verify_request_lengths(
request_params, model=request.engine, max_tokens=request.max_tokens
)
for token_response in self._run_streaming_completion(
request_params, retry_timeout
):
yield self._stitch_responses(request, [token_response])
def run_score_prompt_request(
self,
request: LMScoreRequest,
) -> Response:
"""
Get the logit score of the prompt via a forward pass of the model.
Args:
request: request.
Returns:
response.
"""
raise NotImplementedError(
f"{self.__class__.__name__} does not support prompt scoring request."
)
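# A minimal illustrative sketch of the batch rebalancing arithmetic used in
# run_request and arun_batch_request above: when the prompts do not divide evenly
# by batch_size, the batch size is recomputed so prompts spread evenly over
# num_batches + 1 batches. The helper name and the numbers below are made up.
if __name__ == "__main__":
    import math as _math

    def _rebalanced_batch_size(num_prompts: int, batch_size: int) -> int:
        # Mirrors the arithmetic above: add one batch and rebalance on a remainder.
        num_batches = num_prompts // batch_size
        if num_prompts % batch_size != 0:
            batch_size = int(_math.ceil(num_prompts / (num_batches + 1)))
        return batch_size

    # 10 prompts at batch_size 4 stay at 4 per batch (ceil(10 / 3) == 4);
    # 9 prompts at batch_size 4 are rebalanced to 3 per batch (ceil(9 / 3) == 3).
    assert _rebalanced_batch_size(10, 4) == 4
    assert _rebalanced_batch_size(9, 4) == 3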
| manifest-main | manifest/clients/client.py |
"""Diffuser client."""
import logging
from functools import lru_cache
from typing import Any, Dict, Optional
import numpy as np
import requests
from manifest.clients.client import Client
from manifest.request import DiffusionRequest
logger = logging.getLogger(__name__)
class DiffuserClient(Client):
"""Diffuser client."""
# User param -> (client param, default value)
PARAMS = {
"num_inference_steps": ("num_inference_steps", 50),
"height": ("height", 512),
"width": ("width", 512),
"n": ("num_images_per_prompt", 1),
"guidance_scale": ("guidance_scale", 7.5),
"eta": ("eta", 0.0),
}
REQUEST_CLS = DiffusionRequest
NAME = "diffuser"
def connect(
self,
connection_str: Optional[str] = None,
client_args: Dict[str, Any] = {},
) -> None:
"""
Connect to the Diffuser url.
        Args:
connection_str: connection string.
client_args: client arguments.
"""
        if not connection_str:
            raise ValueError("Must provide connection string")
        self.host = connection_str.rstrip("/")
for key in self.PARAMS:
setattr(self, key, client_args.pop(key, self.PARAMS[key][1]))
self.model_params = self.get_model_params()
def to_numpy(self, image: np.ndarray) -> np.ndarray:
"""Convert a numpy image to a PIL image.
Adapted from https://github.com/huggingface/diffusers/blob/src/diffusers/pipelines/pipeline_utils.py#L808 # noqa: E501
"""
image = (image * 255).round().astype("uint8")
return image
def close(self) -> None:
"""Close the client."""
pass
def get_generation_url(self) -> str:
"""Get generation URL."""
return self.host + "/completions"
def get_generation_header(self) -> Dict[str, str]:
"""
Get generation header.
Returns:
header.
"""
return {}
def supports_batch_inference(self) -> bool:
"""Return whether the client supports batch inference."""
return True
def supports_streaming_inference(self) -> bool:
"""Return whether the client supports streaming inference.
Override in child client class.
"""
return False
@lru_cache(maxsize=1)
def get_model_params(self) -> Dict:
"""
Get model params.
By getting model params from the server, we can add to request
and make sure cache keys are unique to model.
Returns:
model params.
"""
res = requests.post(self.host + "/params").json()
res["client_name"] = self.NAME
return res
def postprocess_response(self, response: Dict, request: Dict) -> Dict[str, Any]:
"""
Format response to dict.
Args:
response: response
request: request
Return:
response as dict
"""
# Convert array to np.array
for choice in response["choices"]:
choice["array"] = self.to_numpy(np.array(choice["array"]))
return response
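# A minimal illustrative sketch of the pixel rescaling done in to_numpy above,
# assuming the diffusion server returns float pixel values in [0, 1]. The tiny
# fake image below is made up; np is the numpy import at the top of this file.
if __name__ == "__main__":
    fake_image = np.full((2, 2, 3), 0.5)  # fake 2x2 RGB image of mid-gray floats
    as_uint8 = (fake_image * 255).round().astype("uint8")  # same math as to_numpy
    assert as_uint8.dtype.name == "uint8"
    assert int(as_uint8[0, 0, 0]) == 128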
| manifest-main | manifest/clients/diffuser.py |
"""Client init."""
| manifest-main | manifest/clients/__init__.py |
"""Google client."""
import logging
import os
import subprocess
from typing import Any, Dict, Optional, Type
from manifest.clients.client import Client
from manifest.request import LMRequest, Request
logger = logging.getLogger(__name__)
# https://cloud.google.com/vertex-ai/docs/generative-ai/start/quickstarts/api-quickstart
GOOGLE_ENGINES = {
"text-bison",
}
def get_project_id() -> Optional[str]:
"""Get project ID.
Run
`gcloud config get-value project`
"""
try:
project_id = subprocess.run(
["gcloud", "config", "get-value", "project"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
if project_id.stderr.decode("utf-8").strip():
return None
return project_id.stdout.decode("utf-8").strip()
except Exception:
return None
class GoogleClient(Client):
"""Google client."""
# User param -> (client param, default value)
PARAMS = {
"engine": ("model", "text-bison"),
"temperature": ("temperature", 1.0),
"max_tokens": ("maxOutputTokens", 10),
"top_p": ("topP", 1.0),
"top_k": ("topK", 1),
"batch_size": ("batch_size", 20),
}
REQUEST_CLS: Type[Request] = LMRequest
NAME = "google"
def connect(
self,
connection_str: Optional[str] = None,
client_args: Dict[str, Any] = {},
) -> None:
"""
Connect to the GoogleVertex API.
connection_str is passed as default GOOGLE_API_KEY if variable not set.
Args:
connection_str: connection string.
client_args: client arguments.
"""
        connection_parts = (connection_str or "").split("::")
if len(connection_parts) == 1:
self.api_key = connection_parts[0]
self.project_id = None
elif len(connection_parts) == 2:
self.api_key, self.project_id = connection_parts
else:
raise ValueError(
"Invalid connection string. "
"Must be either API_KEY or API_KEY::PROJECT_ID"
)
self.api_key = self.api_key or os.environ.get("GOOGLE_API_KEY")
if self.api_key is None:
raise ValueError(
"GoogleVertex API key not set. Set GOOGLE_API_KEY environment "
"variable or pass through `client_connection`. This can be "
"found by running `gcloud auth print-access-token`"
)
self.project_id = (
self.project_id or os.environ.get("GOOGLE_PROJECT_ID") or get_project_id()
)
if self.project_id is None:
raise ValueError("GoogleVertex project ID not set. Set GOOGLE_PROJECT_ID")
self.host = f"https://us-central1-aiplatform.googleapis.com/v1/projects/{self.project_id}/locations/us-central1/publishers/google/models" # noqa: E501
for key in self.PARAMS:
setattr(self, key, client_args.pop(key, self.PARAMS[key][1]))
if getattr(self, "engine") not in GOOGLE_ENGINES:
raise ValueError(
f"Invalid engine {getattr(self, 'engine')}. Must be {GOOGLE_ENGINES}."
)
def close(self) -> None:
"""Close the client."""
pass
def get_generation_url(self) -> str:
"""Get generation URL."""
model = getattr(self, "engine")
return self.host + f"/{model}:predict"
def get_generation_header(self) -> Dict[str, str]:
"""
Get generation header.
Returns:
header.
"""
return {"Authorization": f"Bearer {self.api_key}"}
def supports_batch_inference(self) -> bool:
"""Return whether the client supports batch inference."""
return True
def supports_streaming_inference(self) -> bool:
"""Return whether the client supports streaming inference.
Override in child client class.
"""
return False
def get_model_params(self) -> Dict:
"""
Get model params.
By getting model params from the server, we can add to request
and make sure cache keys are unique to model.
Returns:
model params.
"""
return {"model_name": self.NAME, "engine": getattr(self, "engine")}
def preprocess_request_params(self, request: Dict[str, Any]) -> Dict[str, Any]:
"""
Preprocess request params.
Args:
request: request params.
Returns:
request params.
"""
        # Reformat the request params for Google
prompt = request.pop("prompt")
if isinstance(prompt, str):
prompt_list = [prompt]
else:
prompt_list = prompt
google_request = {
"instances": [{"prompt": prompt} for prompt in prompt_list],
"parameters": request,
}
return super().preprocess_request_params(google_request)
def postprocess_response(
self, response: Dict[str, Any], request: Dict[str, Any]
) -> Dict[str, Any]:
"""
Validate response as dict.
Assumes response is dict
{
"predictions": [
{
"safetyAttributes": {
"categories": ["Violent", "Sexual"],
"blocked": false,
"scores": [0.1, 0.1]
},
"content": "SELECT * FROM "WWW";"
}
]
}
Args:
response: response
request: request
Return:
response as dict
"""
google_predictions = response.pop("predictions")
new_response = {
"choices": [
{
"text": prediction["content"],
}
for prediction in google_predictions
]
}
return super().postprocess_response(new_response, request)
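# A minimal illustrative sketch of the Vertex AI payload shape that
# preprocess_request_params builds from a flat request dict. All values below are
# made up and no request is sent.
if __name__ == "__main__":
    flat_request = {"prompt": ["hello", "bye"], "temperature": 0.5, "maxOutputTokens": 10}
    prompts = flat_request.pop("prompt")
    vertex_payload = {
        "instances": [{"prompt": p} for p in prompts],  # one instance per prompt
        "parameters": flat_request,  # remaining keys ride along as parameters
    }
    assert vertex_payload["instances"][0] == {"prompt": "hello"}
    assert vertex_payload["parameters"]["maxOutputTokens"] == 10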
| manifest-main | manifest/clients/google.py |
"""Google client."""
import copy
import logging
import os
from typing import Any, Dict, Optional, Type
from manifest.clients.google import GoogleClient, get_project_id
from manifest.request import LMRequest, Request
logger = logging.getLogger(__name__)
# https://cloud.google.com/vertex-ai/docs/generative-ai/start/quickstarts/api-quickstart
GOOGLE_ENGINES = {
"chat-bison",
}
class GoogleChatClient(GoogleClient):
"""GoogleChat client."""
# User param -> (client param, default value)
PARAMS = {
"engine": ("model", "chat-bison"),
"temperature": ("temperature", 1.0),
"max_tokens": ("maxOutputTokens", 10),
"top_p": ("topP", 1.0),
"top_k": ("topK", 1),
"batch_size": ("batch_size", 20),
}
REQUEST_CLS: Type[Request] = LMRequest
NAME = "googlechat"
IS_CHAT = True
def connect(
self,
connection_str: Optional[str] = None,
client_args: Dict[str, Any] = {},
) -> None:
"""
Connect to the GoogleVertex API.
connection_str is passed as default GOOGLE_API_KEY if variable not set.
Args:
connection_str: connection string.
client_args: client arguments.
"""
        connection_parts = (connection_str or "").split("::")
        if len(connection_parts) == 1:
            self.api_key = connection_parts[0]
            self.project_id = None
elif len(connection_parts) == 2:
self.api_key, self.project_id = connection_parts
else:
raise ValueError(
"Invalid connection string. "
"Must be either API_KEY or API_KEY::PROJECT_ID"
)
self.api_key = self.api_key or os.environ.get("GOOGLE_API_KEY")
if self.api_key is None:
raise ValueError(
"GoogleVertex API key not set. Set GOOGLE_API_KEY environment "
"variable or pass through `client_connection`. This can be "
"found by running `gcloud auth print-access-token`"
)
self.project_id = (
self.project_id or os.environ.get("GOOGLE_PROJECT_ID") or get_project_id()
)
if self.project_id is None:
raise ValueError("GoogleVertex project ID not set. Set GOOGLE_PROJECT_ID")
self.host = f"https://us-central1-aiplatform.googleapis.com/v1/projects/{self.project_id}/locations/us-central1/publishers/google/models" # noqa: E501
for key in self.PARAMS:
setattr(self, key, client_args.pop(key, self.PARAMS[key][1]))
if getattr(self, "engine") not in GOOGLE_ENGINES:
raise ValueError(
f"Invalid engine {getattr(self, 'engine')}. Must be {GOOGLE_ENGINES}."
)
def supports_batch_inference(self) -> bool:
"""Return whether the client supports batch inference."""
return False
def preprocess_request_params(self, request: Dict[str, Any]) -> Dict[str, Any]:
"""
Preprocess request params.
Args:
request: request params.
Returns:
request params.
"""
# Format for chat model
request = copy.deepcopy(request)
prompt = request.pop("prompt")
if isinstance(prompt, str):
messages = [{"author": "user", "content": prompt}]
elif isinstance(prompt, list) and isinstance(prompt[0], str):
prompt_list = prompt
messages = [{"author": "user", "content": prompt} for prompt in prompt_list]
elif isinstance(prompt, list) and isinstance(prompt[0], dict):
for pmt_dict in prompt:
if "author" not in pmt_dict or "content" not in pmt_dict:
raise ValueError(
"Prompt must be list of dicts with 'author' and 'content' "
f"keys. Got {prompt}."
)
messages = prompt
else:
raise ValueError(
"Prompt must be string, list of strings, or list of dicts."
f"Got {prompt}"
)
new_request = {
"instances": [{"messages": messages}],
"parameters": request,
}
return super(GoogleClient, self).preprocess_request_params(new_request)
def postprocess_response(self, response: Dict, request: Dict) -> Dict[str, Any]:
"""
Validate response as dict.
Assumes response is dict
{
"candidates": [
{
"safetyAttributes": {
"categories": ["Violent", "Sexual"],
"blocked": false,
"scores": [0.1, 0.1]
},
"author": "1",
"content": "SELECT * FROM "WWW";"
}
]
}
Args:
response: response
request: request
Return:
response as dict
"""
google_predictions = response.pop("predictions")
new_response = {
"choices": [
{
"text": prediction["candidates"][0]["content"],
}
for prediction in google_predictions
]
}
return super(GoogleClient, self).postprocess_response(new_response, request)
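# A minimal illustrative sketch mirroring the prompt normalization in
# preprocess_request_params above, which accepts a string, a list of strings, or a
# list of {"author", "content"} dicts. The helper name and example prompts are made up.
if __name__ == "__main__":
    def _to_messages(prompt):
        if isinstance(prompt, str):
            return [{"author": "user", "content": prompt}]
        if prompt and isinstance(prompt[0], str):
            return [{"author": "user", "content": p} for p in prompt]
        return prompt  # assumed to already be a list of author/content dicts

    assert _to_messages("hi") == [{"author": "user", "content": "hi"}]
    assert _to_messages(["hi", "bye"])[1]["content"] == "bye"
    dialog = [{"author": "user", "content": "hi"}, {"author": "bot", "content": "hello"}]
    assert _to_messages(dialog) is dialog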
| manifest-main | manifest/clients/google_chat.py |
"""OpenAI client."""
import logging
import os
from typing import Any, Dict, List, Optional, Type
import tiktoken
from manifest.clients.client import Client
from manifest.request import LMRequest, Request
logger = logging.getLogger(__name__)
OPENAI_ENGINES = {
"text-davinci-003",
"text-davinci-002",
"text-davinci-001",
"davinci",
"curie",
"ada",
"babbage",
"text-curie-001",
"text-babbage-001",
"text-ada-001",
"code-davinci-002",
"code-cushman-001",
}
class OpenAIClient(Client):
"""OpenAI client."""
# User param -> (client param, default value)
PARAMS = {
"engine": ("model", "text-davinci-003"),
"temperature": ("temperature", 1.0),
"max_tokens": ("max_tokens", 10),
"n": ("n", 1),
"top_p": ("top_p", 1.0),
"top_k": ("best_of", 1),
"logprobs": ("logprobs", None),
"stop_sequences": ("stop", None), # OpenAI doesn't like empty lists
"presence_penalty": ("presence_penalty", 0.0),
"frequency_penalty": ("frequency_penalty", 0.0),
"batch_size": ("batch_size", 20),
}
REQUEST_CLS: Type[Request] = LMRequest
NAME = "openai"
def connect(
self,
connection_str: Optional[str] = None,
client_args: Dict[str, Any] = {},
) -> None:
"""
Connect to the OpenAI server.
connection_str is passed as default OPENAI_API_KEY if variable not set.
Args:
connection_str: connection string.
client_args: client arguments.
"""
self.api_key = connection_str or os.environ.get("OPENAI_API_KEY")
if self.api_key is None:
raise ValueError(
"OpenAI API key not set. Set OPENAI_API_KEY environment "
"variable or pass through `client_connection`."
)
self.host = "https://api.openai.com/v1"
for key in self.PARAMS:
setattr(self, key, client_args.pop(key, self.PARAMS[key][1]))
if getattr(self, "engine") not in OPENAI_ENGINES:
raise ValueError(
f"Invalid engine {getattr(self, 'engine')}. Must be {OPENAI_ENGINES}."
)
def close(self) -> None:
"""Close the client."""
pass
def get_generation_url(self) -> str:
"""Get generation URL."""
return self.host + "/completions"
def get_generation_header(self) -> Dict[str, str]:
"""
Get generation header.
Returns:
header.
"""
return {"Authorization": f"Bearer {self.api_key}"}
def supports_batch_inference(self) -> bool:
"""Return whether the client supports batch inference."""
return True
def supports_streaming_inference(self) -> bool:
"""Return whether the client supports streaming inference.
Override in child client class.
"""
return True
def get_model_params(self) -> Dict:
"""
Get model params.
By getting model params from the server, we can add to request
and make sure cache keys are unique to model.
Returns:
model params.
"""
return {"model_name": self.NAME, "engine": getattr(self, "engine")}
def postprocess_response(self, response: Dict, request: Dict) -> Dict[str, Any]:
"""
Validate response as dict.
Args:
response: response
request: request
Return:
response as dict
"""
validated_response = super().postprocess_response(response, request)
# Handle logprobs
for choice in validated_response["choices"]:
if "logprobs" in choice:
logprobs = choice.pop("logprobs")
if logprobs and "token_logprobs" in logprobs:
choice["token_logprobs"] = logprobs["token_logprobs"]
choice["tokens"] = logprobs["tokens"]
return validated_response
def split_usage(self, request: Dict, choices: List[str]) -> List[Dict[str, int]]:
"""Split usage into list of usages for each prompt."""
try:
encoding = tiktoken.encoding_for_model(getattr(self, "engine"))
except Exception:
return []
prompt = request["prompt"]
# If n > 1 and prompt is a string, we need to split it into a list
if isinstance(prompt, str):
prompts = [prompt] * len(choices)
else:
prompts = prompt
assert len(prompts) == len(choices)
usages = []
for pmt, chc in zip(prompts, choices):
pmt_tokens = len(encoding.encode(pmt))
chc_tokens = len(encoding.encode(chc["text"])) # type: ignore
usage = {
"prompt_tokens": pmt_tokens,
"completion_tokens": chc_tokens,
"total_tokens": pmt_tokens + chc_tokens,
}
usages.append(usage)
return usages
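# A minimal illustrative sketch of the tiktoken-based token accounting that
# split_usage performs per prompt/completion pair. Assumes the tiktoken encoding
# can be loaded locally; the strings below are made up.
if __name__ == "__main__":
    encoding = tiktoken.encoding_for_model("text-davinci-003")
    prompt_tokens = len(encoding.encode("Say hello to the world"))
    completion_tokens = len(encoding.encode("hello world"))
    usage = {
        "prompt_tokens": prompt_tokens,
        "completion_tokens": completion_tokens,
        "total_tokens": prompt_tokens + completion_tokens,
    }
    print(usage)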
| manifest-main | manifest/clients/openai.py |
"""Azure client."""
import logging
import os
from typing import Any, Dict, Optional
from manifest.clients.openai_chat import OPENAICHAT_ENGINES, OpenAIChatClient
from manifest.request import LMRequest
logger = logging.getLogger(__name__)
# Azure deployment name can only use letters and numbers, no spaces. Hyphens ("-") and
# underscores ("_") may be used, except as ending characters. We create this mapping to
# handle the difference between Azure and OpenAI naming.
AZURE_DEPLOYMENT_NAME_MAPPING = {
"gpt-3.5-turbo": "gpt-35-turbo",
"gpt-3.5-turbo-0301": "gpt-35-turbo-0301",
}
OPENAI_DEPLOYMENT_NAME_MAPPING = {
"gpt-35-turbo": "gpt-3.5-turbo",
"gpt-35-turbo-0301": "gpt-3.5-turbo-0301",
}
class AzureChatClient(OpenAIChatClient):
"""Azure chat client."""
# User param -> (client param, default value)
PARAMS = OpenAIChatClient.PARAMS
REQUEST_CLS = LMRequest
NAME = "azureopenaichat"
IS_CHAT = True
def connect(
self,
connection_str: Optional[str] = None,
client_args: Dict[str, Any] = {},
) -> None:
"""
Connect to the AzureOpenAI server.
connection_str is passed as default AZURE_OPENAI_KEY if variable not set.
Args:
connection_str: connection string.
client_args: client arguments.
"""
self.api_key, self.host = None, None
if connection_str:
connection_parts = connection_str.split("::")
if len(connection_parts) == 1:
self.api_key = connection_parts[0]
elif len(connection_parts) == 2:
self.api_key, self.host = connection_parts
else:
raise ValueError(
"Invalid connection string. "
"Must be either AZURE_OPENAI_KEY or "
"AZURE_OPENAI_KEY::AZURE_OPENAI_ENDPOINT"
)
self.api_key = self.api_key or os.environ.get("AZURE_OPENAI_KEY")
if self.api_key is None:
raise ValueError(
"AzureOpenAI API key not set. Set AZURE_OPENAI_KEY environment "
"variable or pass through `client_connection`."
)
self.host = self.host or os.environ.get("AZURE_OPENAI_ENDPOINT")
if self.host is None:
raise ValueError(
"Azure Service URL not set "
"(e.g. https://openai-azure-service.openai.azure.com/)."
" Set AZURE_OPENAI_ENDPOINT or pass through `client_connection`."
" as AZURE_OPENAI_KEY::AZURE_OPENAI_ENDPOINT"
)
self.host = self.host.rstrip("/")
for key in self.PARAMS:
setattr(self, key, client_args.pop(key, self.PARAMS[key][1]))
if getattr(self, "engine") not in OPENAICHAT_ENGINES:
raise ValueError(
f"Invalid engine {getattr(self, 'engine')}. "
f"Must be {OPENAICHAT_ENGINES}."
)
def get_generation_url(self) -> str:
"""Get generation URL."""
engine = getattr(self, "engine")
deployment_name = AZURE_DEPLOYMENT_NAME_MAPPING.get(engine, engine)
return (
self.host
+ "/openai/deployments/"
+ deployment_name
+ "/chat/completions?api-version=2023-05-15"
)
def get_generation_header(self) -> Dict[str, str]:
"""
Get generation header.
Returns:
header.
"""
return {"api-key": f"{self.api_key}"}
def get_model_params(self) -> Dict:
"""
Get model params.
By getting model params from the server, we can add to request
and make sure cache keys are unique to model.
Returns:
model params.
"""
# IMPORTANT!!!
        # Azure models are the same as OpenAI models, so we want to unify their
        # caches. Make sure we return the OpenAI name here.
return {"model_name": OpenAIChatClient.NAME, "engine": getattr(self, "engine")}
| manifest-main | manifest/clients/azureopenai_chat.py |
"""Hugging Face client."""
import logging
from functools import lru_cache
from typing import Any, Dict, Optional
import requests
from manifest.clients.client import Client
from manifest.request import DEFAULT_REQUEST_KEYS, LMRequest, LMScoreRequest
from manifest.response import LMModelChoice, ModelChoices, Response
logger = logging.getLogger(__name__)
class HuggingFaceClient(Client):
"""HuggingFace client."""
# User param -> (client param, default value)
PARAMS = {
"temperature": ("temperature", 1.0),
"max_tokens": ("max_tokens", 10),
"n": ("n", 1),
"top_p": ("top_p", 1.0),
"top_k": ("top_k", 50),
"repetition_penalty": ("repetition_penalty", 1.0),
"do_sample": ("do_sample", True),
}
REQUEST_CLS = LMRequest
NAME = "huggingface"
def connect(
self,
connection_str: Optional[str] = None,
client_args: Dict[str, Any] = {},
) -> None:
"""
Connect to the HuggingFace url.
        Args:
connection_str: connection string.
client_args: client arguments.
"""
if not connection_str:
raise ValueError("Must provide connection string")
self.host = connection_str.rstrip("/")
for key in self.PARAMS:
setattr(self, key, client_args.pop(key, self.PARAMS[key][1]))
def close(self) -> None:
"""Close the client."""
pass
def get_generation_url(self) -> str:
"""Get generation URL."""
return self.host + "/completions"
def get_generation_header(self) -> Dict[str, str]:
"""
Get generation header.
Returns:
header.
"""
return {}
def supports_batch_inference(self) -> bool:
"""Return whether the client supports batch inference."""
return True
def supports_streaming_inference(self) -> bool:
"""Return whether the client supports streaming inference.
Override in child client class.
"""
return False
@lru_cache(maxsize=1)
def get_model_params(self) -> Dict:
"""
Get model params.
By getting model params from the server, we can add to request
and make sure cache keys are unique to model.
Returns:
model params.
"""
res = requests.post(self.host + "/params").json()
res["client_name"] = self.NAME
return res
def run_score_prompt_request(
self,
request: LMScoreRequest,
) -> Response:
"""
Get the logit score of the prompt via a forward pass of the model.
Args:
request: request.
Returns:
            response.
"""
request_params = self._get_request_params(request)
retry_timeout = request_params.pop("client_timeout")
for key in DEFAULT_REQUEST_KEYS:
request_params.pop(key, None)
# Do not add params like we do with request as the model isn't sampling
request_params = {"prompt": request.prompt}
post_str = self.host + "/score_sequence"
try:
res = requests.post(
post_str,
json=request_params,
timeout=retry_timeout,
)
res.raise_for_status()
except requests.Timeout as e:
logger.error("HF request timed out. Increase client_timeout.")
raise e
except requests.exceptions.HTTPError as e:
logger.error(res.text)
raise e
response_dict = res.json()
return Response(
response=ModelChoices(
choices=[LMModelChoice(**choice) for choice in response_dict["choices"]]
),
cached=False,
request=request,
usages=None,
response_type="text",
request_type=LMScoreRequest,
)
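# A minimal illustrative sketch of the payload that run_score_prompt_request above
# posts to <host>/score_sequence. No HTTP call is made here; the host and prompt are
# made up, and the real response choices are parsed into LMModelChoice objects.
if __name__ == "__main__":
    host = "http://localhost:5000"  # made-up local server address
    score_payload = {"prompt": ["The cat sat on the mat"]}
    print(f"Would POST {host}/score_sequence with", score_payload)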
| manifest-main | manifest/clients/huggingface.py |
"""AI21 client."""
import logging
import os
from typing import Any, Dict, Optional
from manifest.clients.client import Client
from manifest.request import LMRequest
logger = logging.getLogger(__name__)
AI21_ENGINES = {
"j2-ultra",
"j2-mid",
"j2-light",
}
class AI21Client(Client):
"""AI21Client client."""
# User param -> (client param, default value)
PARAMS = {
"engine": ("engine", "j2-ultra"),
"temperature": ("temperature", 0.7),
"max_tokens": ("maxTokens", 40),
"top_k": ("topKReturn", 0),
"n": ("numResults", 1),
"top_p": ("topP", 1.0),
"stop_sequences": ("stopSequences", []),
}
REQUEST_CLS = LMRequest
NAME = "ai21"
def connect(
self,
connection_str: Optional[str] = None,
client_args: Dict[str, Any] = {},
) -> None:
"""
Connect to the AI21 server.
connection_str is passed as default AI21_API_KEY if variable not set.
Args:
connection_str: connection string.
client_args: client arguments.
"""
# Taken from https://docs.ai21.com/
self.host = "https://api.ai21.com/studio/v1"
self.api_key = connection_str or os.environ.get("AI21_API_KEY")
if self.api_key is None:
raise ValueError(
"AI21 API key not set. Set AI21_API_KEY environment "
"variable or pass through `client_connection`."
)
for key in self.PARAMS:
setattr(self, key, client_args.pop(key, self.PARAMS[key][1]))
if getattr(self, "engine") not in AI21_ENGINES:
raise ValueError(
f"Invalid engine {getattr(self, 'engine')}. Must be {AI21_ENGINES}."
)
def close(self) -> None:
"""Close the client."""
pass
def get_generation_url(self) -> str:
"""Get generation URL."""
return self.host + "/" + getattr(self, "engine") + "/complete"
def get_generation_header(self) -> Dict[str, str]:
"""
Get generation header.
Returns:
header.
"""
return {"Authorization": f"Bearer {self.api_key}"}
def supports_batch_inference(self) -> bool:
"""Return whether the client supports batch inference."""
return False
def supports_streaming_inference(self) -> bool:
"""Return whether the client supports streaming inference.
Override in child client class.
"""
return False
def get_model_params(self) -> Dict:
"""
Get model params.
By getting model params from the server, we can add to request
and make sure cache keys are unique to model.
Returns:
model params.
"""
return {"model_name": self.NAME, "engine": getattr(self, "engine")}
def postprocess_response(self, response: Dict, request: Dict) -> Dict[str, Any]:
"""
Format response to dict.
Args:
response: response
request: request
Return:
response as dict
"""
return {
"object": "text_completion",
"model": getattr(self, "engine"),
"choices": [
{
"text": item["data"]["text"],
"token_logprobs": item["data"]["tokens"],
}
for item in response["completions"]
],
}
| manifest-main | manifest/clients/ai21.py |
"""Cohere client."""
import logging
import os
from typing import Any, Dict, Optional
from manifest.clients.client import Client
from manifest.request import LMRequest
logger = logging.getLogger(__name__)
COHERE_MODELS = {"small", "medium", "large", "xlarge"}
class CohereClient(Client):
"""Cohere client."""
# Params are defined in https://docs.cohere.ai/generate-reference
PARAMS = {
"engine": ("model", "xlarge"),
"max_tokens": ("max_tokens", 20),
"temperature": ("temperature", 0.75),
"n": ("num_generations", 1),
"top_k": ("k", 0),
"top_p": ("p", 0.75),
"frequency_penalty": ("frequency_penalty", 0.0),
"presence_penalty": ("presence_penalty", 0.0),
"stop_sequences": ("stop_sequences", None),
}
REQUEST_CLS = LMRequest
NAME = "cohere"
def connect(
self,
connection_str: Optional[str] = None,
client_args: Dict[str, Any] = {},
) -> None:
"""
Connect to the Cohere server.
connection_str is passed as default COHERE_API_KEY if variable not set.
Args:
connection_str: connection string.
client_args: client arguments.
"""
self.api_key = connection_str or os.environ.get("COHERE_API_KEY")
if self.api_key is None:
raise ValueError(
"Cohere API key not set. Set COHERE_API_KEY environment "
"variable or pass through `client_connection`."
)
self.host = "https://api.cohere.ai"
for key in self.PARAMS:
setattr(self, key, client_args.pop(key, self.PARAMS[key][1]))
if getattr(self, "engine") not in COHERE_MODELS:
raise ValueError(
f"Invalid engine {getattr(self, 'engine')}. Must be {COHERE_MODELS}."
)
def close(self) -> None:
"""Close the client."""
def get_generation_url(self) -> str:
"""Get generation URL."""
return self.host + "/generate"
def get_generation_header(self) -> Dict[str, str]:
"""
Get generation header.
Returns:
header.
"""
return {
"Cohere-Version": "2021-11-08",
"Authorization": f"Bearer {self.api_key}",
}
def supports_batch_inference(self) -> bool:
"""Return whether the client supports batch inference."""
return False
def supports_streaming_inference(self) -> bool:
"""Return whether the client supports streaming inference.
Override in child client class.
"""
return False
def get_model_params(self) -> Dict:
"""
Get model params.
By getting model params from the server, we can add to request
and make sure cache keys are unique to model.
Returns:
model params.
"""
return {"model_name": self.NAME, "engine": getattr(self, "engine")}
def postprocess_response(self, response: Dict, request: Dict) -> Dict[str, Any]:
"""
Format response to dict.
Args:
response: response
request: request
Return:
response as dict
"""
return {
"object": "text_completion",
"model": getattr(self, "engine"),
"choices": [
{
"text": item["text"],
"text_logprob": item.get("likelihood", None),
"token_logprobs": item.get("token_likelihoods", None),
}
for item in response["generations"]
],
}
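# A minimal illustrative sketch of how a raw Cohere-style generation payload is
# reshaped by postprocess_response above. The response content below is made up and
# no request is sent.
if __name__ == "__main__":
    raw = {"generations": [{"text": " hi there", "likelihood": -0.2}]}
    reshaped = {
        "object": "text_completion",
        "model": "xlarge",
        "choices": [
            {
                "text": item["text"],
                "text_logprob": item.get("likelihood", None),
                "token_logprobs": item.get("token_likelihoods", None),
            }
            for item in raw["generations"]
        ],
    }
    assert reshaped["choices"][0]["text"] == " hi there"
    assert reshaped["choices"][0]["token_logprobs"] is None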
| manifest-main | manifest/clients/cohere.py |
"""Dummy client."""
import hashlib
import logging
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
import tiktoken
from manifest.clients.client import Client
from manifest.request import LMChatRequest, LMRequest, LMScoreRequest, Request
from manifest.response import LMModelChoice, ModelChoices, Response, Usage, Usages
logger = logging.getLogger(__name__)
class DummyClient(Client):
"""Dummy client."""
# User param -> (client param, default value)
PARAMS = {
"engine": ("model", "text-davinci-003"),
"temperature": ("temperature", 0.0),
"max_tokens": ("max_tokens", 10),
"n": ("n", 1),
"top_p": ("top_p", 1.0),
"top_k": ("best_of", 1),
"batch_size": ("batch_size", 20),
}
REQUEST_CLS = LMRequest
NAME = "dummy"
def connect(
self,
connection_str: Optional[str] = None,
client_args: Dict[str, Any] = {},
) -> None:
"""
        Connect to the dummy server.
        This is a dummy client that returns mock responses. Used for testing.
Args:
connection_str: connection string.
client_args: client arguments.
"""
        # We use tiktoken as it is faster than HF for tokenizing
# Use any model to create the tokenizer
self.encoder = tiktoken.get_encoding("cl100k_base")
for key in self.PARAMS:
setattr(self, key, client_args.pop(key, self.PARAMS[key][1]))
def close(self) -> None:
"""Close the client."""
pass
def get_generation_url(self) -> str:
"""Get generation URL."""
return "dummy"
def supports_batch_inference(self) -> bool:
"""Return whether the client supports batch inference."""
return True
def supports_streaming_inference(self) -> bool:
"""Return whether the client supports streaming inference.
Override in child client class.
"""
return False
def get_generation_header(self) -> Dict[str, str]:
"""
Get generation header.
Returns:
header.
"""
return {}
def get_model_params(self) -> Dict:
"""
Get model params.
By getting model params from the server, we can add to request
and make sure cache keys are unique to model.
Returns:
model params.
"""
return {"engine": "dummy", "model": getattr(self, "engine")}
def get_mock_output(
self, output_toks: int, is_completion: bool, seed: Optional[int] = None
) -> LMModelChoice:
"""Return mock model output by generating random tokens."""
np.random.seed(seed)
random_tokens = np.random.randint(
0, self.encoder.max_token_value + 1, output_toks
)
response = self.encoder.decode(random_tokens) # type: ignore
if is_completion:
np.random.seed(seed)
random_logprobs = np.random.uniform(
low=-2, high=-0.00001, size=output_toks
).tolist()
else:
# Return all Nones to mimic chat models
# OpenAI chat models do not return logprobs
random_logprobs = [None] * output_toks
return LMModelChoice(
text=response,
token_logprobs=random_logprobs,
tokens=random_tokens.tolist(),
)
def get_mock_choices(
self,
prompt_list: List[str],
request_params: Dict,
is_completion: bool,
) -> Tuple[List[LMModelChoice], List[Usage]]:
"""Get choices and usages of mock output."""
choices = []
usages = []
for prompt in prompt_list:
num_prompt_tokens = len(self.encoder.encode(prompt))
if request_params["temperature"] == 0:
# Get integer seed from hash of prompt
seed = (
int(hashlib.sha256(prompt.encode("utf-8")).hexdigest(), 16)
% 10**8
)
else:
# Get random seed
seed = None
for _ in range(int(request_params["n"])):
choice = self.get_mock_output(
request_params["max_tokens"], is_completion=is_completion, seed=seed
)
choices.append(choice)
usages.append(
Usage(
prompt_tokens=num_prompt_tokens,
completion_tokens=request_params["max_tokens"],
total_tokens=num_prompt_tokens + request_params["max_tokens"],
)
)
return choices, usages
def run_request(self, request: Request) -> Response:
"""
        Run request.
        Args:
            request: request.
        Returns:
            response.
"""
if isinstance(request.prompt, list):
prompt_list = request.prompt
else:
prompt_list = [request.prompt]
request_params = request.to_dict(self.PARAMS)
choices, usages = self.get_mock_choices(
prompt_list, request_params, is_completion=True
)
return Response(
response=ModelChoices(choices=choices), # type: ignore
cached=False,
request=request,
usages=Usages(usages=usages),
response_type="text",
request_type=self.REQUEST_CLS,
)
async def arun_batch_request(
self, request: Request, verbose: bool = False
) -> Response:
"""
        Run async batch request.
Args:
request: request.
Returns:
response.
"""
return self.run_request(request)
def run_chat_request(
self,
request: LMChatRequest,
) -> Response:
"""
Get the response from chat model.
Args:
request: request.
Returns:
response.
"""
prompt_list = ["_".join(pmp["content"] for pmp in request.prompt)]
request_params = request.to_dict(self.PARAMS)
choices, usages = self.get_mock_choices(
prompt_list, request_params, is_completion=False
)
return Response(
response=ModelChoices(choices=choices), # type: ignore
cached=False,
request=request,
usages=Usages(usages=usages),
response_type="text",
request_type=LMChatRequest,
)
def run_score_prompt_request(
self,
request: LMScoreRequest,
) -> Response:
"""
Get the logit score of the prompt via a forward pass of the model.
Args:
request: request.
Returns:
            response.
"""
if isinstance(request.prompt, list):
prompt_list = request.prompt
else:
prompt_list = [request.prompt]
request_params = request.to_dict(self.PARAMS)
choices, usages = self.get_mock_choices(
prompt_list, request_params, is_completion=True
)
return Response(
response=ModelChoices(choices=choices), # type: ignore
cached=False,
request=request,
usages=Usages(usages=usages),
response_type="text",
request_type=LMScoreRequest,
)
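# A minimal illustrative sketch of the hashing trick used in get_mock_choices above,
# which makes temperature-0 mock outputs deterministic per prompt. The prompt below
# is made up.
if __name__ == "__main__":
    prompt = "hello world"
    seed = int(hashlib.sha256(prompt.encode("utf-8")).hexdigest(), 16) % 10**8
    np.random.seed(seed)
    first_draw = np.random.randint(0, 100, 5).tolist()
    np.random.seed(seed)
    second_draw = np.random.randint(0, 100, 5).tolist()
    assert first_draw == second_draw  # same prompt -> same mock tokens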
| manifest-main | manifest/clients/dummy.py |
"""Client connection."""
import logging
import time
from typing import Any, Dict, List, Optional, Type
from pydantic import BaseModel, Extra
from manifest.clients.ai21 import AI21Client
from manifest.clients.azureopenai import AzureClient
from manifest.clients.azureopenai_chat import AzureChatClient
from manifest.clients.client import Client
from manifest.clients.cohere import CohereClient
from manifest.clients.dummy import DummyClient
from manifest.clients.google import GoogleClient
from manifest.clients.google_chat import GoogleChatClient
from manifest.clients.huggingface import HuggingFaceClient
from manifest.clients.huggingface_embedding import HuggingFaceEmbeddingClient
from manifest.clients.openai import OpenAIClient
from manifest.clients.openai_chat import OpenAIChatClient
from manifest.clients.openai_embedding import OpenAIEmbeddingClient
from manifest.clients.toma import TOMAClient
from manifest.connections.scheduler import RandomScheduler, RoundRobinScheduler
logging.getLogger("openai").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
CLIENT_CONSTRUCTORS = {
AI21Client.NAME: AI21Client,
AzureClient.NAME: AzureClient,
AzureChatClient.NAME: AzureChatClient,
CohereClient.NAME: CohereClient,
DummyClient.NAME: DummyClient,
GoogleClient.NAME: GoogleClient,
GoogleChatClient.NAME: GoogleChatClient,
HuggingFaceClient.NAME: HuggingFaceClient,
HuggingFaceEmbeddingClient.NAME: HuggingFaceEmbeddingClient,
OpenAIClient.NAME: OpenAIClient,
OpenAIChatClient.NAME: OpenAIChatClient,
OpenAIEmbeddingClient.NAME: OpenAIEmbeddingClient,
TOMAClient.NAME: TOMAClient,
}
CLIENT_REQUEST_TYPES: Dict[str, Type] = {
k: v.REQUEST_CLS for k, v in CLIENT_CONSTRUCTORS.items()
}
# Diffusion
DIFFUSION_CLIENTS = ["diffuser", "tomadiffuser"]
try:
from manifest.clients.diffuser import DiffuserClient
from manifest.clients.toma_diffuser import TOMADiffuserClient
CLIENT_CONSTRUCTORS[DiffuserClient.NAME] = DiffuserClient
CLIENT_CONSTRUCTORS[TOMADiffuserClient.NAME] = TOMADiffuserClient
except Exception:
logger.info("Diffusion not supported. Skipping import.")
pass
SCHEDULER_CONSTRUCTORS = {
RandomScheduler.NAME: RandomScheduler,
RoundRobinScheduler.NAME: RoundRobinScheduler,
}
class Timing(BaseModel):
"""Timing class."""
start: float = -1.0
end: float = -1.0
class ClientConnection(BaseModel):
"""Client Connection class."""
client_name: str
# Use environment variables (depending on client)
client_connection: Optional[str] = None
# Use default engine
engine: Optional[str] = None
# Prevent extra args
class Config:
"""Config class.
Allows to override pydantic behavior.
"""
extra = Extra.forbid
class ClientConnectionPool:
"""Client connection pool."""
def __init__(
self,
client_pool: List[ClientConnection],
client_pool_scheduler: str = "round_robin",
client_args: Dict[str, Any] = {},
):
"""Init."""
# Verify the clients are allowed and supported
for client in client_pool:
if client.client_name not in CLIENT_CONSTRUCTORS:
if client.client_name in DIFFUSION_CLIENTS:
raise ImportError(
f"Diffusion client {client.client_name} requires "
"the proper install. Make sure to run "
"`pip install manifest-ml[diffusers]` "
"or install Pillow."
)
else:
raise ValueError(
f"Unknown client name: {client.client_name}. "
f"Choices are {list(CLIENT_CONSTRUCTORS.keys())}"
)
# Verify that the serialization of all clients is the same
request_types = set(
[CLIENT_REQUEST_TYPES[client.client_name] for client in client_pool]
)
if len(request_types) > 1:
raise ValueError(
"All clients in the client pool must use the same request type. "
f"You have {sorted(list(map(str, request_types)))}"
)
# Verify scheduler
if client_pool_scheduler not in SCHEDULER_CONSTRUCTORS:
raise ValueError(f"Unknown scheduler: {client_pool_scheduler}.")
self.request_type = request_types.pop()
# Initialize the clients
# We must keep track of the used args so we know
# if a user passed in an arg that was never used
used_args = set()
self.client_pool = []
for client in client_pool:
to_pass_kwargs = client_args.copy()
# Override the engine param for each
to_pass_kwargs.pop("engine", None)
if client.engine:
to_pass_kwargs["engine"] = client.engine
self.client_pool.append(
CLIENT_CONSTRUCTORS[client.client_name]( # type: ignore
client.client_connection, client_args=to_pass_kwargs
)
)
            # Update used args
for k in client_args:
if k not in to_pass_kwargs:
used_args.add(k)
        # Remove used args
for k in used_args:
client_args.pop(k)
# Get the scheduler
self.scheduler = SCHEDULER_CONSTRUCTORS[client_pool_scheduler](
num_clients=len(self.client_pool)
)
self.current_client_id = 0
# Record timing metrics for each client for load balancing
# TODO: Implement this in the future
self.client_pool_metrics = [Timing() for _ in self.client_pool]
def close(self) -> None:
"""Close."""
for client in self.client_pool:
client.close()
def num_clients(self) -> int:
"""Get number of clients."""
return len(self.client_pool)
def get_next_client(self) -> Client:
"""Get client."""
client_int = self.scheduler.get_client()
self.current_client_id = client_int
return self.client_pool[client_int]
def get_current_client(self) -> Client:
"""Get current client."""
return self.client_pool[self.current_client_id]
def start_timer(self) -> None:
"""Start timer."""
self.client_pool_metrics[self.current_client_id].start = time.time()
def end_timer(self) -> None:
"""End timer."""
self.client_pool_metrics[self.current_client_id].end = time.time()
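# A minimal illustrative sketch of building a pool of two dummy clients and letting
# the round-robin scheduler alternate between them. Assumes the dummy client (and
# its tiktoken encoding) is available locally; nothing external is contacted.
if __name__ == "__main__":
    pool = ClientConnectionPool(
        [ClientConnection(client_name="dummy"), ClientConnection(client_name="dummy")],
        client_pool_scheduler="round_robin",
    )
    first = pool.get_next_client()
    second = pool.get_next_client()
    assert first is not second  # round robin alternates between the two dummy clients
    pool.close()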
| manifest-main | manifest/connections/client_pool.py |
"""Connection init."""
| manifest-main | manifest/connections/__init__.py |
"""Request client schedulers.
Supports random selection and round robin selection.
"""
import numpy as np
class Scheduler:
"""Scheduler base class."""
NAME: str = "scheduler"
def __init__(self, num_clients: int):
"""Initialize scheduler."""
self.num_clients = num_clients
def get_client(self) -> int:
"""Get client by id."""
raise NotImplementedError
class RandomScheduler(Scheduler):
"""Random scheduler."""
NAME: str = "random"
def __init__(self, num_clients: int):
"""Initialize scheduler."""
super().__init__(num_clients)
# Set seed
np.random.seed(0)
def get_client(self) -> int:
"""Get client by id."""
return np.random.randint(self.num_clients)
class RoundRobinScheduler(Scheduler):
"""Round robin scheduler."""
NAME: str = "round_robin"
def __init__(self, num_clients: int):
"""Initialize scheduler."""
super().__init__(num_clients)
self.current_client = 0
def get_client(self) -> int:
"""Get client by id."""
client = self.current_client
self.current_client = (self.current_client + 1) % self.num_clients
return client
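# A minimal illustrative sketch of the selection pattern each scheduler produces for
# three clients. The draw counts below are arbitrary.
if __name__ == "__main__":
    round_robin = RoundRobinScheduler(num_clients=3)
    assert [round_robin.get_client() for _ in range(6)] == [0, 1, 2, 0, 1, 2]
    random_scheduler = RandomScheduler(num_clients=3)
    picks = [random_scheduler.get_client() for _ in range(6)]
    assert all(0 <= pick < 3 for pick in picks)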
| manifest-main | manifest/connections/scheduler.py |
"""Api init."""
| manifest-main | manifest/api/__init__.py |
"""Response."""
import time
import uuid
from typing import Any, Dict, List
class ModelResponse:
"""ModelResponse."""
def __init__(self, results: List[Dict[str, Any]], response_type: str) -> None:
"""Initialize response."""
self.results = results
self.response_type = response_type
if self.response_type not in {
"text_completion",
"prompt_logit_score",
"image_generation",
"embedding_generation",
}:
raise ValueError(
f"Invalid response type: {self.response_type}. "
"Must be one of: text_completion, prompt_logit_score, "
"image_generation, embedding_generation."
)
self.response_id = str(uuid.uuid4())
self.created = int(time.time())
def __dict__(self) -> Dict[str, Any]: # type: ignore
"""Return dictionary representation of response."""
key = (
"text"
if self.response_type not in {"image_generation", "embedding_generation"}
else "array"
)
return {
"id": self.response_id,
"object": self.response_type,
"created": self.created,
"model": "flask_model",
"choices": [
{
key: result[key],
"logprob": result["logprob"],
"tokens": result["tokens"],
"token_logprobs": result["token_logprobs"],
}
if key == "text"
else {
key: result[key].tolist(),
"logprob": result["logprob"],
}
for result in self.results
],
}
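# A minimal illustrative sketch of constructing a text-completion response and
# serializing it the way the Flask app does via __dict__(). The result values below
# are made up.
if __name__ == "__main__":
    demo = ModelResponse(
        [{"text": "hello", "logprob": -0.5, "tokens": [42], "token_logprobs": [-0.5]}],
        response_type="text_completion",
    )
    serialized = demo.__dict__()
    assert serialized["object"] == "text_completion"
    assert serialized["choices"][0]["text"] == "hello"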
| manifest-main | manifest/api/response.py |
"""Flask app."""
import argparse
import io
import json
import logging
import os
import socket
from typing import Dict
import pkg_resources
from flask import Flask, Response, request
from manifest.api.models.diffuser import DiffuserModel
from manifest.api.models.huggingface import (
MODEL_GENTYPE_REGISTRY,
CrossModalEncoderModel,
TextGenerationModel,
)
from manifest.api.models.sentence_transformer import SentenceTransformerModel
from manifest.api.response import ModelResponse
os.environ["TOKENIZERS_PARALLELISM"] = "false"
logger = logging.getLogger(__name__)
app = Flask(__name__) # define app using Flask
# Will be global
model = None
model_type = None
PORT = int(os.environ.get("FLASK_PORT", 5000))
MODEL_CONSTRUCTORS = {
"huggingface": TextGenerationModel,
"sentence_transformers": SentenceTransformerModel,
"huggingface_crossmodal": CrossModalEncoderModel,
"diffuser": DiffuserModel,
}
def parse_args() -> argparse.Namespace:
"""Generate args."""
parser = argparse.ArgumentParser(description="Model args")
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type used for finding constructor.",
choices=MODEL_CONSTRUCTORS.keys(),
)
parser.add_argument(
"--model_generation_type",
default=None,
type=str,
help="Model generation type.",
choices=MODEL_GENTYPE_REGISTRY.keys(),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
help="Name of model or path to model. Used in initialize of model class.",
)
parser.add_argument(
"--cache_dir", default=None, type=str, help="Cache directory for models."
)
parser.add_argument(
"--device", type=int, default=0, help="Model device. -1 for CPU."
)
parser.add_argument(
"--fp16", action="store_true", help="Force use fp16 for model params."
)
parser.add_argument(
"--percent_max_gpu_mem_reduction",
type=float,
default=0.85,
help="Used with accelerate multigpu. Scales down max memory.",
)
parser.add_argument(
"--use_bitsandbytes",
action="store_true",
help=("Use bits and bytes. " "This will override --device parameter."),
)
parser.add_argument(
"--use_accelerate_multigpu",
action="store_true",
help=(
"Use accelerate for multi gpu inference. "
"This will override --device parameter."
),
)
parser.add_argument(
"--use_hf_parallelize",
action="store_true",
help=(
"Use HF parallelize for multi gpu inference. "
"This will override --device parameter."
),
)
parser.add_argument(
"--use_deepspeed",
action="store_true",
help=("Use deepspeed. This will override --device parameter."),
)
args = parser.parse_args()
return args
def is_port_in_use(port: int) -> bool:
"""Check if port is in use."""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
return s.connect_ex(("localhost", port)) == 0
def main() -> None:
"""Run main."""
kwargs = parse_args()
if is_port_in_use(PORT):
raise ValueError(f"Port {PORT} is already in use.")
global model_type
model_type = kwargs.model_type
model_gen_type = kwargs.model_generation_type
model_name_or_path = kwargs.model_name_or_path
if not model_name_or_path:
raise ValueError("Must provide model_name_or_path.")
if kwargs.use_accelerate_multigpu:
logger.info("Using accelerate. Overridding --device argument.")
if (
kwargs.percent_max_gpu_mem_reduction <= 0
or kwargs.percent_max_gpu_mem_reduction > 1
):
raise ValueError("percent_max_gpu_mem_reduction must be in (0, 1].")
if (
sum(
[
kwargs.use_accelerate_multigpu,
kwargs.use_hf_parallelize,
kwargs.use_bitsandbytes,
kwargs.use_deepspeed,
]
)
> 1
):
raise ValueError(
"Only one of use_accelerate_multigpu, use_hf_parallelize, "
"use_bitsandbytes, and use_deepspeed can be set."
)
# Global model
global model
model = MODEL_CONSTRUCTORS[model_type](
model_name_or_path,
model_type=model_gen_type,
cache_dir=kwargs.cache_dir,
device=kwargs.device,
use_accelerate=kwargs.use_accelerate_multigpu,
use_parallelize=kwargs.use_hf_parallelize,
use_bitsandbytes=kwargs.use_bitsandbytes,
use_deepspeed=kwargs.use_deepspeed,
perc_max_gpu_mem_red=kwargs.percent_max_gpu_mem_reduction,
use_fp16=kwargs.fp16,
)
app.run(host="0.0.0.0", port=PORT)
@app.route("/completions", methods=["POST"])
def completions() -> Response:
"""Get completions for generation."""
prompt = request.json["prompt"]
del request.json["prompt"]
generation_args = request.json
if not isinstance(prompt, (str, list)):
raise ValueError("Prompt must be a str or list of str")
try:
result_gens = []
for generations in model.generate(prompt, **generation_args):
result_gens.append(generations)
if model_type == "diffuser":
# Assign None logprob as it's not supported in diffusers
results = [
{"array": r[0], "logprob": None, "tokens": None, "token_logprobs": None}
for r in result_gens
]
res_type = "image_generation"
else:
results = [
{"text": r[0], "logprob": r[1], "tokens": r[2], "token_logprobs": r[3]}
for r in result_gens
]
res_type = "text_completion"
# transform the result into the openai format
return Response(
json.dumps(ModelResponse(results, response_type=res_type).__dict__()),
status=200,
)
except Exception as e:
logger.error(e)
return Response(
json.dumps({"message": str(e)}),
status=400,
)
@app.route("/embed", methods=["POST"])
def embed() -> Response:
"""Get embed for generation."""
if "modality" in request.json:
modality = request.json["modality"]
else:
modality = "text"
if modality == "text":
prompts = request.json["prompt"]
elif modality == "image":
import base64
from PIL import Image
prompts = [
Image.open(io.BytesIO(base64.b64decode(data)))
for data in request.json["prompt"]
]
else:
raise ValueError("modality must be text or image")
try:
results = []
embeddings = model.embed(prompts)
for embedding in embeddings:
results.append(
{
"array": embedding,
"logprob": None,
"tokens": None,
"token_logprobs": None,
}
)
return Response(
json.dumps(
ModelResponse(results, response_type="embedding_generation").__dict__()
),
status=200,
)
except Exception as e:
logger.error(e)
return Response(
json.dumps({"message": str(e)}),
status=400,
)
@app.route("/score_sequence", methods=["POST"])
def score_sequence() -> Response:
"""Get logprob of prompt."""
prompt = request.json["prompt"]
del request.json["prompt"]
generation_args = request.json
if not isinstance(prompt, (str, list)):
raise ValueError("Prompt must be a str or list of str")
try:
score_list = model.score_sequence(prompt, **generation_args)
results = [
{
"text": prompt if isinstance(prompt, str) else prompt[i],
"logprob": r[0],
"tokens": r[1],
"token_logprobs": r[2],
}
for i, r in enumerate(score_list)
]
# transform the result into the openai format
return Response(
json.dumps(
ModelResponse(results, response_type="prompt_logit_score").__dict__()
),
status=200,
)
except Exception as e:
logger.error(e)
return Response(
json.dumps({"message": str(e)}),
status=400,
)
@app.route("/params", methods=["POST"])
def params() -> Dict:
"""Get model params."""
return model.get_init_params()
@app.route("/")
def index() -> str:
"""Get index completion."""
fn = pkg_resources.resource_filename("metaseq", "service/index.html")
with open(fn) as f:
return f.read()
if __name__ == "__main__":
main()
| manifest-main | manifest/api/app.py |
"""Sentence transformer model."""
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from sentence_transformers import SentenceTransformer
from manifest.api.models.model import Model
class SentenceTransformerModel(Model):
"""SentenceTransformer model."""
def __init__(
self,
model_name_or_path: str,
model_type: Optional[str] = None,
model_config: Optional[str] = None,
cache_dir: Optional[str] = None,
device: int = 0,
use_accelerate: bool = False,
use_parallelize: bool = False,
use_bitsandbytes: bool = False,
use_deepspeed: bool = False,
perc_max_gpu_mem_red: float = 1.0,
use_fp16: bool = False,
):
"""
Initialize model.
All arguments will be passed in the request from Manifest.
Args:
model_name_or_path: model name string.
model_config: model config string.
cache_dir: cache directory for model.
device: device to use for model.
use_accelerate: whether to use accelerate for multi-gpu inference.
use_parallelize: use HF default parallelize
use_bitsandbytes: use HF bits and bytes
use_deepspeed: use deepspeed
perc_max_gpu_mem_red: percent max memory reduction in accelerate
use_fp16: use fp16 for model weights.
"""
if use_accelerate or use_parallelize or use_bitsandbytes or use_deepspeed:
raise ValueError(
"Cannot use accelerate or parallelize or "
"bitsandbytes or deepspeeed with sentence transformers"
)
# Check if providing path
self.model_name = model_name_or_path
print("Model Name:", self.model_name)
torch_device = (
torch.device("cpu")
if (device == -1 or not torch.cuda.is_available())
else torch.device(f"cuda:{device}")
)
self.embedding_model = SentenceTransformer(self.model_name, device=torch_device)
self.embedding_model.to(torch_device)
self.embedding_model.eval()
def get_init_params(self) -> Dict:
"""Return init params to determine what model is being used."""
return {"model_name": self.model_name, "model_path": self.model_name}
@torch.no_grad()
def generate(
self, prompt: Union[str, List[str]], **kwargs: Any
) -> List[Tuple[Any, float, List[int], List[float]]]:
"""
Generate the prompt from model.
Outputs must be generated text and score, not including prompt.
Args:
            prompt: prompt to generate from.
Returns:
list of generated text (list of length 1 for 1 generation).
"""
raise NotImplementedError("Generate not supported for sentence transformers")
@torch.no_grad()
def embed(self, prompt: Union[str, List[str]], **kwargs: Any) -> np.ndarray:
"""
Embed the prompt from model.
Args:
            prompt: prompt to embed from.
Returns:
list of embeddings (list of length 1 for 1 embedding).
"""
if isinstance(prompt, str):
prompt = [prompt]
return self.embedding_model.encode(prompt)
@torch.no_grad()
def score_sequence(
self, prompt: Union[str, List[str]], **kwargs: Any
) -> List[Tuple[float, List[int], List[float]]]:
"""
Score a sequence of choices.
Args:
prompt (:obj:`str` or :obj:`List[str]`):
The prompt to score the choices against.
**kwargs:
Additional keyword arguments passed along to the :obj:`__call__` method.
"""
raise NotImplementedError(
"Score sequence not supported for sentence transformers"
)
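# Illustrative usage sketch (not part of the original module): exercises the
# embed path above on CPU. The checkpoint name is an assumption.
def _example_embed_usage() -> None:
    """Hedged example: embed two prompts with a small sentence-transformer."""
    model = SentenceTransformerModel(
        "sentence-transformers/all-MiniLM-L6-v2",  # assumed checkpoint
        device=-1,  # force CPU
    )
    embeddings = model.embed(["hello world", "goodbye world"])
    print(embeddings.shape)  # (2, embedding_dim)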
| manifest-main | manifest/api/models/sentence_transformer.py |
"""Diffuser model."""
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers import StableDiffusionPipeline
from manifest.api.models.model import Model
class DiffuserModel(Model):
"""Diffuser model."""
def __init__(
self,
model_name_or_path: str,
model_type: Optional[str] = None,
model_config: Optional[str] = None,
cache_dir: Optional[str] = None,
device: int = 0,
use_accelerate: bool = False,
use_parallelize: bool = False,
use_bitsandbytes: bool = False,
use_deepspeed: bool = False,
perc_max_gpu_mem_red: float = 1.0,
use_fp16: bool = False,
):
"""
Initialize model.
All arguments will be passed in the request from Manifest.
Args:
model_name_or_path: model name string.
model_config: model config string.
cache_dir: cache directory for model.
device: device to use for model.
use_accelerate: whether to use accelerate for multi-gpu inference.
use_parallelize: use HF default parallelize
use_bitsandbytes: use HF bits and bytes
use_deepspeed: use deepspeed
perc_max_gpu_mem_red: percent max memory reduction in accelerate
use_fp16: use fp16 for model weights.
"""
if use_accelerate or use_parallelize or use_bitsandbytes or use_deepspeed:
raise ValueError(
"Cannot use accelerate or parallelize or "
"bitsandbytes or deepspeeed with diffusers"
)
# Check if providing path
self.model_path = model_name_or_path
if Path(self.model_path).exists() and Path(self.model_path).is_dir():
model_name_or_path = Path(self.model_path).name
self.model_name = model_name_or_path
print("Model Name:", self.model_name, "Model Path:", self.model_path)
dtype = torch.float16 if use_fp16 else None
torch_device = (
torch.device("cpu")
if (device == -1 or not torch.cuda.is_available())
else torch.device(f"cuda:{device}")
)
self.pipeline = StableDiffusionPipeline.from_pretrained(
self.model_path,
torch_dtype=dtype,
revision="fp16" if str(dtype) == "float16" else None,
)
self.pipeline.safety_checker = None
self.pipeline.to(torch_device)
def get_init_params(self) -> Dict:
"""Return init params to determine what model is being used."""
return {"model_name": self.model_name, "model_path": self.model_path}
@torch.no_grad()
def generate(
self, prompt: Union[str, List[str]], **kwargs: Any
) -> List[Tuple[Any, float, List[int], List[float]]]:
"""
Generate the prompt from model.
Outputs must be generated text and score, not including prompt.
Args:
            prompt: prompt to generate from.
Returns:
list of generated text (list of length 1 for 1 generation).
"""
# TODO: Is this correct for getting arguments in?
if isinstance(prompt, str):
prompt = [prompt]
result = self.pipeline(prompt, output_type="np.array", **kwargs)
# Return None for logprobs and token logprobs
return [(im, None, None, None) for im in result["images"]]
@torch.no_grad()
def embed(self, prompt: Union[str, List[str]], **kwargs: Any) -> np.ndarray:
"""
Embed the prompt from model.
Args:
            prompt: prompt to embed from.
Returns:
list of embeddings (list of length 1 for 1 embedding).
"""
raise NotImplementedError("Embed not supported for diffusers")
@torch.no_grad()
def score_sequence(
self, prompt: Union[str, List[str]], **kwargs: Any
) -> List[Tuple[float, List[int], List[float]]]:
"""
Score a sequence of choices.
Args:
prompt (:obj:`str` or :obj:`List[str]`):
The prompt to score the choices against.
**kwargs:
Additional keyword arguments passed along to the :obj:`__call__` method.
"""
raise NotImplementedError("Score sequence not supported for diffusers")
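# Illustrative usage sketch (not part of the original module): generates one
# image array from a text prompt. The checkpoint name is an assumption and a
# CUDA GPU is assumed to be available when use_fp16=True.
def _example_generate_usage() -> None:
    """Hedged example: run the Stable Diffusion pipeline wrapper once."""
    model = DiffuserModel(
        "runwayml/stable-diffusion-v1-5",  # assumed checkpoint
        device=0,
        use_fp16=True,
    )
    results = model.generate("a photo of an astronaut riding a horse")
    image_array, _, _, _ = results[0]  # logprob/token fields are None here
    print(image_array.shape)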
| manifest-main | manifest/api/models/diffuser.py |
"""Models init."""
| manifest-main | manifest/api/models/__init__.py |
"""Model class."""
from typing import Any, Dict, List, Tuple, Union
import numpy as np
class Model:
"""Model class."""
def __init__(
self,
model_name_or_path: str,
model_type: str,
cache_dir: str,
device: int,
use_accelerate: bool,
use_parallelize: bool,
use_bitsandbytes: bool,
use_deepspeed: bool,
perc_max_gpu_mem_red: float,
use_fp16: bool,
):
"""
Initialize model.
All arguments will be passed in the request from Manifest.
Args:
model_name_or_path: model name string.
model_type: model type string for when model_name not in registry.
cache_dir: cache directory for model.
device: device to use for model.
use_accelerate: whether to use accelerate for multi-gpu inference.
use_parallelize: use HF default parallelize
use_bitsandbytes: use HF bits and bytes
use_deepspeed: use deepspeed
perc_max_gpu_mem_red: percent max memory reduction in accelerate
use_fp16: use fp16 for model weights.
"""
raise NotImplementedError()
def get_init_params(self) -> Dict:
"""Return init params to determine what model is being used."""
raise NotImplementedError()
def generate(
self, prompt: Union[str, List[str]], **kwargs: Any
) -> List[Tuple[Any, float, List[int], List[float]]]:
"""
Generate the prompt from model.
Outputs must be generated text and score, not including prompt.
Args:
            prompt: prompt to generate from.
Returns:
list of generated text (list of length 1 for 1 generation).
Each item is the response, answer logprob, list of tokens,
and list of logprobs for each token.
"""
raise NotImplementedError()
def embed(self, prompt: Union[str, List[str]], **kwargs: Any) -> np.ndarray:
"""
Embed the prompt from model.
Args:
            prompt: prompt to embed from.
Returns:
list of embeddings (list of length 1 for 1 embedding).
"""
raise NotImplementedError()
def score_sequence(
self, prompt: Union[str, List[str]], **kwargs: Any
) -> List[Tuple[float, List[int], List[float]]]:
"""
Score a sequence of choices.
Args:
prompt (:obj:`str` or :obj:`List[str]`):
The prompt to score the choices against.
**kwargs:
Additional keyword arguments passed along to the :obj:`__call__` method.
Returns:
Tuple of total score, tokens, and probs per token.
"""
raise NotImplementedError()
| manifest-main | manifest/api/models/model.py |
"""Huggingface model."""
import json
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union, cast
import numpy as np
import PIL
import torch
from accelerate import dispatch_model, infer_auto_device_map
from accelerate.utils.modeling import get_max_memory as acc_get_max_memory
from transformers import (
AutoModelForCausalLM,
AutoModelForSeq2SeqLM,
AutoTokenizer,
BloomForCausalLM,
CLIPModel,
CLIPProcessor,
GPT2LMHeadModel,
GPTJForCausalLM,
GPTNeoForCausalLM,
GPTNeoXForCausalLM,
LlamaForCausalLM,
LlamaTokenizer,
OPTForCausalLM,
PreTrainedModel,
PreTrainedTokenizer,
)
import deepspeed
from manifest.api.models.model import Model
MODEL_REGISTRY = {
"EleutherAI/gpt-neo-125M": GPTNeoForCausalLM,
"EleutherAI/gpt-neo-1.3B": GPTNeoForCausalLM,
"EleutherAI/gpt-neo-2.7B": GPTNeoForCausalLM,
"EleutherAI/gpt-j-6B": GPTJForCausalLM,
"EleutherAI/gpt-neox-20b": GPTNeoXForCausalLM,
"facebook/opt-125m": OPTForCausalLM,
"facebook/opt-350m": OPTForCausalLM,
"Salesforce/codegen-2B-mono": AutoModelForCausalLM,
"Salesforce/codegen-6B-mono": AutoModelForCausalLM,
"facebook/opt-1.3b": OPTForCausalLM,
"facebook/opt-2.7b": OPTForCausalLM,
"facebook/opt-6.7b": OPTForCausalLM,
"facebook/opt-13b": OPTForCausalLM,
"facebook/opt-30b": OPTForCausalLM,
"gpt2": GPT2LMHeadModel,
"openai/clip-vit-base-patch32": CLIPModel,
"bigscience/bloom-560m": BloomForCausalLM,
"bigscience/bloom-1b7": BloomForCausalLM,
"bigscience/bloom-3b": BloomForCausalLM,
"bigscience/bloom-7b1": BloomForCausalLM,
"chainyo/alpaca-lora-7b": LlamaForCausalLM,
"bigscience/bloom": AutoModelForCausalLM,
"bigscience/T0pp": AutoModelForSeq2SeqLM,
"bigscience/T0_3B": AutoModelForSeq2SeqLM,
"google/t5-small-lm-adapt": AutoModelForSeq2SeqLM, # 220M
"google/t5-l-lm-adapt": AutoModelForSeq2SeqLM, # 800M
"google/t5-xl-lm-adapt": AutoModelForSeq2SeqLM, # 3B
"google/t5-xxl-lm-adapt": AutoModelForSeq2SeqLM, # 11B
"google/t5-v1_1-l": AutoModelForSeq2SeqLM, # 800M
"google/t5-v1_1-xl": AutoModelForSeq2SeqLM, # 3B
"google/t5-v1_1-xxl": AutoModelForSeq2SeqLM, # 11B
"google/flan-t5-l": AutoModelForSeq2SeqLM, # 800M
"google/flan-t5-xl": AutoModelForSeq2SeqLM, # 3B
"google/flan-t5-xxl": AutoModelForSeq2SeqLM, # 11B
}
MODEL_GENTYPE_REGISTRY = {
"text-generation": AutoModelForCausalLM,
"llama-text-generation": LlamaForCausalLM,
"text2text-generation": AutoModelForSeq2SeqLM,
}
def get_max_memory(gpu_reduction: float) -> Dict[int, str]:
"""Get max memory in GB times reduction."""
free_in_gb = int(torch.cuda.mem_get_info()[0] / 1024**3) # type: ignore
max_mem = f"{int(gpu_reduction*free_in_gb)}GB"
n_gpus = torch.cuda.device_count()
max_mem_dict = {i: max_mem for i in range(n_gpus)}
return max_mem_dict
class GenerationPipeline:
"""
Custom Pipeline.
HF pipelines do not handle devices well in multi-gpu setting.
Create our own generation pipeline.
"""
def __init__(
self,
model: Union[PreTrainedModel, deepspeed.InferenceEngine],
tokenizer: PreTrainedTokenizer,
        device: Optional[int] = None,
bitsandbytes: bool = False,
is_encdec: bool = False,
):
"""Initialize."""
# Use to turn off sampling
# https://github.com/TimDettmers/bitsandbytes/issues/42
self.bitsandbytes = bitsandbytes
self.model = model
self.is_encdec = is_encdec
config = model.config # type: ignore
# Used for GPT
self.max_length = getattr(config, "max_position_embeddings", None)
if self.max_length is None:
# Used for Bloom
self.max_length = getattr(config, "seq_length", None)
if self.max_length is None:
# Used for T0
self.max_length = getattr(config, "d_model", None)
if self.max_length is None:
# Default
self.max_length = 2048
print(f"Usings max_length: {self.max_length}")
self.tokenizer = tokenizer
# self.device = device
# With bits and bytes, do not want to place inputs on any device
# if self.device:
self.device = (
torch.device("cpu")
if (device == -1 or not torch.cuda.is_available())
else torch.device(f"cuda:{device}")
)
def __call__(
self, text: Union[str, List[str]], **kwargs: Any
) -> List[Dict[str, Union[str, List[float]]]]:
"""Generate from text.
Args:
text: text to generate.
Returns:
generated text.
"""
# If text is longer than max model length, we reduce max input length to ensure
# the user indicated generation tokens is preserved.
max_input_len = (
self.max_length - kwargs.get("max_new_tokens")
if not self.is_encdec
else self.max_length
)
encoded_prompt = self.tokenizer(
text,
max_length=max_input_len,
truncation=True,
padding=True,
return_tensors="pt",
)
encoded_prompt = encoded_prompt.to(self.device)
kwargs_to_pass = dict(
temperature=kwargs.get("temperature"),
top_k=kwargs.get("top_k"),
top_p=kwargs.get("top_p"),
repetition_penalty=kwargs.get("repetition_penalty"),
num_return_sequences=kwargs.get("num_return_sequences"),
)
kwargs_to_pass = {k: v for k, v in kwargs_to_pass.items() if v is not None}
output_dict = self.model.generate( # type: ignore
**encoded_prompt,
**kwargs_to_pass,
max_new_tokens=kwargs.get("max_new_tokens"),
eos_token_id=self.tokenizer.eos_token_id,
pad_token_id=self.tokenizer.pad_token_id,
output_scores=True,
return_dict_in_generate=True,
)
# logits/scores from the output always correspond to the generated tokens.
# shape (num_tokens, num_return_sequences, vocab_size)
logits = torch.stack(output_dict.scores)
logits = torch.nn.functional.log_softmax(logits, dim=-1)
num_generated_tokens = logits.shape[0]
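        # For each returned sequence i, index out the log prob of the token that
        # was actually generated at every step: output_seq[-num_generated_tokens:]
        # holds the generated token ids, and logits is indexed as
        # (step, sequence index, token id).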
generated_sequences = [
{
"generated_text": self.tokenizer.decode(
output_seq[-num_generated_tokens:], skip_special_tokens=True
),
"logprobs": logits[
range(num_generated_tokens), i, output_seq[-num_generated_tokens:]
].tolist(),
"tokens": output_seq[-num_generated_tokens:].tolist(),
}
for i, output_seq in enumerate(output_dict.sequences)
]
return generated_sequences
class HuggingFaceModel(Model):
"""HuggingFace Model."""
def __init__(
self,
model_name_or_path: str,
model_type: Optional[str] = None,
model_config: Optional[str] = None,
cache_dir: Optional[str] = None,
device: int = 0,
use_accelerate: bool = False,
use_parallelize: bool = False,
use_bitsandbytes: bool = False,
use_deepspeed: bool = False,
perc_max_gpu_mem_red: float = 1.0,
use_fp16: bool = False,
):
"""
Initialize model.
All arguments will be passed in the request from Manifest.
Args:
model_name_or_path: model name string.
model_config: model config string.
cache_dir: cache directory for model.
device: device to use for model.
use_accelerate: whether to use accelerate for multi-gpu inference.
use_parallelize: use HF default parallelize
use_bitsandbytes: use HF bits and bytes
use_deepspeed: use deepspeed
perc_max_gpu_mem_red: percent max memory reduction in accelerate
use_fp16: use fp16 for model weights.
"""
if sum([use_accelerate, use_parallelize, use_bitsandbytes, use_deepspeed]) > 1:
raise ValueError(
"Only one of use_accelerate, use_parallelize, "
"use_bitsandbytes, use_deepspeed can be set to True"
)
# Check if providing path
self.model_path = model_name_or_path
if Path(self.model_path).exists() and Path(self.model_path).is_dir():
# Try to find config
if (Path(self.model_path) / "config.json").exists():
config = json.load(open(Path(self.model_path) / "config.json"))
model_name_or_path = config["_name_or_path"]
self.model_name = model_name_or_path
self.model_type = model_type
if self.model_name not in MODEL_REGISTRY and self.model_type is None:
raise ValueError(
f"{self.model_name} is not in our registry. Please specify "
"--model_generation_type as either text-generation (for Causal)"
" or text2text-generation (for Seq2Seq)"
)
print("Model Name:", self.model_name, "Model Path:", self.model_path)
def get_init_params(self) -> Dict:
"""Return init params to determine what model is being used."""
return {"model_name": self.model_name, "model_path": self.model_path}
def _dispatch_deepspeed_model(
self, model: PreTrainedModel
) -> deepspeed.InferenceEngine:
"""
Load model with deepspeed.
Adapted from https://www.deepspeed.ai/tutorials/inference-tutorial/
Args:
model: loaded hugging face model
"""
model = deepspeed.init_inference(
model=model,
mp_size=1,
dtype=model.dtype,
replace_method="auto",
replace_with_kernel_inject=True,
)
return model
def _dispatch_accelerate_model(
self, model: PreTrainedModel, perc_max_gpu_mem_red: float
) -> None:
"""
Load model with accelerate.
Adapted from https://colab.research.google.com/drive/14wnxMvD9zsiBQo2FtT
pxn6w2cpXCcb-7#scrollTo=y8Ne7jJdaF9F&uniqifier=1
Args:
model: loaded hugging face model
perc_max_gpu_mem_red: percent memory reduction
"""
model.tie_weights() # type: ignore
# Get the model where we can infer devices from
if hasattr(model, "model"):
# OPT
main_model = model.model # type: ignore
model_getter = "model."
else:
# Eleuther Neo and J
main_model = model
model_getter = ""
# Decrease max mem
max_memory = {
k: int(perc_max_gpu_mem_red * v) for k, v in acc_get_max_memory().items()
}
raw_device_map = infer_auto_device_map(
main_model,
max_memory=max_memory,
no_split_module_classes=[
"OPTDecoderLayer",
"GPTNeoBlock",
"GPTJBlock",
"GPTNeoXLayer",
"T5Block",
],
dtype=model.dtype, # type: ignore
)
# Hacky fix for Eleuther getting the "weight" of embeddings
device_map = {}
for k, v in raw_device_map.items():
if k in {"wte", "wpe"}:
device_map[f"{model_getter}{k}.weight"] = v
else:
device_map[f"{model_getter}{k}"] = v
# For OPT models
if "lm_head" not in device_map:
try:
device_map["lm_head"] = max(device_map.values())
except TypeError:
device_map["lm_head"] = "cpu"
print("Device Map", device_map)
dispatch_model(model, device_map=device_map)
return
class CrossModalEncoderModel(HuggingFaceModel):
"""CrossModalEncoderModel."""
def __init__(
self,
model_name_or_path: str,
model_type: Optional[str] = None,
model_config: Optional[str] = None,
cache_dir: Optional[str] = None,
device: int = 0,
use_accelerate: bool = False,
use_parallelize: bool = False,
use_bitsandbytes: bool = False,
use_deepspeed: bool = False,
perc_max_gpu_mem_red: float = 1.0,
use_fp16: bool = False,
):
"""
Initialize model.
All arguments will be passed in the request from Manifest.
Args:
model_name_or_path: model name string.
model_config: model config string.
cache_dir: cache directory for model.
device: device to use for model.
use_accelerate: whether to use accelerate for multi-gpu inference.
use_parallelize: use HF default parallelize
use_bitsandbytes: use HF bits and bytes
use_deepspeed: use deepspeed
perc_max_gpu_mem_red: percent max memory reduction in accelerate
use_fp16: use fp16 for model weights.
"""
super().__init__(
model_name_or_path,
model_type,
model_config,
cache_dir,
device,
use_accelerate,
use_parallelize,
use_bitsandbytes,
use_deepspeed,
perc_max_gpu_mem_red,
use_fp16,
)
# TODO: make this generalizable
self.processor = CLIPProcessor.from_pretrained(self.model_path)
model = MODEL_REGISTRY.get(
self.model_name, MODEL_GENTYPE_REGISTRY.get(self.model_type, None)
).from_pretrained(
self.model_path,
cache_dir=cache_dir,
trust_remote_code=True,
)
model.eval()
torch_device = (
torch.device("cpu")
if (device == -1 or not torch.cuda.is_available())
else torch.device(f"cuda:{device}")
)
self.model = model.to(torch_device) # type: ignore
@torch.no_grad()
def embed(self, prompt: Union[str, List[str]], **kwargs: Any) -> np.ndarray:
"""
Compute embedding for prompts.
Args:
            prompt: prompt to generate from.
Returns:
embedding
"""
if isinstance(prompt, str):
inputs = self.processor(text=prompt, return_tensors="pt", padding=True)
elif isinstance(prompt, PIL.Image.Image):
inputs = self.processor(images=prompt, return_tensors="pt", padding=True)
else:
raise ValueError("Prompt must be a string or an image")
outputs = self.model(**inputs)
return outputs
class TextGenerationModel(HuggingFaceModel):
"""Huggingface model."""
def __init__(
self,
model_name_or_path: str,
model_type: Optional[str] = None,
model_config: Optional[str] = None,
cache_dir: Optional[str] = None,
device: int = 0,
use_accelerate: bool = False,
use_parallelize: bool = False,
use_bitsandbytes: bool = False,
use_deepspeed: bool = False,
perc_max_gpu_mem_red: float = 1.0,
use_fp16: bool = False,
):
"""
Initialize model.
All arguments will be passed in the request from Manifest.
Args:
model_name_or_path: model name string.
model_config: model config string.
cache_dir: cache directory for model.
device: device to use for model.
use_accelerate: whether to use accelerate for multi-gpu inference.
use_parallelize: use HF default parallelize
use_bitsandbytes: use HF bits and bytes
use_deepspeed: use deepspeed
perc_max_gpu_mem_red: percent max memory reduction in accelerate
use_fp16: use fp16 for model weights.
"""
super().__init__(
model_name_or_path,
model_type,
model_config,
cache_dir,
device,
use_accelerate,
use_parallelize,
use_bitsandbytes,
use_deepspeed,
perc_max_gpu_mem_red,
use_fp16,
)
if (
MODEL_REGISTRY.get(
self.model_name, MODEL_GENTYPE_REGISTRY.get(self.model_type, None)
)
== LlamaForCausalLM
):
tokenizer = LlamaTokenizer.from_pretrained(self.model_name)
else:
try:
tokenizer = AutoTokenizer.from_pretrained(
self.model_name, truncation_side="left", padding_side="left"
)
except ValueError:
tokenizer = AutoTokenizer.from_pretrained(
self.model_name,
truncation_side="left",
padding_side="left",
use_fast=False,
)
dtype = torch.float16 if use_fp16 else "auto"
if use_bitsandbytes:
print("WARNING!!! Cannot use sampling with bitsandbytes.")
max_memory = get_max_memory(perc_max_gpu_mem_red)
model = MODEL_REGISTRY.get(
self.model_name, MODEL_GENTYPE_REGISTRY.get(self.model_type, None)
).from_pretrained( # type: ignore
self.model_path,
cache_dir=cache_dir,
load_in_8bit=True,
device_map="auto",
max_memory=max_memory,
trust_remote_code=True,
)
else:
try:
                # Try to explicitly find a fp16 copy (gpt-j-6B for example)
model = MODEL_REGISTRY.get(
self.model_name, MODEL_GENTYPE_REGISTRY.get(self.model_type, None)
).from_pretrained( # type: ignore
self.model_path,
cache_dir=cache_dir,
revision="float16",
torch_dtype=torch.float16,
trust_remote_code=True,
)
except Exception:
model = MODEL_REGISTRY.get(
self.model_name, MODEL_GENTYPE_REGISTRY.get(self.model_type, None)
).from_pretrained( # type: ignore
self.model_path,
cache_dir=cache_dir,
torch_dtype=dtype,
trust_remote_code=True,
)
model.eval()
print(f"Loaded Model DType {model.dtype}")
self.is_encdec = model.config.is_encoder_decoder
if not self.is_encdec:
tokenizer.pad_token = tokenizer.eos_token
tokenizer.pad_token_id = tokenizer.eos_token_id
if not use_bitsandbytes:
if use_accelerate:
self._dispatch_accelerate_model(model, perc_max_gpu_mem_red)
device = 0
elif use_parallelize:
model.parallelize()
device = 0
elif use_deepspeed:
self._dispatch_deepspeed_model(model)
device = 0
else:
if device > -1:
torch_device = (
torch.device("cpu")
if (device == -1 or not torch.cuda.is_available())
else torch.device(f"cuda:{device}")
)
model = model.to(torch_device) # type: ignore
self.pipeline = GenerationPipeline( # type: ignore
model=model,
tokenizer=tokenizer,
device=device,
bitsandbytes=use_bitsandbytes,
is_encdec=self.is_encdec,
)
@torch.no_grad()
def embed(self, prompt: Union[str, List[str]], **kwargs: Any) -> np.ndarray:
"""
Embed the prompt from model.
Args:
            prompt: prompt to embed from.
Returns:
list of embeddings (list of length 1 for 1 embedding).
"""
if isinstance(prompt, str):
prompt = [prompt]
encoded_prompt = self.pipeline.tokenizer(
prompt,
max_length=self.pipeline.max_length,
truncation=True,
padding=True,
return_tensors="pt",
)
encoded_prompt = encoded_prompt.to(self.pipeline.device)
# Get last hidden state
output = self.pipeline.model( # type: ignore
**encoded_prompt,
output_hidden_states=True,
return_dict=True,
)
last_hidden_state = output["hidden_states"][-1][:, -1, :]
return last_hidden_state.cpu().numpy()
@torch.no_grad()
def generate(
self, prompt: Union[str, List[str]], **kwargs: Any
) -> List[Tuple[Any, float, List[int], List[float]]]:
"""
Generate the prompt from model.
Outputs must be generated text and score, not including prompt.
Args:
            prompt: prompt to generate from.
Returns:
list of generated text (list of length 1 for 1 generation).
"""
num_return = kwargs.get("n", 1)
if isinstance(prompt, list) and num_return > 1:
raise ValueError("In batch generate, n must be 1.")
result = self.pipeline(
prompt,
max_new_tokens=kwargs.get("max_tokens"),
temperature=kwargs.get("temperature"),
repetition_penalty=kwargs.get("repetition_penalty"),
top_k=kwargs.get("top_k"),
top_p=kwargs.get("top_p"),
do_sample=kwargs.get("do_sample"),
num_return_sequences=num_return,
)
final_results = [
(
cast(str, r["generated_text"]),
sum(cast(List[float], r["logprobs"])),
cast(List[int], r["tokens"]),
cast(List[float], r["logprobs"]),
)
for r in result
]
return final_results
@torch.no_grad()
def score_sequence(
self, prompt: Union[str, List[str]], **kwargs: Any
) -> List[Tuple[float, List[int], List[float]]]:
"""
Score a sequence of choices.
Args:
prompt (:obj:`str` or :obj:`List[str]`):
The prompt to score the choices against.
**kwargs:
Additional keyword arguments passed along to the :obj:`__call__` method.
"""
if isinstance(prompt, str):
prompt = [prompt]
encoded_prompt = self.pipeline.tokenizer(
prompt,
max_length=self.pipeline.max_length,
truncation=True,
padding=True,
return_tensors="pt",
)
encoded_prompt["labels"] = encoded_prompt["input_ids"].clone()
encoded_prompt = encoded_prompt.to(self.pipeline.device)
logits = self.pipeline.model( # type: ignore
**encoded_prompt,
).logits
        # For causal decoders, logits and labels would normally be shifted;
        # here each label token is scored against the logits at its own position.
labels_attention_mask = encoded_prompt["attention_mask"].unsqueeze(-1)
masked_log_probs = labels_attention_mask.float() * torch.log_softmax(
logits.float(), dim=-1
)
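        # Gather, at every position, the log prob assigned to the label token
        # itself; summing over positions then gives the full-sequence log prob.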
seq_token_log_probs = torch.gather(
masked_log_probs, -1, encoded_prompt["labels"].unsqueeze(-1)
)
seq_token_log_probs = seq_token_log_probs.squeeze(dim=-1)
seq_log_prob = seq_token_log_probs.sum(dim=-1)
return [
(seq, tokens, seq_token)
for seq, tokens, seq_token in zip(
seq_log_prob.tolist(),
encoded_prompt["input_ids"].tolist(),
seq_token_log_probs.tolist(),
)
]
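# Illustrative usage sketch (not part of the original module): end-to-end
# generation with a small registry model on CPU. The model choice and prompt
# are assumptions for demonstration only.
def _example_text_generation_usage() -> None:
    """Hedged example: load gpt2 and generate a short greedy completion."""
    model = TextGenerationModel("gpt2", device=-1)
    results = model.generate("The capital of France is", max_tokens=5)
    text, total_logprob, tokens, token_logprobs = results[0]
    print(text, total_logprob)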
| manifest-main | manifest/api/models/huggingface.py |
"""Serializer."""
import io
import json
import os
from pathlib import Path
from typing import Dict
import numpy as np
import xxhash
from manifest.caches.array_cache import ArrayCache
class Serializer:
"""Serializer."""
def request_to_key(self, request: Dict) -> str:
"""
Normalize a request into a key.
Args:
request: request to normalize.
Returns:
normalized key.
"""
return json.dumps(request, sort_keys=True)
def key_to_request(self, key: str) -> Dict:
"""
Convert the normalized version to the request.
Args:
key: normalized key to convert.
Returns:
unnormalized request dict.
"""
return json.loads(key)
def response_to_key(self, response: Dict) -> str:
"""
Normalize a response into a key.
Args:
response: response to normalize.
Returns:
normalized key.
"""
return json.dumps(response, sort_keys=True)
def key_to_response(self, key: str) -> Dict:
"""
Convert the normalized version to the response.
Args:
key: normalized key to convert.
Returns:
unnormalized response dict.
"""
return json.loads(key)
class NumpyByteSerializer(Serializer):
"""Serializer by casting array to byte string."""
def response_to_key(self, response: Dict) -> str:
"""
Normalize a response into a key.
Args:
response: response to normalize.
Returns:
normalized key.
"""
sub_response = response["response"]
# Assume response is a dict with keys "choices" -> List dicts
# with keys "array".
choices = sub_response["choices"]
# We don't want to modify the response in place
# but we want to avoid calling deepcopy on an array
del sub_response["choices"]
response_copy = sub_response.copy()
sub_response["choices"] = choices
response_copy["choices"] = []
for choice in choices:
if "array" not in choice:
raise ValueError(
f"Choice with keys {choice.keys()} does not have array key."
)
arr = choice["array"]
# Avoid copying an array
del choice["array"]
new_choice = choice.copy()
choice["array"] = arr
with io.BytesIO() as f:
np.savez_compressed(f, data=arr)
hash_str = f.getvalue().hex()
new_choice["array"] = hash_str
response_copy["choices"].append(new_choice)
response["response"] = response_copy
return json.dumps(response, sort_keys=True)
def key_to_response(self, key: str) -> Dict:
"""
Convert the normalized version to the response.
Args:
key: normalized key to convert.
Returns:
unnormalized response dict.
"""
response = json.loads(key)
for choice in response["response"]["choices"]:
hash_str = choice["array"]
byte_str = bytes.fromhex(hash_str)
with io.BytesIO(byte_str) as f:
choice["array"] = np.load(f)["data"]
return response
class ArraySerializer(Serializer):
"""Serializer for array."""
def __init__(self) -> None:
"""
Initialize array serializer.
We don't want to cache the array. We hash the value and
store the array in a memmap file. Store filename/offsets
in sqlitedict to keep track of hash -> array.
"""
super().__init__()
self.hash = xxhash.xxh64()
manifest_home = Path(os.environ.get("MANIFEST_HOME", Path.home()))
cache_folder = manifest_home / ".manifest" / "array_cache"
self.writer = ArrayCache(cache_folder)
def response_to_key(self, response: Dict) -> str:
"""
Normalize a response into a key.
Convert arrays to hash string for cache key.
Args:
response: response to normalize.
Returns:
normalized key.
"""
sub_response = response["response"]
# Assume response is a dict with keys "choices" -> List dicts
# with keys "array".
choices = sub_response["choices"]
# We don't want to modify the response in place
# but we want to avoid calling deepcopy on an array
del sub_response["choices"]
response_copy = sub_response.copy()
sub_response["choices"] = choices
response_copy["choices"] = []
for choice in choices:
if "array" not in choice:
raise ValueError(
f"Choice with keys {choice.keys()} does not have array key."
)
arr = choice["array"]
# Avoid copying an array
del choice["array"]
new_choice = choice.copy()
choice["array"] = arr
self.hash.update(arr)
hash_str = self.hash.hexdigest()
self.hash.reset()
new_choice["array"] = hash_str
response_copy["choices"].append(new_choice)
if not self.writer.contains_key(hash_str):
self.writer.put(hash_str, arr)
response["response"] = response_copy
return json.dumps(response, sort_keys=True)
def key_to_response(self, key: str) -> Dict:
"""
Convert the normalized version to the response.
Convert the hash string keys to the arrays.
Args:
key: normalized key to convert.
Returns:
unnormalized response dict.
"""
response = json.loads(key)
for choice in response["response"]["choices"]:
hash_str = choice["array"]
choice["array"] = self.writer.get(hash_str)
return response
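# Illustrative round-trip sketch (not part of the original module): shows the
# response layout the array serializers above expect. Field values are made up.
def _example_numpy_byte_roundtrip() -> None:
    """Hedged example: serialize and recover an array-valued response."""
    serializer = NumpyByteSerializer()
    response = {
        "response": {"choices": [{"array": np.arange(4, dtype=np.float32)}]},
        "cached": False,
    }
    key = serializer.response_to_key(response)
    recovered = serializer.key_to_response(key)
    assert np.array_equal(
        recovered["response"]["choices"][0]["array"],
        np.arange(4, dtype=np.float32),
    )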
| manifest-main | manifest/caches/serializers.py |
"""Cache for queries and responses."""
from abc import ABC, abstractmethod
from typing import Any, Dict, Type, Union
from manifest.caches.serializers import ArraySerializer, NumpyByteSerializer, Serializer
from manifest.request import DiffusionRequest, EmbeddingRequest, LMRequest, Request
from manifest.response import Response
# Non-text return type caches
ARRAY_CACHE_TYPES = {EmbeddingRequest, DiffusionRequest}
class Cache(ABC):
"""A cache for request/response pairs."""
def __init__(
self,
connection_str: str,
request_type: Type[Request] = LMRequest,
cache_args: Dict[str, Any] = {},
):
"""
Initialize cache.
Args:
connection_str: connection string.
request_type: request type.
cache_args: arguments for cache.
cache_args are any arguments needed to initialize the cache.
Further, cache_args can contain `array_serializer` as a string
for embedding or image return types (e.g. diffusers) with values
as `local_file` or `byte_string`. `local_file` will save the
array in a local file and cache a pointer to the file.
`byte_string` will convert the array to a byte string and cache
the entire byte string. `byte_string` is default.
"""
self.request_type = request_type
self.connect(connection_str, cache_args)
if self.request_type in ARRAY_CACHE_TYPES:
array_serializer = cache_args.pop("array_serializer", "byte_string")
if array_serializer not in ["local_file", "byte_string"]:
raise ValueError(
"array_serializer must be local_file or byte_string,"
f" not {array_serializer}"
)
self.serializer = (
ArraySerializer()
if array_serializer == "local_file"
else NumpyByteSerializer()
)
else:
# If user has array_serializer type, it will throw an error as
# it is not recognized for non-array return types.
self.serializer = Serializer()
@abstractmethod
def close(self) -> None:
"""Close the cache."""
raise NotImplementedError()
@abstractmethod
def connect(self, connection_str: str, cache_args: Dict[str, Any]) -> None:
"""
Connect to cache.
Args:
connection_str: connection string.
"""
raise NotImplementedError()
@abstractmethod
def get_key(self, key: str, table: str = "default") -> Union[str, None]:
"""
Get the key for a request.
        Will return None if key is not in cache.
Args:
key: key for cache.
table: table to get key in.
"""
raise NotImplementedError()
@abstractmethod
def set_key(self, key: str, value: str, table: str = "default") -> None:
"""
Set the value for the key.
Will override old value.
Args:
key: key for cache.
value: new value for key.
table: table to set key in.
"""
raise NotImplementedError()
@abstractmethod
def commit(self) -> None:
"""Commit any results."""
raise NotImplementedError()
def get(self, request: Dict) -> Union[Response, None]:
"""Get the result of request (by calling compute as needed).
Args:
request: request to get.
response: response to get.
Returns:
Response object or None if not in cache.
"""
key = self.serializer.request_to_key(request)
cached_response = self.get_key(key)
if cached_response:
response = self.serializer.key_to_response(cached_response)
response["cached"] = True
return Response.from_dict(response, request_dict=request)
return None
def set(self, request: Dict, response: Dict) -> None:
"""Set the value for the key.
Args:
request: request to set.
response: response to set.
"""
key = self.serializer.request_to_key(request)
self.set_key(key, self.serializer.response_to_key(response))
| manifest-main | manifest/caches/cache.py |
"""Cache init."""
| manifest-main | manifest/caches/__init__.py |
"""SQLite cache."""
import logging
from typing import Any, Dict, Union
from sqlitedict import SqliteDict
from manifest.caches.cache import Cache
logging.getLogger("sqlitedict").setLevel(logging.WARNING)
class SQLiteCache(Cache):
"""A SQLite cache for request/response pairs."""
def connect(self, connection_str: str, cache_args: Dict[str, Any]) -> None:
"""
Connect to client.
Args:
connection_str: connection string.
cache_args: arguments for cache.
"""
self.cache_file = connection_str
if not self.cache_file:
self.cache_file = ".sqlite.cache"
self.cache = SqliteDict(self.cache_file, autocommit=True)
return
def close(self) -> None:
"""Close the client."""
self.cache.close()
def _normalize_table_key(self, key: str, table: str) -> str:
"""Cast key for prompt key."""
return f"{table}:{key}"
def get_key(self, key: str, table: str = "default") -> Union[str, None]:
"""
Get the key for a request.
        Will return None if key is not in cache.
Args:
key: key for cache.
table: table to get key in.
"""
return self.cache.get(self._normalize_table_key(key, table))
def set_key(self, key: str, value: str, table: str = "default") -> None:
"""
Set the value for the key.
Will override old value.
Args:
key: key for cache.
value: new value for key.
table: table to set key in.
"""
self.cache[self._normalize_table_key(key, table)] = value
self.commit()
def commit(self) -> None:
"""Commit any results."""
self.cache.commit()
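# Illustrative usage sketch (not part of the original module): exercises the
# raw key/value layer directly. The cache file path is an assumption.
def _example_sqlite_cache_usage() -> None:
    """Hedged example: set and get a key in a throwaway SQLite cache."""
    cache = SQLiteCache("/tmp/example_manifest_cache.sqlite")
    cache.set_key("my-request-key", '{"text": "hello"}', table="prompt")
    assert cache.get_key("my-request-key", table="prompt") == '{"text": "hello"}'
    cache.close()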
| manifest-main | manifest/caches/sqlite.py |
"""Redis cache."""
from typing import Any, Dict, Union
import redis
from manifest.caches.cache import Cache
class RedisCache(Cache):
"""A Redis cache for request/response pairs."""
def connect(self, connection_str: str, cache_args: Dict[str, Any]) -> None:
"""
Connect to client.
Args:
connection_str: connection string.
cache_args: arguments for cache.
"""
host, port = connection_str.split(":")
self.redis = redis.Redis(host=host, port=int(port), db=0)
return
def close(self) -> None:
"""Close the client."""
self.redis.close()
def _normalize_table_key(self, key: str, table: str) -> str:
"""Cast key for prompt key."""
return f"{table}:{key}"
def get_key(self, key: str, table: str = "default") -> Union[str, None]:
"""
Get the key for a request.
        Will return None if key is not in cache.
Args:
key: key for cache.
table: table to get key in.
"""
norm_key = self._normalize_table_key(key, table)
if self.redis.exists(norm_key):
return self.redis.get(norm_key).decode("utf-8")
else:
return None
def set_key(self, key: str, value: str, table: str = "default") -> None:
"""
Set the value for the key.
Will override old value.
Args:
key: key for cache.
value: new value for key.
table: table to set key in.
"""
self.redis.set(self._normalize_table_key(key, table), value)
self.commit()
def commit(self) -> None:
"""Commit any results."""
pass
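# Illustrative usage sketch (not part of the original module), assuming a local
# Redis server is reachable at localhost:6379.
def _example_redis_cache_usage() -> None:
    """Hedged example: set and get a key against a local Redis instance."""
    cache = RedisCache("localhost:6379")
    cache.set_key("my-request-key", '{"text": "hello"}', table="prompt")
    assert cache.get_key("my-request-key", table="prompt") == '{"text": "hello"}'
    cache.close()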
| manifest-main | manifest/caches/redis.py |
"""Noop cache."""
from typing import Any, Dict, Union
from manifest.caches.cache import Cache
class NoopCache(Cache):
"""A Noop cache that caches nothing for request/response pairs."""
def connect(self, connection_str: str, cache_args: Dict[str, Any]) -> None:
"""
Connect to client.
Args:
connection_str: connection string.
cache_args: arguments for cache.
"""
pass
def close(self) -> None:
"""Close the client."""
pass
def get_key(self, key: str, table: str = "default") -> Union[str, None]:
"""
        Always return None, since nothing is ever cached.
Args:
key: key for cache.
table: table to get key in.
"""
return None
def set_key(self, key: str, value: str, table: str = "default") -> None:
"""
        Do not set anything, since this is a no-op cache.
Args:
key: key for cache.
value: new value for key.
table: table to set key in.
"""
pass
def commit(self) -> None:
"""Commit any results."""
pass
| manifest-main | manifest/caches/noop.py |
"""Postgres cache."""
import hashlib
import logging
from typing import Any, Dict, Union
logger = logging.getLogger("postgresql")
logger.setLevel(logging.WARNING)
from ..caches.cache import Cache
try:
import sqlalchemy # type: ignore
from google.cloud.sql.connector import Connector # type: ignore
from sqlalchemy import Column, String # type: ignore
from sqlalchemy.ext.declarative import declarative_base # type: ignore
from sqlalchemy.orm import sessionmaker # type: ignore
Base = declarative_base()
class Request(Base): # type: ignore
"""The request table."""
__tablename__ = "requests"
key = Column(String, primary_key=True)
response = Column(
String
) # FIXME: ideally should be an hstore, but I don't want to set it up on GCP
missing_dependencies = None
except ImportError as e:
missing_dependencies = e
class PostgresCache(Cache):
"""A PostgreSQL cache for request/response pairs."""
def connect(self, connection_str: str, cache_args: Dict[str, Any]) -> None:
"""
Connect to client.
Args:
connection_str: connection string.
cache_args: arguments for cache should include the following fields:
{
"cache_user": "",
"cache_password": "",
"cache_db": ""
}
"""
if missing_dependencies:
raise ValueError(
"Missing dependencies for GCP PostgreSQL cache. "
"Install with `pip install manifest[gcp]`",
missing_dependencies,
)
connector = Connector()
def getconn() -> Any:
conn = connector.connect(
connection_str,
"pg8000",
user=cache_args.pop("cache_user"),
password=cache_args.pop("cache_password"),
db=cache_args.pop("cache_db"),
)
return conn
engine = sqlalchemy.create_engine(
"postgresql+pg8000://",
creator=getconn,
)
engine.dialect.description_encoding = None # type: ignore
db_exists = len(sqlalchemy.inspect(engine).get_table_names()) > 0
if not db_exists:
logger.info("Creating database...")
Base.metadata.create_all(engine)
self.session = sessionmaker(bind=engine)()
def close(self) -> None:
"""Close the client."""
self.session.close()
@staticmethod
def _hash_key(key: str, table: str) -> str:
"""Compute MD5 hash of the key."""
return hashlib.md5(f"{key}:{table}".encode("utf-8")).hexdigest()
def get_key(self, key: str, table: str = "default") -> Union[str, None]:
"""
Get the key for a request.
        Will return None if key is not in cache.
Args:
key: key for cache.
table: table to get key in.
"""
request = (
self.session.query(Request) # type: ignore
.filter_by(key=self._hash_key(key, table))
.first()
)
out = request.response if request else None
return out # type: ignore
def set_key(self, key: str, value: str, table: str = "default") -> None:
"""
Set the value for the key.
Will override old value.
Args:
key: key for cache.
value: new value for key.
table: table to set key in.
"""
key = self._hash_key(key, table)
request = self.session.query(Request).filter_by(key=key).first() # type: ignore
if request:
request.response = value # type: ignore
else:
self.session.add(Request(key=key, response=value))
self.commit()
def commit(self) -> None:
"""Commit any results."""
self.session.commit()
| manifest-main | manifest/caches/postgres.py |
"""Array cache."""
from pathlib import Path
from typing import Union
import numpy as np
from sqlitedict import SqliteDict
def open_mmap_arr(file: Union[Path, str], size: float) -> np.memmap:
"""Open memmap."""
if not Path(file).exists():
mode = "w+"
else:
mode = "r+"
arr = np.memmap( # type: ignore
str(file),
dtype=np.float32, # This means we only support float 32
mode=mode,
shape=size,
)
return arr
class ArrayCache:
"""Array cache."""
def __init__(self, folder: Union[str, Path]) -> None:
"""
Initialize the array writer.
Args:
folder: folder to write to.
"""
self.folder = Path(folder)
self.folder.mkdir(exist_ok=True, parents=True)
self.hash2arrloc = SqliteDict(
self.folder / "hash2arrloc.sqlite", autocommit=True
)
        # 20,480,000 float32 values per memmap file (about 82MB)
self.max_memmap_size = 20480000
self.cur_file_idx = 0
# Get the last file idx used
for key in self.hash2arrloc:
file_data = self.hash2arrloc[key]
if file_data["file_idx"] > self.cur_file_idx:
self.cur_file_idx = file_data["file_idx"]
self.cur_memmap = open_mmap_arr(
self.folder / f"{self.cur_file_idx}.npy",
self.max_memmap_size,
)
# Make sure there is space left in the memmap
non_zero = np.nonzero(self.cur_memmap)[0]
if len(non_zero) > 0:
self.cur_offset = int(np.max(non_zero) + 1)
else:
self.cur_offset = 0
# If no space, make a new memmap
if self.cur_offset == self.max_memmap_size:
self.cur_file_idx += 1
self.cur_memmap = open_mmap_arr(
self.folder / f"{self.cur_file_idx}.npy",
self.max_memmap_size,
)
self.cur_offset = 0
def contains_key(self, key: str) -> bool:
"""
Check if the key is in the cache.
Args:
key: key to check.
Returns:
True if the key is in the cache.
"""
return key in self.hash2arrloc
def put(self, key: str, arr: np.ndarray) -> None:
"""Save array in store and associate location with key."""
# Check if there is space in the memmap
arr_shape = arr.shape
arr = arr.flatten()
if len(arr) > self.max_memmap_size:
raise ValueError(
f"Array is too large to be cached. Max is {self.max_memmap_size}"
)
if self.cur_offset + len(arr) > self.max_memmap_size:
self.cur_file_idx += 1
self.cur_memmap = open_mmap_arr(
self.folder / f"{self.cur_file_idx}.npy",
self.max_memmap_size,
)
self.cur_offset = 0
self.cur_memmap[self.cur_offset : self.cur_offset + len(arr)] = arr
self.cur_memmap.flush()
self.hash2arrloc[key] = {
"file_idx": self.cur_file_idx,
"offset": self.cur_offset,
"flatten_size": len(arr),
"shape": arr_shape,
"dtype": arr.dtype,
}
self.cur_offset += len(arr)
return
def get(self, key: str) -> np.ndarray:
"""Get array associated with location from key."""
file_data = self.hash2arrloc[key]
memmap = open_mmap_arr(
self.folder / f"{file_data['file_idx']}.npy",
self.max_memmap_size,
)
arr = memmap[
file_data["offset"] : file_data["offset"] + file_data["flatten_size"]
]
return arr.reshape(file_data["shape"]).astype(file_data["dtype"])
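# Illustrative usage sketch (not part of the original module): stores one array
# in the memmap-backed cache and reads it back. The cache folder is an
# assumption; note the constructor allocates an ~82MB memmap file there.
def _example_array_cache_usage() -> None:
    """Hedged example: put/get round trip through the ArrayCache."""
    cache = ArrayCache("/tmp/example_manifest_array_cache")
    arr = np.arange(12, dtype=np.float32).reshape(3, 4)
    if not cache.contains_key("example-hash"):
        cache.put("example-hash", arr)
    recovered = cache.get("example-hash")
    assert recovered.shape == (3, 4)
    assert np.array_equal(recovered, arr)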
| manifest-main | manifest/caches/array_cache.py |
"""Test client pool."""
import time
import pytest
from manifest.connections.client_pool import ClientConnection, ClientConnectionPool
from manifest.request import LMRequest
def test_init() -> None:
"""Test initialization."""
client_connection1 = ClientConnection(
client_name="openai", client_connection="XXX", engine="text-davinci-002"
)
client_connection2 = ClientConnection(
client_name="openai", client_connection="XXX", engine="text-ada-001"
)
client_connection3 = ClientConnection(
client_name="openaiembedding", client_connection="XXX"
)
with pytest.raises(ValueError) as exc_info:
ClientConnectionPool(
[client_connection1, client_connection2], client_pool_scheduler="bad"
)
assert str(exc_info.value) == "Unknown scheduler: bad."
with pytest.raises(ValueError) as exc_info:
ClientConnectionPool([client_connection1, client_connection3])
assert (
str(exc_info.value)
== "All clients in the client pool must use the same request type. You have [\"<class 'manifest.request.EmbeddingRequest'>\", \"<class 'manifest.request.LMRequest'>\"]" # noqa: E501"
)
pool = ClientConnectionPool([client_connection1, client_connection2])
assert pool.request_type == LMRequest
assert len(pool.client_pool) == 2
assert len(pool.client_pool_metrics) == 2
assert pool.client_pool[0].engine == "text-davinci-002" # type: ignore
assert pool.client_pool[1].engine == "text-ada-001" # type: ignore
def test_timing() -> None:
"""Test timing client."""
client_connection1 = ClientConnection(client_name="dummy")
client_connection2 = ClientConnection(client_name="dummy")
connection_pool = ClientConnectionPool([client_connection1, client_connection2])
connection_pool.get_next_client()
assert connection_pool.current_client_id == 0
connection_pool.start_timer()
time.sleep(2)
connection_pool.end_timer()
connection_pool.get_next_client()
assert connection_pool.current_client_id == 1
connection_pool.start_timer()
time.sleep(1)
connection_pool.end_timer()
timing = connection_pool.client_pool_metrics
assert timing[0].end - timing[0].start > 1.9
assert timing[1].end - timing[1].start > 0.9
| manifest-main | tests/test_client_pool.py |
"""Setup for all tests."""
import os
import shutil
from pathlib import Path
from typing import Generator
import numpy as np
import pytest
import redis
from manifest.request import DiffusionRequest, EmbeddingRequest, LMRequest
from manifest.response import ArrayModelChoice, LMModelChoice, ModelChoices
@pytest.fixture
def model_choice() -> ModelChoices:
"""Get dummy model choice."""
model_choices = ModelChoices(
choices=[
LMModelChoice(
text="hello", token_logprobs=[0.1, 0.2], tokens=["hel", "lo"]
),
LMModelChoice(text="bye", token_logprobs=[0.3], tokens=["bye"]),
]
)
return model_choices
@pytest.fixture
def model_choice_single() -> ModelChoices:
"""Get dummy model choice."""
model_choices = ModelChoices(
choices=[
LMModelChoice(
text="helloo", token_logprobs=[0.1, 0.2], tokens=["hel", "loo"]
),
]
)
return model_choices
@pytest.fixture
def model_choice_arr() -> ModelChoices:
"""Get dummy model choice."""
np.random.seed(0)
model_choices = ModelChoices(
choices=[
ArrayModelChoice(array=np.random.randn(4, 4), token_logprobs=[0.1, 0.2]),
ArrayModelChoice(array=np.random.randn(4, 4), token_logprobs=[0.3]),
]
)
return model_choices
@pytest.fixture
def model_choice_arr_int() -> ModelChoices:
"""Get dummy model choice."""
np.random.seed(0)
model_choices = ModelChoices(
choices=[
ArrayModelChoice(
array=np.random.randint(20, size=(4, 4)), token_logprobs=[0.1, 0.2]
),
ArrayModelChoice(
array=np.random.randint(20, size=(4, 4)), token_logprobs=[0.3]
),
]
)
return model_choices
@pytest.fixture
def request_lm() -> LMRequest:
"""Get dummy request."""
request = LMRequest(prompt=["what", "cat"])
return request
@pytest.fixture
def request_lm_single() -> LMRequest:
"""Get dummy request."""
request = LMRequest(prompt="monkey", engine="dummy")
return request
@pytest.fixture
def request_array() -> EmbeddingRequest:
"""Get dummy request."""
request = EmbeddingRequest(prompt="hello")
return request
@pytest.fixture
def request_diff() -> DiffusionRequest:
"""Get dummy request."""
request = DiffusionRequest(prompt="hello")
return request
@pytest.fixture
def sqlite_cache(tmp_path: Path) -> Generator[str, None, None]:
"""Sqlite Cache."""
cache = str(tmp_path / "sqlite_cache.sqlite")
yield cache
shutil.rmtree(cache, ignore_errors=True)
@pytest.fixture
def redis_cache() -> Generator[str, None, None]:
"""Redis cache."""
host = os.environ.get("REDIS_HOST", "localhost")
port = int(os.environ.get("REDIS_PORT", 6379))
yield f"{host}:{port}"
# Clear out the database
try:
db = redis.Redis(host=host, port=port)
db.flushdb()
# For better local testing, pass if redis DB not started
except redis.exceptions.ConnectionError:
pass
@pytest.fixture
def postgres_cache(monkeypatch: pytest.MonkeyPatch) -> Generator[str, None, None]:
"""Postgres cache."""
import sqlalchemy # type: ignore
# Replace the sqlalchemy.create_engine function with a function that returns an
# in-memory SQLite engine
url = sqlalchemy.engine.url.URL.create("sqlite", database=":memory:")
engine = sqlalchemy.create_engine(url)
monkeypatch.setattr(sqlalchemy, "create_engine", lambda *args, **kwargs: engine)
return engine # type: ignore
| manifest-main | tests/conftest.py |
"""Response test."""
from typing import List, cast
import numpy as np
import pytest
from manifest import Response
from manifest.request import EmbeddingRequest, LMRequest
from manifest.response import (
ArrayModelChoice,
LMModelChoice,
ModelChoices,
Usage,
Usages,
)
def test_init(
model_choice: ModelChoices,
model_choice_arr: ModelChoices,
model_choice_arr_int: ModelChoices,
request_lm: LMRequest,
request_array: EmbeddingRequest,
) -> None:
"""Test response initialization."""
response = Response(
response=model_choice,
cached=False,
request=request_lm,
usages=None,
request_type=LMRequest,
response_type="text",
)
assert response._response == model_choice
assert response._cached is False
assert response._request == request_lm
assert response._usages == Usages(usages=[])
assert response._request_type == LMRequest
assert response._response_type == "text"
assert response._item_dtype is None
response = Response(
response=model_choice_arr_int,
cached=False,
request=request_array,
usages=Usages(usages=[Usage(total_tokens=4), Usage(total_tokens=6)]),
request_type=EmbeddingRequest,
response_type="array",
)
assert response._cached is False
assert response._request == request_array
assert sum([usg.total_tokens for usg in response._usages.usages]) == 10
assert response._request_type == EmbeddingRequest
assert response._response_type == "array"
assert response._item_dtype == "int64"
with pytest.raises(ValueError) as excinfo:
Response(
response=model_choice,
cached=False,
request=request_lm,
usages=None,
request_type=LMRequest,
response_type="blah",
)
assert "blah" in str(excinfo.value)
# Can't convert array with text
with pytest.raises(ValueError) as excinfo:
Response(
response=model_choice,
cached=False,
request=request_lm,
usages=None,
request_type=LMRequest,
response_type="array",
)
assert str(excinfo.value) == (
"response_type is array but response is "
"<class 'manifest.response.LMModelChoice'>"
)
# Can't convert text with array
with pytest.raises(ValueError) as excinfo:
Response(
response=model_choice_arr,
cached=False,
request=request_array,
usages=None,
request_type=LMRequest,
response_type="text",
)
assert str(excinfo.value) == (
"response_type is text but response is "
"<class 'manifest.response.ArrayModelChoice'>"
)
def test_getters(model_choice: ModelChoices, request_lm: LMRequest) -> None:
"""Test response cached."""
response = Response(
response=model_choice,
cached=False,
request=request_lm,
usages=None,
request_type=LMRequest,
response_type="text",
)
assert response.get_response_obj() == model_choice
assert response.is_cached() is False
assert response.get_request_obj() == request_lm
assert response.get_usage_obj() == Usages(usages=[])
assert response.get_json_response() == model_choice.dict()
assert response.get_response() == ["hello", "bye"]
def test_serialize(
model_choice: ModelChoices,
model_choice_arr: ModelChoices,
model_choice_arr_int: ModelChoices,
request_lm: LMRequest,
request_array: EmbeddingRequest,
) -> None:
"""Test response serialization."""
response = Response(
response=model_choice,
cached=False,
request=request_lm,
usages=None,
request_type=LMRequest,
response_type="text",
)
deserialized_response = Response.deserialize(response.serialize())
assert deserialized_response.get_response_obj() == model_choice
assert deserialized_response.is_cached() is False
assert deserialized_response.get_request_obj() == request_lm
assert deserialized_response.get_usage_obj() == Usages(usages=[])
assert deserialized_response.get_json_response() == model_choice.dict()
assert deserialized_response.get_response() == ["hello", "bye"]
deserialized_response = Response.from_dict(response.to_dict())
assert deserialized_response.get_response_obj() == model_choice
assert deserialized_response.is_cached() is False
assert deserialized_response.get_request_obj() == request_lm
assert deserialized_response.get_usage_obj() == Usages(usages=[])
assert deserialized_response.get_json_response() == model_choice.dict()
assert deserialized_response.get_response() == ["hello", "bye"]
deserialized_response = Response.from_dict(
response.to_dict(drop_request=True), request_dict={"prompt": "blahhhh"}
)
assert deserialized_response.get_response_obj() == model_choice
assert deserialized_response.is_cached() is False
assert deserialized_response.get_request_obj().prompt == "blahhhh"
assert deserialized_response.get_usage_obj() == Usages(usages=[])
assert deserialized_response.get_json_response() == model_choice.dict()
assert deserialized_response.get_response() == ["hello", "bye"]
# Int type
response = Response(
response=model_choice_arr_int,
cached=False,
request=request_array,
usages=Usages(usages=[Usage(total_tokens=4), Usage(total_tokens=6)]),
request_type=EmbeddingRequest,
response_type="array",
)
deserialized_response = Response.deserialize(response.serialize())
assert deserialized_response._item_dtype == "int64"
assert (
cast(
ArrayModelChoice, deserialized_response.get_response_obj().choices[0]
).array.dtype
== np.int64
)
assert np.array_equal(
cast(
ArrayModelChoice, deserialized_response.get_response_obj().choices[0]
).array,
cast(ArrayModelChoice, model_choice_arr_int.choices[0]).array,
)
# Float type
response = Response(
response=model_choice_arr,
cached=False,
request=request_array,
usages=Usages(usages=[Usage(total_tokens=4), Usage(total_tokens=6)]),
request_type=EmbeddingRequest,
response_type="array",
)
deserialized_response = Response.deserialize(response.serialize())
assert deserialized_response._item_dtype == "float64"
assert (
cast(
ArrayModelChoice, deserialized_response.get_response_obj().choices[0]
).array.dtype
== np.float64
)
assert np.array_equal(
cast(
ArrayModelChoice, deserialized_response.get_response_obj().choices[0]
).array,
cast(ArrayModelChoice, model_choice_arr.choices[0]).array,
)
def test_get_results(
model_choice: ModelChoices,
model_choice_single: ModelChoices,
model_choice_arr: ModelChoices,
request_lm: LMRequest,
request_array: EmbeddingRequest,
) -> None:
"""Test response get results."""
response = Response(
response=model_choice_single,
cached=False,
request=request_lm,
usages=None,
request_type=LMRequest,
response_type="text",
)
assert response.get_response() == "helloo"
assert response.get_response(stop_token="ll") == "he"
assert response.get_response(stop_token="ll", is_batch=True) == ["he"]
response = Response(
response=model_choice,
cached=False,
request=request_lm,
usages=None,
request_type=LMRequest,
response_type="text",
)
assert response.get_response() == ["hello", "bye"]
assert response.get_response(stop_token="b") == ["hello", ""]
assert response.get_response(stop_token="y", is_batch=True) == ["hello", "b"]
float_arr1 = cast(ArrayModelChoice, model_choice_arr.choices[0]).array
float_arr2 = cast(ArrayModelChoice, model_choice_arr.choices[1]).array
response = Response(
response=model_choice_arr,
cached=False,
request=request_array,
usages=Usages(usages=[Usage(total_tokens=4), Usage(total_tokens=6)]),
request_type=EmbeddingRequest,
response_type="array",
)
assert np.array_equal(response.get_response()[0], float_arr1)
assert np.array_equal(response.get_response()[1], float_arr2)
assert np.array_equal(response.get_response(stop_token="t")[0], float_arr1)
assert np.array_equal(response.get_response(stop_token="t")[1], float_arr2)
def test_union_all(
model_choice: ModelChoices,
model_choice_single: ModelChoices,
request_lm: LMRequest,
request_lm_single: LMRequest,
) -> None:
"""Test union all."""
response1 = Response(
response=model_choice,
cached=False,
request=request_lm,
usages=None,
request_type=LMRequest,
response_type="text",
)
response2 = Response(
response=model_choice_single,
cached=False,
request=request_lm_single,
usages=None,
request_type=LMRequest,
response_type="text",
)
final_response = Response.union_all([response1, response2])
assert final_response.get_json_response() == {
"choices": [
{"text": "hello", "token_logprobs": [0.1, 0.2], "tokens": ["hel", "lo"]},
{"text": "bye", "token_logprobs": [0.3], "tokens": ["bye"]},
{"text": "helloo", "token_logprobs": [0.1, 0.2], "tokens": ["hel", "loo"]},
]
}
assert final_response.get_usage_obj() == Usages(usages=[Usage(), Usage(), Usage()])
merged_prompts: List[str] = request_lm.prompt + [request_lm_single.prompt] # type: ignore # noqa: E501
assert final_response.get_request_obj().prompt == merged_prompts
assert final_response.get_request_obj().engine == "dummy::text-ada-001"
# Modify A to have usage and cached
response1 = Response(
response=model_choice,
cached=False,
request=request_lm,
usages=Usages(usages=[Usage(total_tokens=4), Usage(total_tokens=6)]),
request_type=LMRequest,
response_type="text",
)
final_response = Response.union_all([response1, response2])
assert final_response.get_usage_obj() == Usages(
usages=[Usage(total_tokens=4), Usage(total_tokens=6), Usage()]
)
# Test merge to single
model_choices = ModelChoices(
choices=[
LMModelChoice(
text=" helloo this is a bug",
token_logprobs=[0.1, 0.2, 0.3],
tokens=[" helloo", " this is", " a bug"],
),
]
)
request = LMRequest(prompt="monkey", engine="dummy")
response1 = Response(
response=model_choices,
cached=False,
request=request,
usages=None,
request_type=LMRequest,
response_type="text",
)
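# With as_single_lmchoice=True the repeated choice is merged into one concatenated LMModelChoice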
final_response = Response.union_all([response1, response1], as_single_lmchoice=True)
assert final_response.get_json_response() == {
"choices": [
{
"text": " helloo this is a bug helloo this is a bug",
"token_logprobs": [0.1, 0.2, 0.3, 0.1, 0.2, 0.3],
"tokens": [
" helloo",
" this is",
" a bug",
" helloo",
" this is",
" a bug",
],
},
]
}
assert final_response.get_usage_obj() == Usages(usages=[Usage()])
assert final_response.get_request_obj().prompt == "monkey"
assert final_response.get_request_obj().engine == "dummy"
def test_as_iter(
model_choice_single: ModelChoices, request_lm_single: LMRequest
) -> None:
"""Test as iter."""
response = Response(
response=model_choice_single,
cached=False,
request=request_lm_single,
usages=None,
request_type=LMRequest,
response_type="text",
)
response_iter_list = list(response.as_iter())
assert len(response_iter_list) == 2
assert response_iter_list[0].get_response() == "hel"
assert response_iter_list[1].get_response() == "loo"
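# Without per-token data, iteration falls back to splitting the choice text on whitespace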
model_choices = ModelChoices(
choices=[
LMModelChoice(text="helloo this is a bug"),
]
)
request = LMRequest(prompt="monkey", engine="dummy")
response = Response(
response=model_choices,
cached=False,
request=request,
usages=None,
request_type=LMRequest,
response_type="text",
)
response_iter_list = list(response.as_iter())
assert len(response_iter_list) == 5
assert response_iter_list[0].get_response() == "helloo"
assert response_iter_list[1].get_response() == " this"
assert response_iter_list[2].get_response() == " is"
assert response_iter_list[3].get_response() == " a"
assert response_iter_list[4].get_response() == " bug"
| manifest-main | tests/test_response.py |
"""Request test."""
from manifest.request import DiffusionRequest, LMRequest
def test_llm_init() -> None:
"""Test request initialization."""
request = LMRequest()
assert request.temperature == 0.7
request = LMRequest(temperature=0.5)
assert request.temperature == 0.5
request = LMRequest(**{"temperature": 0.5}) # type: ignore
assert request.temperature == 0.5
request = LMRequest(**{"temperature": 0.5, "prompt": "test"}) # type: ignore
assert request.temperature == 0.5
assert request.prompt == "test"
def test_diff_init() -> None:
"""Test request initialization."""
request = DiffusionRequest()
assert request.height == 512
request = DiffusionRequest(height=128)
assert request.height == 128
request = DiffusionRequest(**{"height": 128}) # type: ignore
assert request.height == 128
request = DiffusionRequest(**{"height": 128, "prompt": "test"}) # type: ignore
assert request.height == 128
assert request.prompt == "test"
def test_to_dict() -> None:
"""Test request to dict."""
request_lm = LMRequest()
dct = request_lm.to_dict()
assert dct == {k: v for k, v in request_lm.dict().items() if v is not None}
# Note the second value is a placeholder for the default value
# It's unused in to_dict
keys = {"temperature": ("temp", 0.7)}
dct = request_lm.to_dict(allowable_keys=keys)
assert dct == {"temp": 0.7, "prompt": ""}
dct = request_lm.to_dict(allowable_keys=keys, add_prompt=False)
assert dct == {"temp": 0.7}
request_diff = DiffusionRequest()
dct = request_diff.to_dict()
assert dct == {k: v for k, v in request_diff.dict().items() if v is not None}
keys = {"height": ("hgt", 512)}
dct = request_diff.to_dict(allowable_keys=keys)
assert dct == {"hgt": 512, "prompt": ""}
dct = request_diff.to_dict(allowable_keys=keys, add_prompt=False)
assert dct == {"hgt": 512}
| manifest-main | tests/test_request.py |
"""Cache test."""
import json
import numpy as np
from manifest.caches.serializers import ArraySerializer, NumpyByteSerializer
def test_response_to_key_array() -> None:
"""Test array serializer initialization."""
serializer = ArraySerializer()
arr = np.random.rand(4, 4)
res = {"response": {"choices": [{"array": arr}]}}
key = serializer.response_to_key(res)
key_dct = json.loads(key)
assert isinstance(key_dct["response"]["choices"][0]["array"], str)
res2 = serializer.key_to_response(key)
assert np.allclose(arr, res2["response"]["choices"][0]["array"])
def test_response_to_key_numpybytes() -> None:
"""Test array serializer initialization."""
serializer = NumpyByteSerializer()
arr = np.random.rand(4, 4)
res = {"response": {"choices": [{"array": arr}]}}
key = serializer.response_to_key(res)
key_dct = json.loads(key)
assert isinstance(key_dct["response"]["choices"][0]["array"], str)
res2 = serializer.key_to_response(key)
assert np.allclose(arr, res2["response"]["choices"][0]["array"])
| manifest-main | tests/test_serializer.py |
"""Test scheduler."""
from manifest.connections.scheduler import RandomScheduler, RoundRobinScheduler
def test_random_scheduler() -> None:
"""Test random scheduler."""
scheduler = RandomScheduler(num_clients=2)
# Draw from the scheduler 20 times and make sure
# client ids 0 and 1 are both returned
client_ids = set()
for _ in range(20):
client_id = scheduler.get_client()
assert client_id in [0, 1]
client_ids.add(client_id)
assert len(client_ids) == 2
def test_round_robin_scheduler() -> None:
"""Test round robin scheduler."""
scheduler = RoundRobinScheduler(num_clients=2)
assert scheduler.get_client() == 0
assert scheduler.get_client() == 1
assert scheduler.get_client() == 0
assert scheduler.get_client() == 1
| manifest-main | tests/test_scheduler.py |
"""Manifest test."""
import asyncio
import os
from typing import Iterator, cast
from unittest.mock import MagicMock, Mock, patch
import numpy as np
import pytest
import requests
from requests import HTTPError
from manifest import Manifest, Response
from manifest.caches.noop import NoopCache
from manifest.caches.sqlite import SQLiteCache
from manifest.clients.dummy import DummyClient
from manifest.connections.client_pool import ClientConnection
URL = "http://localhost:6000"
try:
_ = requests.post(URL + "/params").json()
MODEL_ALIVE = True
except Exception:
MODEL_ALIVE = False
OPENAI_ALIVE = os.environ.get("OPENAI_API_KEY") is not None
@pytest.mark.usefixtures("sqlite_cache")
def test_init(sqlite_cache: str) -> None:
"""Test manifest initialization."""
with pytest.raises(ValueError) as exc_info:
Manifest(
client_name="dummy",
cache_name="sqlite",
cache_connection=sqlite_cache,
sep_tok="",
)
assert str(exc_info.value) == "[('sep_tok', '')] arguments are not recognized."
manifest = Manifest(
client_name="dummy",
cache_name="sqlite",
cache_connection=sqlite_cache,
)
assert len(manifest.client_pool.client_pool) == 1
client = manifest.client_pool.get_next_client()
assert isinstance(client, DummyClient)
assert isinstance(manifest.cache, SQLiteCache)
assert client.n == 1 # type: ignore
assert manifest.stop_token == ""
manifest = Manifest(
client_name="dummy",
cache_name="noop",
n=3,
stop_token="\n",
)
assert len(manifest.client_pool.client_pool) == 1
client = manifest.client_pool.get_next_client()
assert isinstance(client, DummyClient)
assert isinstance(manifest.cache, NoopCache)
assert client.n == 3 # type: ignore
assert manifest.stop_token == "\n"
@pytest.mark.usefixtures("sqlite_cache")
@pytest.mark.parametrize("n", [1, 2])
@pytest.mark.parametrize("return_response", [True, False])
def test_run(sqlite_cache: str, n: int, return_response: bool) -> None:
"""Test manifest run."""
manifest = Manifest(
client_name="dummy",
cache_name="sqlite",
cache_connection=sqlite_cache,
n=n,
temperature=0.0,
)
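# temperature=0.0 is set so the exact-string asserts below stay reproducible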
prompt = "This is a prompt"
with pytest.raises(ValueError) as exc_info:
result = manifest.run(prompt, return_response=return_response, bad_input=5)
assert str(exc_info.value) == "[('bad_input', 5)] arguments are not recognized."
result = manifest.run(prompt, return_response=return_response, top_k=5)
assert result is not None
prompt = "This is a prompt"
result = manifest.run(prompt, return_response=return_response)
if return_response:
assert isinstance(result, Response)
result = cast(Response, result)
assert len(result.get_usage_obj().usages) == len(
result.get_response_obj().choices
)
res = result.get_response(manifest.stop_token)
else:
res = cast(str, result)
assert (
manifest.cache.get(
{
"best_of": 1,
"engine": "dummy",
"max_tokens": 10,
"model": "text-davinci-003",
"n": n,
"prompt": "This is a prompt",
"request_cls": "LMRequest",
"temperature": 0.0,
"top_p": 1.0,
}
)
is not None
)
if n == 1:
assert res == "Nice Employ NFCYouryms“Inwarn\ttemplate europ Moines"
else:
assert res == [
"Nice Employ NFCYouryms“Inwarn\ttemplate europ Moines",
"Nice Employ NFCYouryms“Inwarn\ttemplate europ Moines",
]
prompt = "This is a prompt"
result = manifest.run(prompt, run_id="34", return_response=return_response)
if return_response:
assert isinstance(result, Response)
result = cast(Response, result)
assert len(result.get_usage_obj().usages) == len(
result.get_response_obj().choices
)
res = result.get_response(manifest.stop_token)
else:
res = cast(str, result)
assert (
manifest.cache.get(
{
"best_of": 1,
"engine": "dummy",
"max_tokens": 10,
"model": "text-davinci-003",
"n": n,
"prompt": "This is a prompt",
"request_cls": "LMRequest",
"temperature": 0.0,
"top_p": 1.0,
"run_id": "34",
}
)
is not None
)
if n == 1:
assert res == "Nice Employ NFCYouryms“Inwarn\ttemplate europ Moines"
else:
assert res == [
"Nice Employ NFCYouryms“Inwarn\ttemplate europ Moines",
"Nice Employ NFCYouryms“Inwarn\ttemplate europ Moines",
]
prompt = "Hello is a prompt"
result = manifest.run(prompt, return_response=return_response)
if return_response:
assert isinstance(result, Response)
result = cast(Response, result)
assert len(result.get_usage_obj().usages) == len(
result.get_response_obj().choices
)
res = result.get_response(manifest.stop_token)
else:
res = cast(str, result)
assert (
manifest.cache.get(
{
"best_of": 1,
"engine": "dummy",
"max_tokens": 10,
"model": "text-davinci-003",
"n": n,
"prompt": "Hello is a prompt",
"request_cls": "LMRequest",
"temperature": 0.0,
"top_p": 1.0,
}
)
is not None
)
if n == 1:
assert res == "appersstoff210 currentNodeleh norm unified_voice DIYHam"
else:
assert res == [
"appersstoff210 currentNodeleh norm unified_voice DIYHam",
"appersstoff210 currentNodeleh norm unified_voice DIYHam",
]
prompt = "Hello is a prompt"
result = manifest.run(
prompt, stop_token=" current", return_response=return_response
)
if return_response:
assert isinstance(result, Response)
result = cast(Response, result)
assert len(result.get_usage_obj().usages) == len(
result.get_response_obj().choices
)
res = result.get_response(stop_token=" current")
else:
res = cast(str, result)
assert (
manifest.cache.get(
{
"best_of": 1,
"engine": "dummy",
"max_tokens": 10,
"model": "text-davinci-003",
"n": n,
"prompt": "Hello is a prompt",
"request_cls": "LMRequest",
"temperature": 0.0,
"top_p": 1.0,
}
)
is not None
)
if n == 1:
assert res == "appersstoff210"
else:
assert res == ["appersstoff210", "appersstoff210"]
@pytest.mark.usefixtures("sqlite_cache")
@pytest.mark.parametrize("n", [1, 2])
@pytest.mark.parametrize("return_response", [True, False])
def test_batch_run(sqlite_cache: str, n: int, return_response: bool) -> None:
"""Test manifest run."""
manifest = Manifest(
client_name="dummy",
cache_name="sqlite",
cache_connection=sqlite_cache,
n=n,
temperature=0.0,
)
prompt = ["This is a prompt"]
if n == 2:
with pytest.raises(ValueError) as exc_info:
result = manifest.run(prompt, return_response=return_response)
assert str(exc_info.value) == "Batch mode does not support n > 1."
else:
result = manifest.run(prompt, return_response=return_response)
if return_response:
assert isinstance(result, Response)
result = cast(Response, result)
assert len(result.get_usage_obj().usages) == len(
result.get_response_obj().choices
)
res = result.get_response(manifest.stop_token, is_batch=True)
else:
res = cast(str, result)
assert res == ["Nice Employ NFCYouryms“Inwarn\ttemplate europ Moines"]
assert (
manifest.cache.get(
{
"best_of": 1,
"engine": "dummy",
"max_tokens": 10,
"model": "text-davinci-003",
"n": n,
"prompt": "This is a prompt",
"request_cls": "LMRequest",
"temperature": 0.0,
"top_p": 1.0,
}
)
is not None
)
prompt = ["Hello is a prompt", "Hello is a prompt"]
result = manifest.run(prompt, return_response=return_response)
if return_response:
assert isinstance(result, Response)
result = cast(Response, result)
assert len(result.get_usage_obj().usages) == len(
result.get_response_obj().choices
)
res = result.get_response(manifest.stop_token, is_batch=True)
else:
res = cast(str, result)
assert res == [
"appersstoff210 currentNodeleh norm unified_voice DIYHam",
"appersstoff210 currentNodeleh norm unified_voice DIYHam",
]
assert (
manifest.cache.get(
{
"best_of": 1,
"engine": "dummy",
"max_tokens": 10,
"model": "text-davinci-003",
"n": n,
"prompt": "Hello is a prompt",
"request_cls": "LMRequest",
"temperature": 0.0,
"top_p": 1.0,
}
)
is not None
)
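# Re-running the same batch should now be served entirely from the cache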
result = manifest.run(prompt, return_response=True)
res = cast(Response, result).get_response(manifest.stop_token, is_batch=True)
assert cast(Response, result).is_cached()
assert (
manifest.cache.get(
{
"best_of": 1,
"engine": "dummy",
"max_tokens": 10,
"model": "text-davinci-003",
"n": n,
"prompt": "New prompt",
"request_cls": "LMRequest",
"temperature": 0.0,
"top_p": 1.0,
}
)
is None
)
prompt = ["This is a prompt", "New prompt"]
result = manifest.run(prompt, return_response=return_response)
if return_response:
assert isinstance(result, Response)
result = cast(Response, result)
assert len(result.get_usage_obj().usages) == len(
result.get_response_obj().choices
)
res = result.get_response(manifest.stop_token, is_batch=True)
# Cached because one item is in cache
assert result.is_cached()
else:
res = cast(str, result)
assert res == [
"Nice Employ NFCYouryms“Inwarn\ttemplate europ Moines",
".vol.deserializebigmnchantment ROTıl='')\najsС",
]
prompt = ["Hello is a prompt", "Hello is a prompt"]
result = manifest.run(
prompt, stop_token=" current", return_response=return_response
)
if return_response:
assert isinstance(result, Response)
result = cast(Response, result)
assert len(result.get_usage_obj().usages) == len(
result.get_response_obj().choices
)
res = result.get_response(stop_token=" current", is_batch=True)
else:
res = cast(str, result)
assert res == ["appersstoff210", "appersstoff210"]
@pytest.mark.usefixtures("sqlite_cache")
def test_abatch_run(sqlite_cache: str) -> None:
"""Test manifest run."""
manifest = Manifest(
client_name="dummy",
cache_name="sqlite",
cache_connection=sqlite_cache,
temperature=0.0,
)
prompt = ["This is a prompt"]
result = cast(
Response, asyncio.run(manifest.arun_batch(prompt, return_response=True))
)
assert len(result.get_usage_obj().usages) == len(result.get_response_obj().choices)
res = result.get_response(manifest.stop_token, is_batch=True)
assert res == ["Nice Employ NFCYouryms“Inwarn\ttemplate europ Moines"]
assert (
manifest.cache.get(
{
"best_of": 1,
"engine": "dummy",
"max_tokens": 10,
"model": "text-davinci-003",
"n": 1,
"prompt": "This is a prompt",
"request_cls": "LMRequest",
"temperature": 0.0,
"top_p": 1.0,
}
)
is not None
)
prompt = ["Hello is a prompt", "Hello is a prompt"]
result = cast(
Response, asyncio.run(manifest.arun_batch(prompt, return_response=True))
)
assert len(result.get_usage_obj().usages) == len(result.get_response_obj().choices)
res = result.get_response(manifest.stop_token, is_batch=True)
assert res == [
"appersstoff210 currentNodeleh norm unified_voice DIYHam",
"appersstoff210 currentNodeleh norm unified_voice DIYHam",
]
assert (
manifest.cache.get(
{
"best_of": 1,
"engine": "dummy",
"max_tokens": 10,
"model": "text-davinci-003",
"n": 1,
"prompt": "Hello is a prompt",
"request_cls": "LMRequest",
"temperature": 0.0,
"top_p": 1.0,
}
)
is not None
)
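# A second async run of the same prompts should hit the cache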
result = cast(
Response, asyncio.run(manifest.arun_batch(prompt, return_response=True))
)
assert len(result.get_usage_obj().usages) == len(result.get_response_obj().choices)
res = result.get_response(manifest.stop_token, is_batch=True)
assert result.is_cached()
assert (
manifest.cache.get(
{
"best_of": 1,
"engine": "dummy",
"max_tokens": 10,
"model": "text-davinci-003",
"n": 1,
"prompt": "New prompt",
"request_cls": "LMRequest",
"temperature": 0.0,
"top_p": 1.0,
}
)
is None
)
prompt = ["This is a prompt", "New prompt"]
result = cast(
Response, asyncio.run(manifest.arun_batch(prompt, return_response=True))
)
assert len(result.get_usage_obj().usages) == len(result.get_response_obj().choices)
res = result.get_response(manifest.stop_token, is_batch=True)
# Cached because one item is in cache
assert result.is_cached()
assert res == [
"Nice Employ NFCYouryms“Inwarn\ttemplate europ Moines",
".vol.deserializebigmnchantment ROTıl='')\najsС",
]
prompt = ["Hello is a prompt", "Hello is a prompt"]
result = cast(
Response, asyncio.run(manifest.arun_batch(prompt, return_response=True))
)
assert len(result.get_usage_obj().usages) == len(result.get_response_obj().choices)
res = result.get_response(stop_token=" current", is_batch=True)
assert res == ["appersstoff210", "appersstoff210"]
@pytest.mark.usefixtures("sqlite_cache")
def test_run_chat(sqlite_cache: str) -> None:
"""Test manifest run."""
manifest = Manifest(
client_name="dummy",
cache_name="sqlite",
cache_connection=sqlite_cache,
temperature=0.0,
)
# Set CHAT to be true for this model
manifest.client_pool.client_pool[0].IS_CHAT = True
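# Chat prompts are lists of {"role", "content"} dicts and are cached under LMChatRequest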
prompt = [
{"role": "system", "content": "Hello."},
]
result = manifest.run(prompt, return_response=False)
assert (
result
== "ectors WortGo ré_sg|--------------------------------------------------------------------------\n contradictory Aad \u200b getUserId" # noqa: E501
)
assert (
manifest.cache.get(
{
"best_of": 1,
"engine": "dummy",
"max_tokens": 10,
"model": "text-davinci-003",
"n": 1,
"prompt": [{"content": "Hello.", "role": "system"}],
"request_cls": "LMChatRequest",
"temperature": 0.0,
"top_p": 1.0,
}
)
is not None
)
prompt = [
{"role": "system", "content": "Hello."},
{"role": "user", "content": "Goodbye?"},
]
result = manifest.run(prompt, return_response=True)
assert isinstance(result, Response)
result = cast(Response, result)
assert len(result.get_usage_obj().usages) == len(result.get_response_obj().choices)
res = result.get_response()
assert res == "_deploy_age_gp hora Plus Scheduler EisenhowerRF视 chemotherapy"
assert (
manifest.cache.get(
{
"best_of": 1,
"engine": "dummy",
"max_tokens": 10,
"model": "text-davinci-003",
"n": 1,
"prompt": [
{"role": "system", "content": "Hello."},
{"role": "user", "content": "Goodbye?"},
],
"request_cls": "LMChatRequest",
"temperature": 0.0,
"top_p": 1.0,
}
)
is not None
)
@pytest.mark.usefixtures("sqlite_cache")
def test_score_run(sqlite_cache: str) -> None:
"""Test manifest run."""
manifest = Manifest(
client_name="dummy",
cache_name="sqlite",
cache_connection=sqlite_cache,
temperature=0.0,
)
prompt = "This is a prompt"
result = manifest.score_prompt(prompt)
assert (
manifest.cache.get(
{
"best_of": 1,
"engine": "dummy",
"max_tokens": 10,
"model": "text-davinci-003",
"n": 1,
"prompt": "This is a prompt",
"request_cls": "LMScoreRequest",
"temperature": 0.0,
"top_p": 1.0,
}
)
is not None
)
assert result == {
"response": {
"choices": [
{
"text": "Nice Employ NFCYouryms“Inwarn\ttemplate europ Moines",
"token_logprobs": [
-1.827188890438529,
-1.6981601736417915,
-0.24606708391178755,
-1.9209383499010613,
-0.8833563758318617,
-1.4121369466920703,
-0.376352908076236,
-1.3200064558188096,
-0.813028447207917,
-0.5977255311239729,
],
"tokens": [
"46078",
"21445",
"48305",
"7927",
"76125",
"46233",
"34581",
"23679",
"63021",
"78158",
],
}
]
},
"usages": {
"usages": [
{"completion_tokens": 10, "prompt_tokens": 4, "total_tokens": 14}
]
},
"cached": False,
"request": {
"prompt": "This is a prompt",
"engine": "text-davinci-003",
"n": 1,
"client_timeout": 60,
"run_id": None,
"batch_size": 20,
"temperature": 0.0,
"max_tokens": 10,
"top_p": 1.0,
"top_k": 1,
"logprobs": None,
"stop_sequences": None,
"num_beams": 1,
"do_sample": False,
"repetition_penalty": 1.0,
"length_penalty": 1.0,
"presence_penalty": 0.0,
"frequency_penalty": 0.0,
},
"response_type": "text",
"request_type": "LMScoreRequest",
"item_dtype": None,
}
prompt_list = ["Hello is a prompt", "Hello is another prompt"]
result = manifest.score_prompt(prompt_list)
assert (
manifest.cache.get(
{
"best_of": 1,
"engine": "dummy",
"max_tokens": 10,
"model": "text-davinci-003",
"n": 1,
"prompt": "Hello is a prompt",
"request_cls": "LMScoreRequest",
"temperature": 0.0,
"top_p": 1.0,
}
)
is not None
)
assert (
manifest.cache.get(
{
"best_of": 1,
"engine": "dummy",
"max_tokens": 10,
"model": "text-davinci-003",
"n": 1,
"prompt": "Hello is another prompt",
"request_cls": "LMScoreRequest",
"temperature": 0.0,
"top_p": 1.0,
}
)
is not None
)
assert result == {
"response": {
"choices": [
{
"text": "appersstoff210 currentNodeleh norm unified_voice DIYHam",
"token_logprobs": [
-0.5613340599860608,
-1.2822870706137146,
-1.9909319620162806,
-0.6312373658222814,
-1.9066239705571664,
-1.2420939968397082,
-0.7208735169940805,
-1.9144266963723062,
-0.041181937860757856,
-0.5356282450367043,
],
"tokens": [
"28921",
"81056",
"8848",
"47399",
"74890",
"7617",
"43790",
"77865",
"32558",
"41041",
],
},
{
"text": ".addAttribute_size DE imageUrl_datas\tapFixed(hour setups\tcomment", # noqa: E501
"token_logprobs": [
-1.1142500072582333,
-0.819706434396527,
-1.9956443391600693,
-0.8425896744807639,
-1.8398050571245623,
-1.912564137256891,
-1.6677665162080606,
-1.1579612203844727,
-1.9876114502998343,
-0.2698297864722319,
],
"tokens": [
"26300",
"2424",
"3467",
"40749",
"47630",
"70998",
"13829",
"72135",
"84823",
"97368",
],
},
]
},
"usages": {
"usages": [
{"completion_tokens": 10, "prompt_tokens": 4, "total_tokens": 14},
{"completion_tokens": 10, "prompt_tokens": 4, "total_tokens": 14},
]
},
"cached": False,
"request": {
"prompt": ["Hello is a prompt", "Hello is another prompt"],
"engine": "text-davinci-003",
"n": 1,
"client_timeout": 60,
"run_id": None,
"batch_size": 20,
"temperature": 0.0,
"max_tokens": 10,
"top_p": 1.0,
"top_k": 1,
"logprobs": None,
"stop_sequences": None,
"num_beams": 1,
"do_sample": False,
"repetition_penalty": 1.0,
"length_penalty": 1.0,
"presence_penalty": 0.0,
"frequency_penalty": 0.0,
},
"response_type": "text",
"request_type": "LMScoreRequest",
"item_dtype": None,
}
@pytest.mark.skipif(not MODEL_ALIVE, reason=f"No model at {URL}")
@pytest.mark.usefixtures("sqlite_cache")
def test_local_huggingface(sqlite_cache: str) -> None:
"""Test local huggingface client."""
client = Manifest(
client_name="huggingface",
client_connection=URL,
cache_name="sqlite",
cache_connection=sqlite_cache,
)
res = client.run("Why are there apples?")
assert isinstance(res, str) and len(res) > 0
response = cast(Response, client.run("Why are there apples?", return_response=True))
assert isinstance(response.get_response(), str) and len(response.get_response()) > 0
assert response.is_cached() is True
response = cast(Response, client.run("Why are there apples?", return_response=True))
assert response.is_cached() is True
res_list = client.run(["Why are there apples?", "Why are there bananas?"])
assert isinstance(res_list, list) and len(res_list) == 2
response = cast(
Response, client.run("Why are there bananas?", return_response=True)
)
assert response.is_cached() is True
res_list = asyncio.run(
client.arun_batch(["Why are there pears?", "Why are there oranges?"])
)
assert isinstance(res_list, list) and len(res_list) == 2
response = cast(
Response, client.run("Why are there oranges?", return_response=True)
)
assert response.is_cached() is True
scores = client.score_prompt("Why are there apples?")
assert isinstance(scores, dict) and len(scores) > 0
assert scores["cached"] is False
assert len(scores["response"]["choices"][0]["token_logprobs"]) == len(
scores["response"]["choices"][0]["tokens"]
)
scores = client.score_prompt(["Why are there apples?", "Why are there bananas?"])
assert isinstance(scores, dict) and len(scores) > 0
assert scores["cached"] is True
assert len(scores["response"]["choices"][0]["token_logprobs"]) == len(
scores["response"]["choices"][0]["tokens"]
)
assert len(scores["response"]["choices"][0]["token_logprobs"]) == len(
scores["response"]["choices"][0]["tokens"]
)
@pytest.mark.skipif(not MODEL_ALIVE, reason=f"No model at {URL}")
@pytest.mark.usefixtures("sqlite_cache")
def test_local_huggingfaceembedding(sqlite_cache: str) -> None:
"""Test openaichat client."""
client = Manifest(
client_name="huggingfaceembedding",
client_connection=URL,
cache_name="sqlite",
cache_connection=sqlite_cache,
)
res = client.run("Why are there carrots?")
assert isinstance(res, np.ndarray)
response = cast(
Response, client.run("Why are there carrots?", return_response=True)
)
assert isinstance(response.get_response(), np.ndarray)
assert np.allclose(response.get_response(), res)
client = Manifest(
client_name="huggingfaceembedding",
client_connection=URL,
cache_name="sqlite",
cache_connection=sqlite_cache,
)
res = client.run("Why are there apples?")
assert isinstance(res, np.ndarray)
response = cast(Response, client.run("Why are there apples?", return_response=True))
assert isinstance(response.get_response(), np.ndarray)
assert np.allclose(response.get_response(), res)
assert response.is_cached() is True
response = cast(Response, client.run("Why are there apples?", return_response=True))
assert response.is_cached() is True
res_list = client.run(["Why are there apples?", "Why are there bananas?"])
assert (
isinstance(res_list, list)
and len(res_list) == 2
and isinstance(res_list[0], np.ndarray)
)
response = cast(
Response,
client.run(
["Why are there apples?", "Why are there mangos?"], return_response=True
),
)
assert (
isinstance(response.get_response(), list) and len(response.get_response()) == 2
)
response = cast(
Response, client.run("Why are there bananas?", return_response=True)
)
assert response.is_cached() is True
response = cast(
Response, client.run("Why are there oranges?", return_response=True)
)
assert response.is_cached() is False
res_list = asyncio.run(
client.arun_batch(["Why are there pears?", "Why are there oranges?"])
)
assert (
isinstance(res_list, list)
and len(res_list) == 2
and isinstance(res_list[0], np.ndarray)
)
response = cast(
Response,
asyncio.run(
client.arun_batch(
["Why are there pinenuts?", "Why are there cocoa?"],
return_response=True,
)
),
)
assert (
isinstance(response.get_response(), list)
and len(res_list) == 2
and isinstance(res_list[0], np.ndarray)
)
response = cast(
Response, client.run("Why are there oranges?", return_response=True)
)
assert response.is_cached() is True
@pytest.mark.skipif(not OPENAI_ALIVE, reason="No openai key set")
@pytest.mark.usefixtures("sqlite_cache")
def test_openai(sqlite_cache: str) -> None:
"""Test openai client."""
client = Manifest(
client_name="openai",
engine="text-ada-001",
cache_name="sqlite",
cache_connection=sqlite_cache,
temperature=0.0,
)
res = client.run("Why are there apples?")
assert isinstance(res, str) and len(res) > 0
response = cast(Response, client.run("Why are there apples?", return_response=True))
assert isinstance(response.get_response(), str) and len(response.get_response()) > 0
assert response.get_response() == res
assert response.is_cached() is True
assert response.get_usage_obj().usages
assert response.get_usage_obj().usages[0].total_tokens == 15
response = cast(Response, client.run("Why are there apples?", return_response=True))
assert response.is_cached() is True
res_list = client.run(["Why are there apples?", "Why are there bananas?"])
assert isinstance(res_list, list) and len(res_list) == 2
response = cast(
Response,
client.run(
["Why are there apples?", "Why are there mangos?"], return_response=True
),
)
assert (
isinstance(response.get_response(), list) and len(response.get_response()) == 2
)
assert response.get_usage_obj().usages and len(response.get_usage_obj().usages) == 2
assert response.get_usage_obj().usages[0].total_tokens == 15
assert response.get_usage_obj().usages[1].total_tokens == 16
response = cast(
Response, client.run("Why are there bananas?", return_response=True)
)
assert response.is_cached() is True
res_list = asyncio.run(
client.arun_batch(["Why are there pears?", "Why are there oranges?"])
)
assert isinstance(res_list, list) and len(res_list) == 2
response = cast(
Response,
asyncio.run(
client.arun_batch(
["Why are there pinenuts?", "Why are there cocoa?"],
return_response=True,
)
),
)
assert (
isinstance(response.get_response(), list) and len(response.get_response()) == 2
)
assert response.get_usage_obj().usages and len(response.get_usage_obj().usages) == 2
assert response.get_usage_obj().usages[0].total_tokens == 17
assert response.get_usage_obj().usages[1].total_tokens == 15
response = cast(
Response, client.run("Why are there oranges?", return_response=True)
)
assert response.is_cached() is True
# Test streaming
num_responses = 0
streaming_response_text = cast(
Iterator[str], client.run("Why are there oranges?", stream=True)
)
for res_text in streaming_response_text:
num_responses += 1
assert isinstance(res_text, str) and len(res_text) > 0
assert num_responses == 8
streaming_response = cast(
Iterator[Response],
client.run("Why are there mandarines?", return_response=True, stream=True),
)
num_responses = 0
merged_res = []
for res in streaming_response:
num_responses += 1
assert isinstance(res, Response) and len(res.get_response()) > 0
merged_res.append(cast(str, res.get_response()))
assert not res.is_cached()
assert num_responses == 10
# Make sure cached
streaming_response = cast(
Iterator[Response],
client.run("Why are there mandarines?", return_response=True, stream=True),
)
num_responses = 0
merged_res_cached = []
for res in streaming_response:
num_responses += 1
assert isinstance(res, Response) and len(res.get_response()) > 0
merged_res_cached.append(cast(str, res.get_response()))
assert res.is_cached()
# OpenAI stream does not return logprobs, so this is by number of words
assert num_responses == 7
assert "".join(merged_res) == "".join(merged_res_cachced)
@pytest.mark.skipif(not OPENAI_ALIVE, reason="No openai key set")
@pytest.mark.usefixtures("sqlite_cache")
def test_openaichat(sqlite_cache: str) -> None:
"""Test openaichat client."""
client = Manifest(
client_name="openaichat",
cache_name="sqlite",
cache_connection=sqlite_cache,
temperature=0.0,
)
res = client.run("Why are there apples?")
assert isinstance(res, str) and len(res) > 0
response = cast(Response, client.run("Why are there apples?", return_response=True))
assert isinstance(response.get_response(), str) and len(response.get_response()) > 0
assert response.get_response() == res
assert response.is_cached() is True
assert response.get_usage_obj().usages
assert response.get_usage_obj().usages[0].total_tokens == 23
response = cast(Response, client.run("Why are there apples?", return_response=True))
assert response.is_cached() is True
response = cast(
Response, client.run("Why are there oranges?", return_response=True)
)
assert response.is_cached() is False
res_list = asyncio.run(
client.arun_batch(["Why are there pears?", "Why are there oranges?"])
)
assert isinstance(res_list, list) and len(res_list) == 2
response = cast(
Response,
asyncio.run(
client.arun_batch(
["Why are there pinenuts?", "Why are there cocoa?"],
return_response=True,
)
),
)
assert (
isinstance(response.get_response(), list) and len(response.get_response()) == 2
)
assert response.get_usage_obj().usages and len(response.get_usage_obj().usages) == 2
assert response.get_usage_obj().usages[0].total_tokens == 25
assert response.get_usage_obj().usages[1].total_tokens == 23
response = cast(
Response, client.run("Why are there oranges?", return_response=True)
)
assert response.is_cached() is True
chat_dict = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Who won the world series in 2020?"},
{
"role": "assistant",
"content": "The Los Angeles Dodgers won the World Series in 2020.",
},
{"role": "user", "content": "Where was it played?"},
]
res = client.run(chat_dict)
assert isinstance(res, str) and len(res) > 0
response = cast(Response, client.run(chat_dict, return_response=True))
assert response.is_cached() is True
assert response.get_usage_obj().usages[0].total_tokens == 67
chat_dict = [
{"role": "system", "content": "You are a helpful assistanttttt."},
{"role": "user", "content": "Who won the world series in 2020?"},
{
"role": "assistant",
"content": "The Los Angeles Dodgers won the World Series in 2020.",
},
{"role": "user", "content": "Where was it played?"},
]
response = cast(Response, client.run(chat_dict, return_response=True))
assert response.is_cached() is False
# Test streaming
num_responses = 0
streaming_response_text = cast(
Iterator[str], client.run("Why are there oranges?", stream=True)
)
for res_text in streaming_response_text:
num_responses += 1
assert isinstance(res_text, str) and len(res_text) > 0
assert num_responses == 9
streaming_response = cast(
Iterator[Response],
client.run("Why are there mandarines?", return_response=True, stream=True),
)
num_responses = 0
merged_res = []
for res in streaming_response:
num_responses += 1
assert isinstance(res, Response) and len(res.get_response()) > 0
merged_res.append(cast(str, res.get_response()))
assert not res.is_cached()
assert num_responses == 10
# Make sure cached
streaming_response = cast(
Iterator[Response],
client.run("Why are there mandarines?", return_response=True, stream=True),
)
num_responses = 0
merged_res_cached = []
for res in streaming_response:
num_responses += 1
assert isinstance(res, Response) and len(res.get_response()) > 0
merged_res_cached.append(cast(str, res.get_response()))
assert res.is_cached()
# OpenAI stream does not return logprobs, so this is by number of words
assert num_responses == 7
assert "".join(merged_res) == "".join(merged_res_cachced)
@pytest.mark.skipif(not OPENAI_ALIVE, reason="No openai key set")
@pytest.mark.usefixtures("sqlite_cache")
def test_openaiembedding(sqlite_cache: str) -> None:
"""Test openaichat client."""
client = Manifest(
client_name="openaiembedding",
cache_name="sqlite",
cache_connection=sqlite_cache,
array_serializer="local_file",
)
res = client.run("Why are there carrots?")
assert isinstance(res, np.ndarray)
response = cast(
Response, client.run("Why are there carrots?", return_response=True)
)
assert isinstance(response.get_response(), np.ndarray)
assert np.allclose(response.get_response(), res)
client = Manifest(
client_name="openaiembedding",
cache_name="sqlite",
cache_connection=sqlite_cache,
)
res = client.run("Why are there apples?")
assert isinstance(res, np.ndarray)
response = cast(Response, client.run("Why are there apples?", return_response=True))
assert isinstance(response.get_response(), np.ndarray)
assert np.allclose(response.get_response(), res)
assert response.is_cached() is True
assert response.get_usage_obj().usages
assert response.get_usage_obj().usages[0].total_tokens == 5
response = cast(Response, client.run("Why are there apples?", return_response=True))
assert response.is_cached() is True
res_list = client.run(["Why are there apples?", "Why are there bananas?"])
assert (
isinstance(res_list, list)
and len(res_list) == 2
and isinstance(res_list[0], np.ndarray)
)
response = cast(
Response,
client.run(
["Why are there apples?", "Why are there mangos?"], return_response=True
),
)
assert (
isinstance(response.get_response(), list) and len(response.get_response()) == 2
)
assert response.get_usage_obj().usages and len(response.get_usage_obj().usages) == 2
assert response.get_usage_obj().usages[0].total_tokens == 5
assert response.get_usage_obj().usages[1].total_tokens == 6
response = cast(
Response, client.run("Why are there bananas?", return_response=True)
)
assert response.is_cached() is True
response = cast(
Response, client.run("Why are there oranges?", return_response=True)
)
assert response.is_cached() is False
res_list = asyncio.run(
client.arun_batch(["Why are there pears?", "Why are there oranges?"])
)
assert (
isinstance(res_list, list)
and len(res_list) == 2
and isinstance(res_list[0], np.ndarray)
)
response = cast(
Response,
asyncio.run(
client.arun_batch(
["Why are there pinenuts?", "Why are there cocoa?"],
return_response=True,
)
),
)
assert (
isinstance(response.get_response(), list)
and len(res_list) == 2
and isinstance(res_list[0], np.ndarray)
)
assert response.get_usage_obj().usages and len(response.get_usage_obj().usages) == 2
assert response.get_usage_obj().usages[0].total_tokens == 7
assert response.get_usage_obj().usages[1].total_tokens == 5
response = cast(
Response, client.run("Why are there oranges?", return_response=True)
)
assert response.is_cached() is True
@pytest.mark.skipif(not OPENAI_ALIVE, reason="No openai key set")
@pytest.mark.usefixtures("sqlite_cache")
def test_openai_pool(sqlite_cache: str) -> None:
"""Test openai and openaichat client."""
client_connection1 = ClientConnection(
client_name="openaichat",
)
client_connection2 = ClientConnection(client_name="openai", engine="text-ada-001")
client = Manifest(
client_pool=[client_connection1, client_connection2],
cache_name="sqlite",
cache_connection=sqlite_cache,
)
res = client.run("Why are there apples?")
assert isinstance(res, str) and len(res) > 0
res2 = client.run("Why are there apples?")
assert isinstance(res2, str) and len(res2) > 0
# Different models
assert res != res2
assert cast(
Response, client.run("Why are there apples?", return_response=True)
).is_cached()
res_list = asyncio.run(
client.arun_batch(["Why are there pears?", "Why are there oranges?"])
)
assert isinstance(res_list, list) and len(res_list) == 2
res_list2 = asyncio.run(
client.arun_batch(["Why are there pears?", "Why are there oranges?"])
)
assert isinstance(res_list2, list) and len(res_list2) == 2
# Different models
assert res_list != res_list2
assert cast(
Response,
asyncio.run(
client.arun_batch(
["Why are there pears?", "Why are there oranges?"], return_response=True
)
),
).is_cached()
# Test chunk size of 1
res_list = asyncio.run(
client.arun_batch(
["Why are there pineapples?", "Why are there pinecones?"], chunk_size=1
)
)
assert isinstance(res_list, list) and len(res_list) == 2
res_list2 = asyncio.run(
client.arun_batch(
["Why are there pineapples?", "Why are there pinecones?"], chunk_size=1
)
)
# Because we split across both models exactly in first run,
# we will get the same result
assert res_list == res_list2
@pytest.mark.skipif(
not OPENAI_ALIVE or not MODEL_ALIVE, reason="No openai or local model set"
)
@pytest.mark.usefixtures("sqlite_cache")
def test_mixed_pool(sqlite_cache: str) -> None:
"""Test openai and openaichat client."""
client_connection1 = ClientConnection(
client_name="huggingface",
client_connection=URL,
)
client_connection2 = ClientConnection(client_name="openai", engine="text-ada-001")
client = Manifest(
client_pool=[client_connection1, client_connection2],
cache_name="sqlite",
cache_connection=sqlite_cache,
)
res = client.run("Why are there apples?")
assert isinstance(res, str) and len(res) > 0
res2 = client.run("Why are there apples?")
assert isinstance(res2, str) and len(res2) > 0
# Different models
assert res != res2
assert cast(
Response, client.run("Why are there apples?", return_response=True)
).is_cached()
res_list = asyncio.run(
client.arun_batch(["Why are there pears?", "Why are there oranges?"])
)
assert isinstance(res_list, list) and len(res_list) == 2
res_list2 = asyncio.run(
client.arun_batch(["Why are there pears?", "Why are there oranges?"])
)
assert isinstance(res_list2, list) and len(res_list2) == 2
# Different models
assert res_list != res_list2
assert cast(
Response,
asyncio.run(
client.arun_batch(
["Why are there pears?", "Why are there oranges?"], return_response=True
)
),
).is_cached()
# Test chunk size of 1
res_list = asyncio.run(
client.arun_batch(
["Why are there pineapples?", "Why are there pinecones?"], chunk_size=1
)
)
assert isinstance(res_list, list) and len(res_list) == 2
res_list2 = asyncio.run(
client.arun_batch(
["Why are there pineapples?", "Why are there pinecones?"], chunk_size=1
)
)
# Because we split across both models exactly in first run,
# we will get the same result
assert res_list == res_list2
def test_retry_handling() -> None:
"""Test retry handling."""
# We'll mock the response so we won't need a real connection
client = Manifest(client_name="openai", client_connection="fake")
mock_create = MagicMock(
side_effect=[
# raise a 429 error
HTTPError(
response=Mock(status_code=429, json=Mock(return_value={})),
request=Mock(),
),
# get a valid http response with a 200 status code
Mock(
status_code=200,
json=Mock(
return_value={
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": None,
"text": " WHATTT.",
},
{
"finish_reason": "length",
"index": 1,
"logprobs": None,
"text": " UH OH.",
},
{
"finish_reason": "length",
"index": 2,
"logprobs": None,
"text": " HARG",
},
],
"created": 1679469056,
"id": "cmpl-6wmuWfmyuzi68B6gfeNC0h5ywxXL5",
"model": "text-ada-001",
"object": "text_completion",
"usage": {
"completion_tokens": 30,
"prompt_tokens": 24,
"total_tokens": 54,
},
}
),
),
]
)
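# The 429 from the first call should be retried; the second mocked call returns a valid completion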
prompts = [
"The sky is purple. This is because",
"The sky is magnet. This is because",
"The sky is fuzzy. This is because",
]
with patch("manifest.clients.client.requests.post", mock_create):
# Run manifest
result = client.run(prompts, temperature=0, overwrite_cache=True)
assert result == [" WHATTT.", " UH OH.", " HARG"]
# Assert that OpenAI client was called twice
assert mock_create.call_count == 2
# Now make sure it errors when not a 429 or 500
mock_create = MagicMock(
side_effect=[
# raise a 505 error
HTTPError(
response=Mock(status_code=505, json=Mock(return_value={})),
request=Mock(),
),
]
)
with patch("manifest.clients.client.requests.post", mock_create):
# Run manifest
with pytest.raises(HTTPError):
client.run(prompts, temperature=0, overwrite_cache=True)
# Assert that OpenAI client was called once
assert mock_create.call_count == 1
| manifest-main | tests/test_manifest.py |
"""Array cache test."""
from pathlib import Path
import numpy as np
import pytest
from manifest.caches.array_cache import ArrayCache
def test_init(tmpdir: Path) -> None:
"""Test cache initialization."""
cache = ArrayCache(Path(tmpdir))
assert (tmpdir / "hash2arrloc.sqlite").exists()
assert cache.cur_file_idx == 0
assert cache.cur_offset == 0
def test_put_get(tmpdir: Path) -> None:
"""Test putting and getting."""
cache = ArrayCache(tmpdir)
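# Shrink the memmap capacity so the 10x10 array cannot fit and a ValueError is raised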
cache.max_memmap_size = 5
arr = np.random.rand(10, 10)
with pytest.raises(ValueError) as exc_info:
cache.put("key", arr)
assert str(exc_info.value) == ("Array is too large to be cached. Max is 5")
cache.max_memmap_size = 120
cache.put("key", arr)
assert np.allclose(cache.get("key"), arr)
assert cache.get("key").dtype == arr.dtype
assert cache.cur_file_idx == 0
assert cache.cur_offset == 100
assert cache.hash2arrloc["key"] == {
"file_idx": 0,
"offset": 0,
"flatten_size": 100,
"shape": (10, 10),
"dtype": np.dtype("float64"),
}
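# The second 10x10 array does not fit in the remaining capacity of file 0 (max 120 elements),
# so it rolls over to a new memmap file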
arr2 = np.random.randint(0, 3, size=(10, 10))
cache.put("key2", arr2)
assert np.allclose(cache.get("key2"), arr2)
assert cache.get("key2").dtype == arr2.dtype
assert cache.cur_file_idx == 1
assert cache.cur_offset == 100
assert cache.hash2arrloc["key2"] == {
"file_idx": 1,
"offset": 0,
"flatten_size": 100,
"shape": (10, 10),
"dtype": np.dtype("int64"),
}
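# Re-opening the cache should reload the hash-to-location index from disk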
cache = ArrayCache(tmpdir)
assert cache.hash2arrloc["key"] == {
"file_idx": 0,
"offset": 0,
"flatten_size": 100,
"shape": (10, 10),
"dtype": np.dtype("float64"),
}
assert cache.hash2arrloc["key2"] == {
"file_idx": 1,
"offset": 0,
"flatten_size": 100,
"shape": (10, 10),
"dtype": np.dtype("int64"),
}
assert np.allclose(cache.get("key"), arr)
assert np.allclose(cache.get("key2"), arr2)
def test_contains_key(tmpdir: Path) -> None:
"""Test contains key."""
cache = ArrayCache(tmpdir)
assert not cache.contains_key("key")
arr = np.random.rand(10, 10)
cache.put("key", arr)
assert cache.contains_key("key")
| manifest-main | tests/test_array_cache.py |
"""Test the HuggingFace API."""
import math
import os
from subprocess import PIPE, Popen
import numpy as np
import pytest
from manifest.api.models.huggingface import MODEL_REGISTRY, TextGenerationModel
from manifest.api.models.sentence_transformer import SentenceTransformerModel
NOCUDA = 0
try:
p = Popen(
[
"nvidia-smi",
(
"--query-gpu=index,utilization.gpu,memory.total,memory.used,"
"memory.free,driver_version,name,gpu_serial,display_active,"
"display_mode"
),
"--format=csv,noheader,nounits",
],
stdout=PIPE,
)
except OSError:
NOCUDA = 1
MAXGPU = 0
if NOCUDA == 0:
try:
p = os.popen( # type: ignore
"nvidia-smi --query-gpu=index --format=csv,noheader,nounits"
)
i = p.read().split("\n") # type: ignore
MAXGPU = int(i[-2]) + 1
except OSError:
NOCUDA = 1
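# MAXGPU counts the GPUs visible to nvidia-smi; it is used below to skip the deepspeed test
# when no GPU is available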
def test_load_non_registry_model() -> None:
"""Test load model not in registry."""
model_name = "NinedayWang/PolyCoder-160M"
assert model_name not in MODEL_REGISTRY
model = TextGenerationModel(
model_name_or_path=model_name, model_type="text-generation"
)
result = model.generate("Why is the sky green?", max_tokens=10)
assert result is not None
def test_gpt_generate() -> None:
"""Test pipeline generation from a gpt model."""
model = TextGenerationModel(
model_name_or_path="gpt2",
use_accelerate=False,
use_parallelize=False,
use_bitsandbytes=False,
use_deepspeed=False,
use_fp16=False,
device=-1,
)
inputs = "Why is the sky green?"
result = model.generate(inputs, max_tokens=10)
assert result is not None
assert len(result) == 1
assert result[0][0] == "\n\nThe sky is green.\n\nThe"
assert math.isclose(round(result[0][1], 3), -11.516)
result = model.generate("Cats are", max_tokens=10)
assert result is not None
assert len(result) == 1
assert result[0][0] == " not the only ones who are being targeted by the"
assert math.isclose(round(result[0][1], 3), -21.069)
result = model.generate(inputs, max_tokens=5)
assert result is not None
assert len(result) == 1
assert result[0][0] == "\n\nThe sky is"
assert math.isclose(round(result[0][1], 3), -6.046)
# Truncate max length
model.pipeline.max_length = 5
result = model.generate(inputs, max_tokens=2)
assert result is not None
assert len(result) == 1
assert result[0][0] == "\n\n"
assert math.isclose(round(result[0][1], 3), -1.414)
def test_encdec_generate() -> None:
"""Test pipeline generation from a gpt model."""
model = TextGenerationModel(
model_name_or_path="google/t5-small-lm-adapt",
use_accelerate=False,
use_parallelize=False,
use_bitsandbytes=False,
use_deepspeed=False,
use_fp16=False,
device=-1,
)
inputs = "Why is the sky green?"
result = model.generate(inputs, max_tokens=10)
assert result is not None
assert len(result) == 1
assert result[0][0] == "What is the sky green? What is the sky"
assert math.isclose(round(result[0][1], 3), -7.271)
result = model.generate("Cats are", max_tokens=10)
assert result is not None
assert len(result) == 1
assert result[0][0] == "a great way to get out of the house"
assert math.isclose(round(result[0][1], 3), -13.868)
result = model.generate(inputs, max_tokens=5)
assert result is not None
assert len(result) == 1
assert result[0][0] == "What is the sky green"
assert math.isclose(round(result[0][1], 3), -5.144)
# Truncate max length
model.pipeline.max_length = 5
result = model.generate(inputs, max_tokens=2)
assert result is not None
assert len(result) == 1
assert result[0][0] == "Is"
assert math.isclose(round(result[0][1], 3), -4.233)
def test_gpt_score() -> None:
"""Test pipeline generation from a gpt model."""
model = TextGenerationModel(
model_name_or_path="gpt2",
use_accelerate=False,
use_parallelize=False,
use_bitsandbytes=False,
use_deepspeed=False,
use_fp16=False,
device=-1,
)
inputs = ["Why is the sky green?", "Cats are butterflies"]
result = model.score_sequence(inputs)
assert result is not None
assert len(result) == 2
assert math.isclose(round(result[0][0], 3), -46.71)
assert math.isclose(round(result[1][0], 3), -12.752)
assert isinstance(result[0][1], list)
assert isinstance(result[1][1], list)
def test_embed() -> None:
"""Test embedding pipeline."""
model = TextGenerationModel(
model_name_or_path="gpt2",
use_accelerate=False,
use_parallelize=False,
use_bitsandbytes=False,
use_deepspeed=False,
use_fp16=False,
device=-1,
)
inputs = ["Why is the sky green?", "Cats are butterflies"]
embeddings = model.embed(inputs)
assert isinstance(embeddings, np.ndarray)
assert embeddings.shape == (2, 768)
model2 = SentenceTransformerModel(
model_name_or_path="all-mpnet-base-v2",
use_accelerate=False,
use_parallelize=False,
use_bitsandbytes=False,
use_deepspeed=False,
use_fp16=False,
device=-1,
)
inputs = ["Why is the sky green?", "Cats are butterflies"]
embeddings = model2.embed(inputs)
assert isinstance(embeddings, np.ndarray)
assert embeddings.shape == (2, 768)
def test_batch_gpt_generate() -> None:
"""Test pipeline generation from a gpt model."""
model = TextGenerationModel(
model_name_or_path="gpt2",
use_accelerate=False,
use_parallelize=False,
use_bitsandbytes=False,
use_deepspeed=False,
use_fp16=False,
device=-1,
)
inputs = ["Why is the sky green?", "Cats are"]
result = model.generate(inputs, max_tokens=10)
assert result is not None
assert len(result) == 2
assert result[0][0] == "\n\nThe sky is green.\n\nThe"
assert math.isclose(round(result[0][1], 3), -11.516)
assert result[1][0] == " not the only ones who are being targeted by the"
assert math.isclose(round(result[1][1], 3), -21.069)
result = model.generate(inputs, max_tokens=5)
assert result is not None
assert len(result) == 2
assert result[0][0] == "\n\nThe sky is"
assert math.isclose(round(result[0][1], 2), -6.05)
assert result[1][0] == " not the only ones who"
assert math.isclose(round(result[1][1], 3), -9.978)
# Truncate max length
model.pipeline.max_length = 5
result = model.generate(inputs, max_tokens=2)
assert result is not None
assert len(result) == 2
assert result[0][0] == "\n\n"
assert math.isclose(round(result[0][1], 3), -1.414)
assert result[1][0] == " not the"
assert math.isclose(round(result[1][1], 3), -6.246)
def test_batch_encdec_generate() -> None:
"""Test pipeline generation from a gpt model."""
model = TextGenerationModel(
model_name_or_path="google/t5-small-lm-adapt",
use_accelerate=False,
use_parallelize=False,
use_bitsandbytes=False,
use_deepspeed=False,
use_fp16=False,
device=-1,
)
inputs = ["Why is the sky green?", "Cats are"]
result = model.generate(inputs, max_tokens=10)
assert result is not None
assert len(result) == 2
assert result[0][0] == "What is the sky green? What is the sky"
assert math.isclose(round(result[0][1], 3), -7.271)
assert result[1][0] == "a great way to get out of the house"
assert math.isclose(round(result[1][1], 3), -13.868)
result = model.generate(inputs, max_tokens=5)
assert result is not None
assert len(result) == 2
assert result[0][0] == "What is the sky green"
assert math.isclose(round(result[0][1], 3), -5.144)
assert result[1][0] == "a great way to"
assert math.isclose(round(result[1][1], 3), -6.353)
# Truncate max length
model.pipeline.max_length = 5
result = model.generate(inputs, max_tokens=2)
assert result is not None
assert len(result) == 2
assert result[0][0] == "Is"
assert math.isclose(round(result[0][1], 3), -4.233)
assert result[1][0] == "a"
assert math.isclose(round(result[1][1], 3), -1.840)
@pytest.mark.skipif(
(NOCUDA == 1 or MAXGPU == 0), reason="No cuda or GPUs found through nvidia-smi"
)
def test_gpt_deepspeed_generate() -> None:
"""Test deepspeed generation from a gpt model."""
model = TextGenerationModel(
model_name_or_path="gpt2",
use_accelerate=False,
use_parallelize=False,
use_bitsandbytes=False,
use_deepspeed=True,
use_fp16=False,
device=0,
)
inputs = "Why is the sky green?"
result = model.generate(inputs, max_tokens=10)
assert result is not None
assert len(result) == 1
assert result[0][0] == "\n\nThe sky is green.\n\nThe"
assert math.isclose(round(result[0][1], 3), -11.517)
| manifest-main | tests/test_huggingface_api.py |
"""
Test client.
We just test the dummy client.
"""
from manifest.clients.dummy import DummyClient
def test_init() -> None:
"""Test client initialization."""
client = DummyClient(connection_str=None)
assert client.n == 1 # type: ignore
args = {"n": 3}
client = DummyClient(connection_str=None, client_args=args)
assert client.n == 3 # type: ignore
def test_get_params() -> None:
"""Test get param functions."""
client = DummyClient(connection_str=None)
assert client.get_model_params() == {
"engine": "dummy",
"model": "text-davinci-003",
}
assert client.get_model_inputs() == [
"engine",
"temperature",
"max_tokens",
"n",
"top_p",
"top_k",
"batch_size",
]
def test_get_request() -> None:
"""Test client get request."""
args = {"n": 3}
client = DummyClient(connection_str=None, client_args=args)
request_params = client.get_request("hello", {})
response = client.run_request(request_params)
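# The cache key should include the prompt, all model parameters, and the request class name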
assert client.get_cache_key(request_params) == {
"prompt": "hello",
"model": "text-davinci-003",
"n": 3,
"temperature": 0.0,
"max_tokens": 10,
"top_p": 1.0,
"best_of": 1,
"engine": "dummy",
"request_cls": "LMRequest",
}
assert response.get_json_response() == {
"choices": [
{
"text": " probsuib.FirstName>- commodityting segunda inserted signals Religious", # noqa: E501
"token_logprobs": [
-0.2649905035732101,
-1.210794839387105,
-1.2173929801003434,
-0.7758233850171001,
-0.7165940659570416,
-1.7430328887209088,
-1.5379414228820203,
-1.7838011423472508,
-1.139095076944217,
-0.6321855879833425,
],
"tokens": [
"70470",
"80723",
"52693",
"39743",
"38983",
"1303",
"56072",
"22306",
"17738",
"53176",
],
}
]
* 3
}
assert response.get_usage_obj().dict() == {
"usages": [{"prompt_tokens": 1, "completion_tokens": 10, "total_tokens": 11}]
* 3,
}
request_params = client.get_request("hello", {"n": 5})
response = client.run_request(request_params)
assert client.get_cache_key(request_params) == {
"prompt": "hello",
"model": "text-davinci-003",
"n": 5,
"temperature": 0.0,
"max_tokens": 10,
"top_p": 1.0,
"best_of": 1,
"engine": "dummy",
"request_cls": "LMRequest",
}
assert response.get_json_response() == {
"choices": [
{
"text": " probsuib.FirstName>- commodityting segunda inserted signals Religious", # noqa: E501
"token_logprobs": [
-0.2649905035732101,
-1.210794839387105,
-1.2173929801003434,
-0.7758233850171001,
-0.7165940659570416,
-1.7430328887209088,
-1.5379414228820203,
-1.7838011423472508,
-1.139095076944217,
-0.6321855879833425,
],
"tokens": [
"70470",
"80723",
"52693",
"39743",
"38983",
"1303",
"56072",
"22306",
"17738",
"53176",
],
}
]
* 5
}
assert response.get_usage_obj().dict() == {
"usages": [{"prompt_tokens": 1, "completion_tokens": 10, "total_tokens": 11}]
* 5,
}
request_params = client.get_request(["hello"] * 5, {"n": 1})
response = client.run_request(request_params)
assert client.get_cache_key(request_params) == {
"prompt": ["hello"] * 5,
"model": "text-davinci-003",
"n": 1,
"temperature": 0.0,
"max_tokens": 10,
"top_p": 1.0,
"best_of": 1,
"engine": "dummy",
"request_cls": "LMRequest",
}
assert response.get_json_response() == {
"choices": [
{
"text": " probsuib.FirstName>- commodityting segunda inserted signals Religious", # noqa: E501
"token_logprobs": [
-0.2649905035732101,
-1.210794839387105,
-1.2173929801003434,
-0.7758233850171001,
-0.7165940659570416,
-1.7430328887209088,
-1.5379414228820203,
-1.7838011423472508,
-1.139095076944217,
-0.6321855879833425,
],
"tokens": [
"70470",
"80723",
"52693",
"39743",
"38983",
"1303",
"56072",
"22306",
"17738",
"53176",
],
}
]
* 5
}
assert response.get_usage_obj().dict() == {
"usages": [{"prompt_tokens": 1, "completion_tokens": 10, "total_tokens": 11}]
* 5,
}
| manifest-main | tests/test_client.py |
"""Cache test."""
from typing import Dict, Type, cast
import numpy as np
import pytest
from redis import Redis
from sqlitedict import SqliteDict
from manifest.caches.cache import Cache
from manifest.caches.noop import NoopCache
from manifest.caches.postgres import PostgresCache
from manifest.caches.redis import RedisCache
from manifest.caches.sqlite import SQLiteCache
from manifest.request import DiffusionRequest, LMRequest, Request
from manifest.response import ArrayModelChoice, ModelChoices, Response
def _get_postgres_cache(
request_type: Type[Request] = LMRequest, cache_args: Dict = {}
) -> Cache: # type: ignore
"""Get postgres cache."""
cache_args.update({"cache_user": "", "cache_password": "", "cache_db": ""})
return PostgresCache(
"postgres",
request_type=request_type,
cache_args=cache_args,
)
@pytest.mark.usefixtures("sqlite_cache")
@pytest.mark.usefixtures("redis_cache")
@pytest.mark.usefixtures("postgres_cache")
@pytest.mark.parametrize("cache_type", ["sqlite", "redis", "postgres"])
def test_init(
sqlite_cache: str, redis_cache: str, postgres_cache: str, cache_type: str
) -> None:
"""Test cache initialization."""
if cache_type == "sqlite":
sql_cache_obj = SQLiteCache(sqlite_cache)
assert isinstance(sql_cache_obj.cache, SqliteDict)
elif cache_type == "redis":
redis_cache_obj = RedisCache(redis_cache)
assert isinstance(redis_cache_obj.redis, Redis)
elif cache_type == "postgres":
postgres_cache_obj = _get_postgres_cache()
isinstance(postgres_cache_obj, PostgresCache)
@pytest.mark.usefixtures("sqlite_cache")
@pytest.mark.usefixtures("redis_cache")
@pytest.mark.usefixtures("postgres_cache")
@pytest.mark.parametrize("cache_type", ["sqlite", "postgres", "redis"])
def test_key_get_and_set(
sqlite_cache: str, redis_cache: str, postgres_cache: str, cache_type: str
) -> None:
"""Test cache key get and set."""
if cache_type == "sqlite":
cache = cast(Cache, SQLiteCache(sqlite_cache))
elif cache_type == "redis":
cache = cast(Cache, RedisCache(redis_cache))
elif cache_type == "postgres":
cache = cast(Cache, _get_postgres_cache())
cache.set_key("test", "valueA")
cache.set_key("testA", "valueB")
assert cache.get_key("test") == "valueA"
assert cache.get_key("testA") == "valueB"
cache.set_key("testA", "valueC")
assert cache.get_key("testA") == "valueC"
cache.get_key("test", table="prompt") is None
cache.set_key("test", "valueA", table="prompt")
cache.get_key("test", table="prompt") == "valueA"
@pytest.mark.usefixtures("sqlite_cache")
@pytest.mark.usefixtures("redis_cache")
@pytest.mark.usefixtures("postgres_cache")
@pytest.mark.parametrize("cache_type", ["sqlite", "redis", "postgres"])
def test_get(
sqlite_cache: str,
redis_cache: str,
postgres_cache: str,
cache_type: str,
model_choice: ModelChoices,
model_choice_single: ModelChoices,
model_choice_arr_int: ModelChoices,
request_lm: LMRequest,
request_lm_single: LMRequest,
request_diff: DiffusionRequest,
) -> None:
"""Test cache save prompt."""
if cache_type == "sqlite":
cache = cast(Cache, SQLiteCache(sqlite_cache))
elif cache_type == "redis":
cache = cast(Cache, RedisCache(redis_cache))
elif cache_type == "postgres":
cache = cast(Cache, _get_postgres_cache())
response = Response(
response=model_choice_single,
cached=False,
request=request_lm_single,
usages=None,
request_type=LMRequest,
response_type="text",
)
cache_response = cache.get(request_lm_single.dict())
assert cache_response is None
cache.set(request_lm_single.dict(), response.to_dict(drop_request=True))
cache_response = cache.get(request_lm_single.dict())
assert cache_response.get_response() == "helloo"
assert cache_response.is_cached()
assert cache_response.get_request_obj() == request_lm_single
response = Response(
response=model_choice,
cached=False,
request=request_lm,
usages=None,
request_type=LMRequest,
response_type="text",
)
cache_response = cache.get(request_lm.dict())
assert cache_response is None
cache.set(request_lm.dict(), response.to_dict(drop_request=True))
cache_response = cache.get(request_lm.dict())
assert cache_response.get_response() == ["hello", "bye"]
assert cache_response.is_cached()
assert cache_response.get_request_obj() == request_lm
# Test array
response = Response(
response=model_choice_arr_int,
cached=False,
request=request_diff,
usages=None,
request_type=DiffusionRequest,
response_type="array",
)
if cache_type == "sqlite":
cache = SQLiteCache(sqlite_cache, request_type=DiffusionRequest)
elif cache_type == "redis":
cache = RedisCache(redis_cache, request_type=DiffusionRequest)
elif cache_type == "postgres":
cache = _get_postgres_cache(request_type=DiffusionRequest)
cache_response = cache.get(request_diff.dict())
assert cache_response is None
cache.set(request_diff.dict(), response.to_dict(drop_request=True))
cached_response = cache.get(request_diff.dict())
assert np.allclose(
cached_response.get_response()[0],
cast(ArrayModelChoice, model_choice_arr_int.choices[0]).array,
)
assert np.allclose(
cached_response.get_response()[1],
cast(ArrayModelChoice, model_choice_arr_int.choices[1]).array,
)
assert cached_response.is_cached()
assert cached_response.get_request_obj() == request_diff
# Test array byte string
# Make sure to not hit the cache
new_request_diff = DiffusionRequest(**request_diff.dict())
new_request_diff.prompt = ["blahhh", "yayayay"]
response = Response(
response=model_choice_arr_int,
cached=False,
request=new_request_diff,
usages=None,
request_type=DiffusionRequest,
response_type="array",
)
if cache_type == "sqlite":
cache = SQLiteCache(
sqlite_cache,
request_type=DiffusionRequest,
cache_args={"array_serializer": "byte_string"},
)
elif cache_type == "redis":
cache = RedisCache(
redis_cache,
request_type=DiffusionRequest,
cache_args={"array_serializer": "byte_string"},
)
elif cache_type == "postgres":
cache = _get_postgres_cache(
request_type=DiffusionRequest,
cache_args={"array_serializer": "byte_string"},
)
cached_response = cache.get(new_request_diff.dict())
assert cached_response is None
cache.set(new_request_diff.dict(), response.to_dict(drop_request=True))
cached_response = cache.get(new_request_diff.dict())
assert np.allclose(
cached_response.get_response()[0],
cast(ArrayModelChoice, model_choice_arr_int.choices[0]).array,
)
assert np.allclose(
cached_response.get_response()[1],
cast(ArrayModelChoice, model_choice_arr_int.choices[1]).array,
)
assert cached_response.is_cached()
assert cached_response.get_request_obj() == new_request_diff
def test_noop_cache() -> None:
"""Test cache that is a no-op cache."""
cache = NoopCache(None)
cache.set_key("test", "valueA")
cache.set_key("testA", "valueB")
assert cache.get_key("test") is None
assert cache.get_key("testA") is None
cache.set_key("testA", "valueC")
assert cache.get_key("testA") is None
cache.get_key("test", table="prompt") is None
cache.set_key("test", "valueA", table="prompt")
cache.get_key("test", table="prompt") is None
# Assert always not cached
test_request = {"test": "hello", "testA": "world"}
test_response = {"choices": [{"text": "hello"}]}
response = cache.get(test_request)
assert response is None
cache.set(test_request, test_response)
response = cache.get(test_request)
assert response is None
| manifest-main | tests/test_cache.py |
import asyncio
import time
from manifest import Manifest
def main():
manifest = Manifest(
client_name="openaichat",
)
print("Running in serial")
prompts = [f"Tell me something interesting about {i}" for i in range(50)]
st = time.time()
for pmt in prompts:
_ = manifest.run(pmt)
print(f"For loop: {time.time() - st :.2f}")
print("Running with async")
st = time.time()
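    # arun_batch is expected to fan the prompts out as concurrent asyncio requests,
    # so wall-clock time should be far closer to one request than to 50 sequential calls.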
_ = asyncio.run(manifest.arun_batch(prompts, max_tokens=30))
print(f"Async loop: {time.time() - st :.2f}")
if __name__ == "__main__":
main()
| manifest-main | examples/manifest_async.py |
import nltk
from nltk.corpus import wordnet as wn
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
from scipy.sparse.csgraph import floyd_warshall, connected_components
import operator
from collections import defaultdict
import numpy as np
import networkx as nx
import json
# Some definitions
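# dist(u, v) below is the Poincare-ball distance
#   d(u, v) = arcosh(1 + 2 * ||u - v||^2 / ((1 - ||u||^2) * (1 - ||v||^2)))
# and hyp_dist_origin(x) is the special case v = 0, which simplifies to log((1 + ||x||) / (1 - ||x||)).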
def acosh(x):
return np.log(x + np.sqrt(x**2-1))
# Hyperbolic distance
def dist(u,v):
z = 2 * np.linalg.norm(u-v)**2
uu = 1. + z/((1-np.linalg.norm(u)**2)*(1-np.linalg.norm(v)**2))
return acosh(uu)
# Hyperbolic distance from 0
def hyp_dist_origin(x):
return np.log((1+np.linalg.norm(x))/(1-np.linalg.norm(x)))
def make_edge_set(): return ([],([],[]))
def add_edge(e, i,j):
(v,(row,col)) = e
row.append(i)
col.append(j)
v.append(1)
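# The (values, (rows, cols)) tuple assembled by these helpers is exactly the COO-style
# constructor argument accepted by scipy.sparse.csr_matrix in load_wordnet() below.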
# Creating the edge lists for hypernym (noun and verb separate), member_holonyms, topic_domains
# relationships in Wordnet for their largest connected components.
def load_wordnet():
SynstoIDs = dict()
IDstoSyns = dict()
all_syns = list(wn.all_synsets())
for idx, x in enumerate(all_syns):
SynstoIDs[x] = idx
IDstoSyns[idx] = x
# ID_dict[idx] = x.name().split('.')[0]
n = len(all_syns)
e = make_edge_set()
for idx, x in enumerate(all_syns):
for y in x.topic_domains():
y_idx = SynstoIDs[y]
add_edge(e, idx , y_idx)
add_edge(e, y_idx, idx)
#Sparse matrix with all syns
X = csr_matrix(e, shape=(n, n))
return SynstoIDs, IDstoSyns, X, all_syns
SynstoIDs, IDstoSyns, X, all_syns = load_wordnet()
G = nx.from_scipy_sparse_matrix(X)
Gc = max(nx.connected_component_subgraphs(G), key=len)
# Get some stats
connected_comps = sorted(nx.connected_components(G), key = len, reverse=True)
for comp in connected_comps:
if len(comp)>100:
print(len(comp))
print("There are "+str(len(connected_comps))+ " connected components.")
print("There are a total of "+str(G.number_of_nodes())+" nodes in the graph")
print("Largest component has "+str(Gc.number_of_nodes())+ " nodes")
# reorder with nx
Gc_final = nx.convert_node_labels_to_integers(Gc, ordering="decreasing degree", label_attribute="old_label")
#Create the dict for old-id <-> new-id matching for syns
RefDict = Gc_final.node
IDsToSyns_f = dict()
SynsToIDs_f = dict()
for new_idx in RefDict.keys():
old_idx = RefDict[new_idx]['old_label']
curr_syn = IDstoSyns[old_idx]
IDsToSyns_f[new_idx] = curr_syn
SynsToIDs_f[curr_syn] = new_idx
#Write the final edgelist.
nx.write_edgelist(Gc_final, "release_edges_cc/topic_domain/lcc.edges",data=False)
# Read all the emb files, save their tau and emb_dict.
emb_files = {
'release_emb_cc/hypernym/noun_lcc.emb':'hypernyms_noun',
'release_emb_cc/member_holonym/lcc.emb':'member_holonyms',
'release_emb_cc/topic_domain/lcc.emb':'topic_domain',
}
RelEmbDict = defaultdict(dict)
RelTauDict = defaultdict(dict)
for file in emb_files.keys():
with open(file, 'r') as emb:
emb_lines = emb.readlines()
emb_lines = emb_lines[1:]
emb_dict = dict()
rel = emb_files[file]
for idx, line in enumerate(emb_lines):
curr_line = line.split(',')
curr_tau = curr_line[-1].split("\n")[0]
curr_tau = np.float64(curr_tau)
curr_line = curr_line[:-1]
curr_idx = int(curr_line[0])
emb_dict[curr_idx] = np.asarray(list(map(np.float64, curr_line[1:])))
RelEmbDict[rel] = emb_dict
RelTauDict[rel] = curr_tau
#Create the reference doc for evaluation that only includes the connected components for each relationship.
edge_files = {
'release_edges_cc/topic_domain/lcc.edges':'topic_domain',
'release_edges_cc/hypernym/noun_lcc.edges':'hypernyms_noun',
'release_edges_cc/member_holonym/lcc.edges':'member_holonyms',
}
data = defaultdict()
TotalRelCount = 0
ReltoCount = {}
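# NOTE: Ref_IDsToSyns_f and Ref_SynsToIDs_f used below are assumed to map each relationship
# name to the id <-> synset dicts of its largest connected component, built by re-running the
# extraction above once per relationship; only the topic_domain pass is shown in this file.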
for file in edge_files.keys():
rel = edge_files[file]
IDstoSyns_curr = Ref_IDsToSyns_f[rel]
with open(file, 'r') as edg:
edg_lines = edg.readlines()
curr_count = 0
for line in edg_lines:
curr_line = line.split(" ")
syn1 = IDstoSyns_curr[int(curr_line[0])]
syn2 = IDstoSyns_curr[int(curr_line[1].split("\n")[0])]
entity_tup = (syn1,syn2)
data[entity_tup] = rel
curr_count+=1
TotalRelCount+=1
print(str(rel)+" :"+str(curr_count)+" relationships")
ReltoCount[rel]=curr_count
print("There are a total of "+str(TotalRelCount)+" relationship triplets.")
#Do hyperbolic KBC for 10-dimensional embeddings for each relationship.
import numpy as np
import hyp_functions as hyp
vector_dim = 10
ReltoW = defaultdict()
for rel in RelEmbDict.keys():
emb_dict_curr = RelEmbDict[rel]
vocab_size = len(emb_dict_curr)
W_curr = np.zeros((vocab_size, vector_dim))
SynsettoIDs_curr = Ref_SynsToIDs_f[rel]
for idx, vec in emb_dict_curr.items():
W_curr[idx,:] = vec
ReltoW[rel] = W_curr
TruePosAll = 0
ReltoCorrectCount = {}
ReltoPosCount = {}
#Just initialize.
for rel in RelEmbDict.keys():
ReltoCorrectCount[rel]=0
ReltoPosCount[rel]=0
MultRel=0
AccMultRel=0
for tup, rel in data.items():
e1 = tup[0]
e2 = tup[1]
ReltoDist = {}
for r, W in ReltoW.items():
SynsettoIDs_curr = Ref_SynsToIDs_f[r]
emb_dict_curr = RelEmbDict[r]
relTau = RelTauDict[r]
relTau = np.float64(relTau)
if (e1 in SynsettoIDs_curr) and (e2 in SynsettoIDs_curr):
vec_e1 = W[SynsettoIDs_curr[e1],:]
vec_e2 = W[SynsettoIDs_curr[e2],:]
ReltoDist[r] = (hyp.dist(vec_e1,vec_e2))/relTau
pred = min(ReltoDist, key=ReltoDist.get)
ReltoPosCount[pred]+=1
if len(ReltoDist)>1:
MultRel+=1
if pred==rel:
AccMultRel+=1
curr_dist = ReltoDist[pred]
if (curr_dist>0.99) and (curr_dist<1.01):
TruePosAll+=1
ReltoCorrectCount[rel]+=1
for rel in ReltoCorrectCount.keys():
correct_count = ReltoCorrectCount[rel]
total_count = ReltoCount[rel]
pos_count = ReltoPosCount[rel]
print(str(rel)+":")
print("Precision: "+str(correct_count/pos_count))
print("Recall: "+str(correct_count/total_count))
print("\n")
print("Number of tuples involved in more than one relationship: " + str(MultRel))
print("Overall accuracy for that: "+str(AccMultRel/MultRel))
# Find the nearest neighbors (top 2 here) of a particular synset for a chosen relationship.
import hyp_functions as hyp
vector_dim = 10
rel = 'topic_domain'
emb_dict_curr = RelEmbDict[rel]
vocab_size = len(emb_dict_curr)
W = np.zeros((vocab_size, vector_dim))
relTau = RelTauDict[rel]
e1 = wn.synset('geometry.n.01')
e1_idx = SynsToIDs_f[e1]
for idx, vec in emb_dict_curr.items():
W[idx,:] = vec
vec_e1 = emb_dict_curr[e1_idx]
curr_dist = []
for row_idx in range(W.shape[0]):
curr_vec = W[row_idx,:]
normalized_dist = (hyp.dist(curr_vec,vec_e1))/relTau
curr_dist.append(normalized_dist)
curr_dist[e1_idx] = np.Inf
curr_closest_indices = np.argsort(curr_dist)[:2]
for r_idx in curr_closest_indices:
relev_syn = IDsToSyns_f[r_idx]
print(curr_dist[r_idx], relev_syn.name(), relev_syn.definition())
# Word analogy with example synsets for a chosen relationship.
import hyp_functions as hyp
vector_dim = 10
rel = 'member_holonyms'
emb_dict_curr = RelEmbDict[rel]
vocab_size = len(emb_dict_curr)
W = np.zeros((vocab_size, vector_dim))
relTau = RelTauDict[rel]
# Choose the entities.
e1 = wn.synset('african_elephant.n.01')
e1_idx = SynsToIDs_f[e1]
e2 = wn.synset('elephantidae.n.01')
e2_idx = SynsToIDs_f[e2]
e3 = wn.synset('dog.n.01')
e3_idx = SynsToIDs_f[e3]
for idx, vec in emb_dict_curr.items():
W[idx,:] = vec
vec_e1 = emb_dict_curr[e1_idx]
vec_e2 = emb_dict_curr[e2_idx]
vec_e3 = emb_dict_curr[e3_idx]
vec1_ = hyp.hyp_scale(-1, vec_e1)
left_sum = hyp.hyp_weighted_sum(1, 1, vec_e2, vec1_)
print("Print distance between e1 and e2")
vec_search = hyp.hyp_weighted_sum(1, 1, left_sum, vec_e3)
curr_dist = []
for row_idx in range(W.shape[0]):
curr_vec = W[row_idx,:]
normalized_dist = (hyp.dist(curr_vec, vec_search))/relTau
curr_dist.append(normalized_dist)
curr_dist[e1_idx] = np.Inf
curr_dist[e2_idx] = np.Inf
curr_dist[e3_idx] = np.Inf
curr_closest_indices = np.argsort(curr_dist)[:10]
for r_idx in curr_closest_indices:
relev_syn = IDsToSyns_f[r_idx]
print(curr_dist[r_idx], relev_syn.name(), relev_syn.definition())
# Write word embeddings in the GloVe format for the largest cc for each relationship.
# Get the most frequent meaning for each word (if it's involved in multiple synsets)
rel = "topic_domain"
emb_dict_curr = RelEmbDict[rel]
WordtoVec = dict()
WordtoLemma = dict()
for idx in IDsToSyns_f.keys():
syn = IDsToSyns_f[idx]
vec = emb_dict_curr[idx]
for curr_lemma in syn.lemmas():
word = curr_lemma.name()
if word not in WordtoVec.keys():
WordtoVec[word] = vec
WordtoLemma[word] = curr_lemma
if (word in WordtoVec.keys()) and (curr_lemma.count()>WordtoLemma[word].count()):
WordtoVec[word] = vec
WordtoLemma[word] = curr_lemma
print("There were "+str(len(IDsToSyns_f))+" synsets")
print("There are "+str(len(WordtoVec))+" words now")
lines = []
for word in WordtoVec.keys():
curr_line = str(word) + " " + " ".join(list(map(str,WordtoVec[word])))
lines.append(curr_line)
with open('wordnet_word_emb/domain_topic.txt', 'w') as f:
f.write('\n'.join(lines))
| hyperE-master | preprocess/wordnet_preprocess.py |
# read from a list of artists and get albums and songs from musicbrainz
import argh
import urllib.request as req
import numpy as np
from xml.etree import ElementTree as ET
# songs will overlap in title:
def check_song_name(song_dict, song_name, idx):
sn = song_name
i = 1
while sn in song_dict:
i += 1
sn = song_name + "_" + str(i)
song_dict[sn] = idx
return sn
# urls we need for queries:
url_base = 'https://musicbrainz.org/ws/2/artist/?query='
url_base_rec = 'https://musicbrainz.org/ws/2/recording?query=arid:'
offset_str = '&offset='
limit_str = '&limit=100'
# xpath stuff
sch = '{http://musicbrainz.org/ns/mmd-2.0#}'
artls = 'artist-list'
art = 'artist'
recls = 'recording-list'
recs = 'recording'
rells = 'release-list'
rel = 'release'
schrecs = sch+recls+'/'+sch+recs
schrels = sch+rells+'/'+sch+rel
schart = sch+artls+'/'+sch+art
schrecolist = sch+recls
@argh.arg("-s", "--songsmax", help="Max. number of songs per artist to find")
@argh.arg("-a", "--artistlist", help="Input file of artists")
@argh.arg("-o", "--outputinfo", help="Output file of information")
@argh.arg("-e", "--edgelist", help="Edge list file output")
def buildmusic(artistlist="data/artists.txt", outputinfo="data/music_info.txt", songsmax=100, edgelist="edges/music.edges"):
fp = open(artistlist, 'r')
fp2 = open(outputinfo, 'w')
# build catalogs and edgelist for embedding
dict_artists = open('dicts/dict_artists.txt', 'w')
dict_albums = open('dicts/dict_albums.txt', 'w')
dict_songs = open('dicts/dict_songs.txt', 'w')
edge_lists = open(edgelist, 'w')
album_dict = {}
song_dict = {}
idx = 1
for line in fp:
words = line.split()
artist_str = '\%20'.join(words)
print("Getting music by ", artist_str)
# let's download stuff:
html = req.urlopen(url_base+artist_str).read()
# getting the relevant artist is tricky; we just grab the first and hope it's right
r = ET.fromstring(html)
#artists = r.findall(schart)
artist = r.find(schart)
n_found = len(artist)
'''
# possible we found several artists, so disambiguate them:
for artist in artists:
# by exact name?
artist_name = artist.find(sch + 'name')
if artist_name.text == artist_str:
break
diso = artist.find(sch+'disambiguation')
if diso is not None:
dis = diso.text
if dis.find('rapper')!=-1 or dis.find('Rapper')!=-1:
break
'''
if n_found > 0:
# found an artist:
artist_name = artist.find(sch + 'name')
artist_id = artist.attrib['id']
artist_idx = idx
idx += 1
dict_artists.write(artist_name.text + "\t" + str(artist_idx) + "\n")
# implicit forest embedding
edge_lists.write('0' + "\t" + str(artist_idx) + "\t" + '10' + "\n")
# now let's get their songs:
tot_hits = np.Inf
song_offset = 0
while song_offset < songsmax and song_offset < tot_hits:
# no offset for first call:
if song_offset == 0:
html2 = req.urlopen(url_base_rec+artist_id+limit_str).read()
rec_r = ET.fromstring(html2)
# get the number of hits:
rec_list = rec_r.find(schrecolist)
tot_hits = int(rec_list.attrib['count'])
else:
html2 = req.urlopen(url_base_rec+artist_id+limit_str+offset_str+str(song_offset)).read()
rec_r = ET.fromstring(html2)
song_offset += 100
# get their songs:
recordings = rec_r.findall(schrecs)
for record in recordings:
song_name = record.find(sch+'title')
sn = song_name.text
# try to find the albums corresponding to each song:
album = record.find(schrels)
if album:
song_idx = idx
sn = check_song_name(song_dict, sn, song_idx)
dict_songs.write(sn + "\t" + str(song_idx) + "\n")
idx += 1
album_name = album.find(sch+'title')
if album_name.text not in album_dict:
album_dict[album_name.text] = idx
idx += 1
dict_albums.write(album_name.text + "\t" + str(album_dict[album_name.text]) + "\n")
edge_lists.write(str(artist_idx) + "\t" + str(album_dict[album_name.text]) + "\t" + '1' + "\n")
edge_lists.write(str(album_dict[album_name.text]) + "\t" + str(song_idx) + "\t" + '2' + "\n")
# write everything in format ARTIST TAB ALBUM TAB SONG for future reference
fp2.write(artist_name.text + "\t" + album_name.text + "\t" + sn + "\n")
fp.close()
fp2.close()
dict_artists.close()
dict_albums.close()
dict_songs.close()
edge_lists.close()
return
if __name__ == '__main__':
_parser = argh.ArghParser()
_parser.add_commands([buildmusic])
_parser.dispatch()
| hyperE-master | preprocess/musicbrainz_preprocess.py |
import nltk
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
from scipy.sparse.csgraph import floyd_warshall, connected_components
import operator
from collections import defaultdict
import numpy as np
import networkx as nx
import json
# Some definitions
def acosh(x):
return np.log(x + np.sqrt(x**2-1))
# Hyperbolic distance
def dist(u,v):
z = 2 * np.linalg.norm(u-v)**2
uu = 1. + z/((1-np.linalg.norm(u)**2)*(1-np.linalg.norm(v)**2))
return acosh(uu)
# Hyperbolic distance from 0
def hyp_dist_origin(x):
return np.log((1+np.linalg.norm(x))/(1-np.linalg.norm(x)))
def make_edge_set(): return ([],([],[]))
def add_edge(e, i,j):
(v,(row,col)) = e
row.append(i)
col.append(j)
v.append(1)
files = {
'wikidata_data/student_of.tsv':'student_of',
'wikidata_data/member_of.tsv':'member_of',
'wikidata_data/part_of.tsv':'part_of',
}
Ref_QtoLabel_f = defaultdict(dict)
Ref_QtoIDs_f = defaultdict(dict)
Ref_IDtoQs_f = defaultdict(dict)
for file,rel in files.items():
with open(file, "r") as data:
data_lines = data.readlines()
data_lines = data_lines[1:]
QtoLabel = dict()
QtoIDs = defaultdict()
IDtoQs = dict()
e = make_edge_set()
counter = 0
triple_count = 0
for line in data_lines:
curr_line = line.split("\t")
item = (curr_line[0].split("/"))[-1]
# itemLabel = (curr_line[1].split("/"))[-1]
influenced_by = (curr_line[1].split("/"))[-1]
influenced_byLabel = (curr_line[2].split("/"))[-1].split("\n")[0]
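        # The influenced_by names appear to be carried over from an earlier query; column 0 is the
        # subject entity and columns 1-2 hold the related entity and its label for whichever
        # relationship file (student_of, member_of, part_of) is being processed.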
if influenced_by not in QtoLabel.keys():
QtoLabel[influenced_by] = influenced_byLabel
if item not in QtoIDs.keys():
QtoIDs[item] = counter
IDtoQs[counter] = item
counter+=1
if influenced_by not in QtoIDs.keys():
QtoIDs[influenced_by] = counter
IDtoQs[counter] = influenced_by
add_edge(e,QtoIDs[item], QtoIDs[influenced_by])
add_edge(e,QtoIDs[influenced_by], QtoIDs[item])
triple_count+=1
print("There are a total of "+str(triple_count)+" triples for the relationship "+str(rel)+".")
# Take the largest lcc for the relationship.
n = len(QtoIDs)
X = csr_matrix(e, shape=(n, n))
G = nx.from_scipy_sparse_matrix(X)
Gc = max(nx.connected_component_subgraphs(G), key=len)
print("Total number of unique entities: "+str(G.number_of_nodes()))
print("Total number of nodes in lcc: "+str(Gc.number_of_nodes()))
Gc_final = nx.convert_node_labels_to_integers(Gc, ordering="decreasing degree", label_attribute="old_label")
#Create the dict for old-id <-> new-id matching for syns
RefDict = Gc_final.node
IDtoQs_f = dict()
QtoIDs_f = dict()
for new_idx in RefDict.keys():
old_idx = RefDict[new_idx]['old_label']
curr_Q = IDtoQs[old_idx]
IDtoQs_f[new_idx] = curr_Q
QtoIDs_f[curr_Q] = new_idx
#Write the final edgelist.
nx.write_edgelist(Gc_final, "wikidata_edges/"+str(rel)+"/lcc.edges",data=False)
#Take the labels only in the lcc.
keys_a = set(QtoLabel.keys())
keys_b = set(QtoIDs_f.keys())
intersection = keys_a & keys_b
QtoLabel_f = dict()
for item in intersection:
QtoLabel_f[item] = QtoLabel[item]
Ref_QtoLabel_f[rel] = QtoLabel_f
Ref_QtoIDs_f[rel] = QtoIDs_f
Ref_IDtoQs_f[rel] = IDtoQs_f
| hyperE-master | preprocess/wikidata_preprocess.py |
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from autoencoder.autoencoder_models.Autoencoder import Autoencoder
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
def standard_scale(X_train, X_test):
preprocessor = prep.StandardScaler().fit(X_train)
X_train = preprocessor.transform(X_train)
X_test = preprocessor.transform(X_test)
return X_train, X_test
def get_random_block_from_data(data, batch_size):
start_index = np.random.randint(0, len(data) - batch_size)
return data[start_index:(start_index + batch_size)]
X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1
autoencoder = Autoencoder(n_input = 784,
n_hidden = 200,
transfer_function = tf.nn.softplus,
optimizer = tf.train.AdamOptimizer(learning_rate = 0.001))
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(n_samples / batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs = get_random_block_from_data(X_train, batch_size)
# Fit training using batch data
cost = autoencoder.partial_fit(batch_xs)
# Compute average loss
avg_cost += cost / n_samples * batch_size
# Display logs per epoch step
if epoch % display_step == 0:
print "Epoch:", '%04d' % (epoch + 1), \
"cost=", "{:.9f}".format(avg_cost)
print "Total cost: " + str(autoencoder.calc_total_cost(X_test))
| models-master | autoencoder/AutoencoderRunner.py |
models-master | autoencoder/__init__.py |
|
import numpy as np
import tensorflow as tf
def xavier_init(fan_in, fan_out, constant = 1):
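    # Xavier/Glorot uniform initialization: weights are drawn from
    # U(-sqrt(6 / (fan_in + fan_out)), +sqrt(6 / (fan_in + fan_out))), scaled by `constant`,
    # to keep activation variances roughly stable across layers.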
low = -constant * np.sqrt(6.0 / (fan_in + fan_out))
high = constant * np.sqrt(6.0 / (fan_in + fan_out))
return tf.random_uniform((fan_in, fan_out),
minval = low, maxval = high,
dtype = tf.float32)
| models-master | autoencoder/Utils.py |
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from autoencoder.autoencoder_models.DenoisingAutoencoder import AdditiveGaussianNoiseAutoencoder
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
def standard_scale(X_train, X_test):
preprocessor = prep.StandardScaler().fit(X_train)
X_train = preprocessor.transform(X_train)
X_test = preprocessor.transform(X_test)
return X_train, X_test
def get_random_block_from_data(data, batch_size):
start_index = np.random.randint(0, len(data) - batch_size)
return data[start_index:(start_index + batch_size)]
X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1
autoencoder = AdditiveGaussianNoiseAutoencoder(n_input = 784,
n_hidden = 200,
transfer_function = tf.nn.softplus,
optimizer = tf.train.AdamOptimizer(learning_rate = 0.001),
scale = 0.01)
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(n_samples / batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs = get_random_block_from_data(X_train, batch_size)
# Fit training using batch data
cost = autoencoder.partial_fit(batch_xs)
# Compute average loss
avg_cost += cost / n_samples * batch_size
# Display logs per epoch step
if epoch % display_step == 0:
print "Epoch:", '%04d' % (epoch + 1), \
"cost=", "{:.9f}".format(avg_cost)
print "Total cost: " + str(autoencoder.calc_total_cost(X_test))
| models-master | autoencoder/AdditiveGaussianNoiseAutoencoderRunner.py |
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from autoencoder.autoencoder_models.VariationalAutoencoder import VariationalAutoencoder
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
def min_max_scale(X_train, X_test):
preprocessor = prep.MinMaxScaler().fit(X_train)
X_train = preprocessor.transform(X_train)
X_test = preprocessor.transform(X_test)
return X_train, X_test
def get_random_block_from_data(data, batch_size):
start_index = np.random.randint(0, len(data) - batch_size)
return data[start_index:(start_index + batch_size)]
X_train, X_test = min_max_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1
autoencoder = VariationalAutoencoder(n_input = 784,
n_hidden = 200,
optimizer = tf.train.AdamOptimizer(learning_rate = 0.001))
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(n_samples / batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs = get_random_block_from_data(X_train, batch_size)
# Fit training using batch data
cost = autoencoder.partial_fit(batch_xs)
# Compute average loss
avg_cost += cost / n_samples * batch_size
# Display logs per epoch step
if epoch % display_step == 0:
print "Epoch:", '%04d' % (epoch + 1), \
"cost=", "{:.9f}".format(avg_cost)
print "Total cost: " + str(autoencoder.calc_total_cost(X_test))
| models-master | autoencoder/VariationalAutoencoderRunner.py |
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from autoencoder.autoencoder_models.DenoisingAutoencoder import MaskingNoiseAutoencoder
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
def standard_scale(X_train, X_test):
preprocessor = prep.StandardScaler().fit(X_train)
X_train = preprocessor.transform(X_train)
X_test = preprocessor.transform(X_test)
return X_train, X_test
def get_random_block_from_data(data, batch_size):
start_index = np.random.randint(0, len(data) - batch_size)
return data[start_index:(start_index + batch_size)]
X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
training_epochs = 100
batch_size = 128
display_step = 1
autoencoder = MaskingNoiseAutoencoder(n_input = 784,
n_hidden = 200,
transfer_function = tf.nn.softplus,
optimizer = tf.train.AdamOptimizer(learning_rate = 0.001),
dropout_probability = 0.95)
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(n_samples / batch_size)
for i in range(total_batch):
batch_xs = get_random_block_from_data(X_train, batch_size)
cost = autoencoder.partial_fit(batch_xs)
avg_cost += cost / n_samples * batch_size
if epoch % display_step == 0:
print "Epoch:", '%04d' % (epoch + 1), \
"cost=", "{:.9f}".format(avg_cost)
print "Total cost: " + str(autoencoder.calc_total_cost(X_test))
| models-master | autoencoder/MaskingNoiseAutoencoderRunner.py |
import tensorflow as tf
import numpy as np
import autoencoder.Utils
class VariationalAutoencoder(object):
def __init__(self, n_input, n_hidden, optimizer = tf.train.AdamOptimizer()):
self.n_input = n_input
self.n_hidden = n_hidden
network_weights = self._initialize_weights()
self.weights = network_weights
# model
self.x = tf.placeholder(tf.float32, [None, self.n_input])
self.z_mean = tf.add(tf.matmul(self.x, self.weights['w1']), self.weights['b1'])
self.z_log_sigma_sq = tf.add(tf.matmul(self.x, self.weights['log_sigma_w1']), self.weights['log_sigma_b1'])
# sample from gaussian distribution
eps = tf.random_normal(tf.pack([tf.shape(self.x)[0], self.n_hidden]), 0, 1, dtype = tf.float32)
self.z = tf.add(self.z_mean, tf.mul(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))
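        # Reparameterization trick: z = mean + sigma * eps with eps ~ N(0, I), which keeps the
        # sampling step differentiable with respect to the encoder parameters.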
self.reconstruction = tf.add(tf.matmul(self.z, self.weights['w2']), self.weights['b2'])
# cost
reconstr_loss = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
latent_loss = -0.5 * tf.reduce_sum(1 + self.z_log_sigma_sq
- tf.square(self.z_mean)
- tf.exp(self.z_log_sigma_sq), 1)
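        # latent_loss is the closed-form KL divergence KL(N(mean, sigma^2) || N(0, I)); note that
        # the reconstruction term above is a plain squared error rather than a log-likelihood.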
self.cost = tf.reduce_mean(reconstr_loss + latent_loss)
self.optimizer = optimizer.minimize(self.cost)
init = tf.initialize_all_variables()
self.sess = tf.Session()
self.sess.run(init)
def _initialize_weights(self):
all_weights = dict()
all_weights['w1'] = tf.Variable(autoencoder.Utils.xavier_init(self.n_input, self.n_hidden))
all_weights['log_sigma_w1'] = tf.Variable(autoencoder.Utils.xavier_init(self.n_input, self.n_hidden))
all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
all_weights['log_sigma_b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
return all_weights
def partial_fit(self, X):
cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict={self.x: X})
return cost
def calc_total_cost(self, X):
return self.sess.run(self.cost, feed_dict = {self.x: X})
def transform(self, X):
return self.sess.run(self.z_mean, feed_dict={self.x: X})
def generate(self, hidden = None):
if hidden is None:
hidden = np.random.normal(size=self.weights["b1"])
return self.sess.run(self.reconstruction, feed_dict={self.z_mean: hidden})
def reconstruct(self, X):
return self.sess.run(self.reconstruction, feed_dict={self.x: X})
def getWeights(self):
return self.sess.run(self.weights['w1'])
def getBiases(self):
return self.sess.run(self.weights['b1'])
| models-master | autoencoder/autoencoder_models/VariationalAutoencoder.py |
models-master | autoencoder/autoencoder_models/__init__.py |
|
import tensorflow as tf
import numpy as np
import autoencoder.Utils
class Autoencoder(object):
def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus, optimizer = tf.train.AdamOptimizer()):
self.n_input = n_input
self.n_hidden = n_hidden
self.transfer = transfer_function
network_weights = self._initialize_weights()
self.weights = network_weights
# model
self.x = tf.placeholder(tf.float32, [None, self.n_input])
self.hidden = self.transfer(tf.add(tf.matmul(self.x, self.weights['w1']), self.weights['b1']))
self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])
# cost
self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
self.optimizer = optimizer.minimize(self.cost)
init = tf.initialize_all_variables()
self.sess = tf.Session()
self.sess.run(init)
def _initialize_weights(self):
all_weights = dict()
all_weights['w1'] = tf.Variable(autoencoder.Utils.xavier_init(self.n_input, self.n_hidden))
all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
return all_weights
def partial_fit(self, X):
cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict={self.x: X})
return cost
def calc_total_cost(self, X):
return self.sess.run(self.cost, feed_dict = {self.x: X})
def transform(self, X):
return self.sess.run(self.hidden, feed_dict={self.x: X})
def generate(self, hidden = None):
if hidden is None:
hidden = np.random.normal(size=self.weights["b1"])
return self.sess.run(self.reconstruction, feed_dict={self.hidden: hidden})
def reconstruct(self, X):
return self.sess.run(self.reconstruction, feed_dict={self.x: X})
def getWeights(self):
return self.sess.run(self.weights['w1'])
def getBiases(self):
return self.sess.run(self.weights['b1'])
| models-master | autoencoder/autoencoder_models/Autoencoder.py |
import tensorflow as tf
import numpy as np
import autoencoder.Utils
class AdditiveGaussianNoiseAutoencoder(object):
def __init__(self, n_input, n_hidden, transfer_function = tf.nn.softplus, optimizer = tf.train.AdamOptimizer(),
scale = 0.1):
self.n_input = n_input
self.n_hidden = n_hidden
self.transfer = transfer_function
self.scale = tf.placeholder(tf.float32)
self.training_scale = scale
network_weights = self._initialize_weights()
self.weights = network_weights
# model
self.x = tf.placeholder(tf.float32, [None, self.n_input])
self.hidden = self.transfer(tf.add(tf.matmul(self.x + scale * tf.random_normal((n_input,)),
self.weights['w1']),
self.weights['b1']))
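        # The encoder sees the input corrupted with additive Gaussian noise (scale * N(0, 1)), while
        # the squared-error loss below reconstructs the clean self.x -- the standard denoising setup.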
self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])
# cost
self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
self.optimizer = optimizer.minimize(self.cost)
init = tf.initialize_all_variables()
self.sess = tf.Session()
self.sess.run(init)
def _initialize_weights(self):
all_weights = dict()
all_weights['w1'] = tf.Variable(autoencoder.Utils.xavier_init(self.n_input, self.n_hidden))
all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype = tf.float32))
all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype = tf.float32))
all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype = tf.float32))
return all_weights
def partial_fit(self, X):
cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict = {self.x: X,
self.scale: self.training_scale
})
return cost
def calc_total_cost(self, X):
return self.sess.run(self.cost, feed_dict = {self.x: X,
self.scale: self.training_scale
})
def transform(self, X):
return self.sess.run(self.hidden, feed_dict = {self.x: X,
self.scale: self.training_scale
})
def generate(self, hidden = None):
if hidden is None:
hidden = np.random.normal(size = self.weights["b1"])
return self.sess.run(self.reconstruction, feed_dict = {self.hidden: hidden})
def reconstruct(self, X):
return self.sess.run(self.reconstruction, feed_dict = {self.x: X,
self.scale: self.training_scale
})
def getWeights(self):
return self.sess.run(self.weights['w1'])
def getBiases(self):
return self.sess.run(self.weights['b1'])
class MaskingNoiseAutoencoder(object):
def __init__(self, n_input, n_hidden, transfer_function = tf.nn.softplus, optimizer = tf.train.AdamOptimizer(),
dropout_probability = 0.95):
self.n_input = n_input
self.n_hidden = n_hidden
self.transfer = transfer_function
self.dropout_probability = dropout_probability
self.keep_prob = tf.placeholder(tf.float32)
network_weights = self._initialize_weights()
self.weights = network_weights
# model
self.x = tf.placeholder(tf.float32, [None, self.n_input])
self.hidden = self.transfer(tf.add(tf.matmul(tf.nn.dropout(self.x, self.keep_prob), self.weights['w1']),
self.weights['b1']))
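        # Masking noise: tf.nn.dropout zeroes each input feature with probability 1 - keep_prob (and
        # rescales the survivors by 1 / keep_prob); the loss still reconstructs the uncorrupted self.x.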
self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])
# cost
self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
self.optimizer = optimizer.minimize(self.cost)
init = tf.initialize_all_variables()
self.sess = tf.Session()
self.sess.run(init)
def _initialize_weights(self):
all_weights = dict()
all_weights['w1'] = tf.Variable(autoencoder.Utils.xavier_init(self.n_input, self.n_hidden))
all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype = tf.float32))
all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype = tf.float32))
all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype = tf.float32))
return all_weights
def partial_fit(self, X):
cost, opt = self.sess.run((self.cost, self.optimizer),
feed_dict = {self.x: X, self.keep_prob: self.dropout_probability})
return cost
def calc_total_cost(self, X):
return self.sess.run(self.cost, feed_dict = {self.x: X, self.keep_prob: 1.0})
def transform(self, X):
return self.sess.run(self.hidden, feed_dict = {self.x: X, self.keep_prob: 1.0})
def generate(self, hidden = None):
if hidden is None:
hidden = np.random.normal(size = self.weights["b1"])
return self.sess.run(self.reconstruction, feed_dict = {self.hidden: hidden})
def reconstruct(self, X):
return self.sess.run(self.reconstruction, feed_dict = {self.x: X, self.keep_prob: 1.0})
def getWeights(self):
return self.sess.run(self.weights['w1'])
def getBiases(self):
return self.sess.run(self.weights['b1'])
| models-master | autoencoder/autoencoder_models/DenoisingAutoencoder.py |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Submatrix-wise Vector Embedding Learner.
Implementation of SwiVel algorithm described at:
http://arxiv.org/abs/1602.02215
This program expects an input directory that contains the following files.
row_vocab.txt, col_vocab.txt
    The row and column vocabulary files. Each file should contain one token per
    line; these will be used to generate a tab-separated file containing the
    trained embeddings.
  row_sums.txt, col_sums.txt
The matrix row and column marginal sums. Each file should contain one
decimal floating point number per line which corresponds to the marginal
count of the matrix for that row or column.
  shard-<row>-<col>.pb
    Files containing the sub-matrix shards. Each shard is expected to be a
    serialized tf.Example protocol buffer with the following properties:
    global_row: the global row indices contained in the shard
    global_col: the global column indices contained in the shard
sparse_local_row, sparse_local_col, sparse_value: three parallel arrays
that are a sparse representation of the submatrix counts.
It will generate embeddings, training from the input directory for the specified
number of epochs. When complete, it will output the trained vectors to a
tab-separated file that contains one line per embedding. Row and column
embeddings are stored in separate files.
"""
import argparse
import glob
import math
import os
import sys
import time
import threading
import numpy as np
import tensorflow as tf
flags = tf.app.flags
flags.DEFINE_string('input_base_path', '/tmp/swivel_data',
'Directory containing input shards, vocabularies, '
'and marginals.')
flags.DEFINE_string('output_base_path', '/tmp/swivel_data',
'Path where to write the trained embeddings.')
flags.DEFINE_integer('embedding_size', 300, 'Size of the embeddings')
flags.DEFINE_boolean('trainable_bias', False, 'Biases are trainable')
flags.DEFINE_integer('submatrix_rows', 4096, 'Rows in each training submatrix. '
'This must match the training data.')
flags.DEFINE_integer('submatrix_cols', 4096, 'Cols in each training submatrix. '
                     'This must match the training data.')
flags.DEFINE_float('loss_multiplier', 1.0 / 4096,
'constant multiplier on loss.')
flags.DEFINE_float('confidence_exponent', 0.5,
'Exponent for l2 confidence function')
flags.DEFINE_float('confidence_scale', 0.25, 'Scale for l2 confidence function')
flags.DEFINE_float('confidence_base', 0.1, 'Base for l2 confidence function')
flags.DEFINE_float('learning_rate', 1.0, 'Initial learning rate')
flags.DEFINE_integer('num_concurrent_steps', 2,
'Number of threads to train with')
flags.DEFINE_float('num_epochs', 40, 'Number epochs to train for')
flags.DEFINE_float('per_process_gpu_memory_fraction', 0.25,
'Fraction of GPU memory to use')
FLAGS = flags.FLAGS
def embeddings_with_init(vocab_size, embedding_dim, name):
"""Creates and initializes the embedding tensors."""
return tf.get_variable(name=name,
shape=[vocab_size, embedding_dim],
initializer=tf.random_normal_initializer(
stddev=math.sqrt(1.0 / embedding_dim)))
def count_matrix_input(filenames, submatrix_rows, submatrix_cols):
"""Reads submatrix shards from disk."""
filename_queue = tf.train.string_input_producer(filenames)
reader = tf.WholeFileReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
features={
'global_row': tf.FixedLenFeature([submatrix_rows], dtype=tf.int64),
'global_col': tf.FixedLenFeature([submatrix_cols], dtype=tf.int64),
'sparse_local_row': tf.VarLenFeature(dtype=tf.int64),
'sparse_local_col': tf.VarLenFeature(dtype=tf.int64),
'sparse_value': tf.VarLenFeature(dtype=tf.float32)
})
global_row = features['global_row']
global_col = features['global_col']
sparse_local_row = features['sparse_local_row'].values
sparse_local_col = features['sparse_local_col'].values
sparse_count = features['sparse_value'].values
sparse_indices = tf.concat(1, [tf.expand_dims(sparse_local_row, 1),
tf.expand_dims(sparse_local_col, 1)])
count = tf.sparse_to_dense(sparse_indices, [submatrix_rows, submatrix_cols],
sparse_count)
queued_global_row, queued_global_col, queued_count = tf.train.batch(
[global_row, global_col, count],
batch_size=1,
num_threads=4,
capacity=32)
queued_global_row = tf.reshape(queued_global_row, [submatrix_rows])
queued_global_col = tf.reshape(queued_global_col, [submatrix_cols])
queued_count = tf.reshape(queued_count, [submatrix_rows, submatrix_cols])
return queued_global_row, queued_global_col, queued_count
def read_marginals_file(filename):
"""Reads text file with one number per line to an array."""
with open(filename) as lines:
return [float(line) for line in lines]
def write_embedding_tensor_to_disk(vocab_path, output_path, sess, embedding):
"""Writes tensor to output_path as tsv"""
# Fetch the embedding values from the model
embeddings = sess.run(embedding)
with open(output_path, 'w') as out_f:
with open(vocab_path) as vocab_f:
for index, word in enumerate(vocab_f):
word = word.strip()
embedding = embeddings[index]
out_f.write(word + '\t' + '\t'.join([str(x) for x in embedding]) + '\n')
def write_embeddings_to_disk(config, model, sess):
"""Writes row and column embeddings disk"""
# Row Embedding
row_vocab_path = config.input_base_path + '/row_vocab.txt'
row_embedding_output_path = config.output_base_path + '/row_embedding.tsv'
print 'Writing row embeddings to:', row_embedding_output_path
write_embedding_tensor_to_disk(row_vocab_path, row_embedding_output_path,
sess, model.row_embedding)
# Column Embedding
col_vocab_path = config.input_base_path + '/col_vocab.txt'
col_embedding_output_path = config.output_base_path + '/col_embedding.tsv'
print 'Writing column embeddings to:', col_embedding_output_path
write_embedding_tensor_to_disk(col_vocab_path, col_embedding_output_path,
sess, model.col_embedding)
class SwivelModel(object):
"""Small class to gather needed pieces from a Graph being built."""
def __init__(self, config):
"""Construct graph for dmc."""
self._config = config
# Create paths to input data files
print 'Reading model from:', config.input_base_path
count_matrix_files = glob.glob(config.input_base_path + '/shard-*.pb')
row_sums_path = config.input_base_path + '/row_sums.txt'
col_sums_path = config.input_base_path + '/col_sums.txt'
# Read marginals
row_sums = read_marginals_file(row_sums_path)
col_sums = read_marginals_file(col_sums_path)
self.n_rows = len(row_sums)
self.n_cols = len(col_sums)
print 'Matrix dim: (%d,%d) SubMatrix dim: (%d,%d) ' % (
self.n_rows, self.n_cols, config.submatrix_rows, config.submatrix_cols)
self.n_submatrices = (self.n_rows * self.n_cols /
(config.submatrix_rows * config.submatrix_cols))
print 'n_submatrices: %d' % (self.n_submatrices)
# ===== CREATE VARIABLES ======
with tf.device('/cpu:0'):
# embeddings
self.row_embedding = embeddings_with_init(
embedding_dim=config.embedding_size,
vocab_size=self.n_rows,
name='row_embedding')
self.col_embedding = embeddings_with_init(
embedding_dim=config.embedding_size,
vocab_size=self.n_cols,
name='col_embedding')
tf.histogram_summary('row_emb', self.row_embedding)
tf.histogram_summary('col_emb', self.col_embedding)
matrix_log_sum = math.log(np.sum(row_sums) + 1)
row_bias_init = [math.log(x + 1) for x in row_sums]
col_bias_init = [math.log(x + 1) for x in col_sums]
self.row_bias = tf.Variable(row_bias_init,
trainable=config.trainable_bias)
self.col_bias = tf.Variable(col_bias_init,
trainable=config.trainable_bias)
tf.histogram_summary('row_bias', self.row_bias)
tf.histogram_summary('col_bias', self.col_bias)
# ===== CREATE GRAPH =====
# Get input
with tf.device('/cpu:0'):
global_row, global_col, count = count_matrix_input(
count_matrix_files, config.submatrix_rows, config.submatrix_cols)
# Fetch embeddings.
selected_row_embedding = tf.nn.embedding_lookup(self.row_embedding,
global_row)
selected_col_embedding = tf.nn.embedding_lookup(self.col_embedding,
global_col)
# Fetch biases.
selected_row_bias = tf.nn.embedding_lookup([self.row_bias], global_row)
selected_col_bias = tf.nn.embedding_lookup([self.col_bias], global_col)
# Multiply the row and column embeddings to generate predictions.
predictions = tf.matmul(
selected_row_embedding, selected_col_embedding, transpose_b=True)
# These binary masks separate zero from non-zero values.
count_is_nonzero = tf.to_float(tf.cast(count, tf.bool))
count_is_zero = 1 - tf.to_float(tf.cast(count, tf.bool))
objectives = count_is_nonzero * tf.log(count + 1e-30)
objectives -= tf.reshape(selected_row_bias, [config.submatrix_rows, 1])
objectives -= selected_col_bias
objectives += matrix_log_sum
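    # With the biases at their log-marginal initialization, `objectives` estimates the pointwise
    # mutual information log x_ij - log x_i - log x_j + log |X| for each observed pair, so `err`
    # measures how far each row/column dot product is from the observed PMI.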
err = predictions - objectives
# The confidence function scales the L2 loss based on the raw co-occurrence
# count.
l2_confidence = (config.confidence_base + config.confidence_scale * tf.pow(
count, config.confidence_exponent))
l2_loss = config.loss_multiplier * tf.reduce_sum(
0.5 * l2_confidence * err * err * count_is_nonzero)
sigmoid_loss = config.loss_multiplier * tf.reduce_sum(
tf.nn.softplus(err) * count_is_zero)
self.loss = l2_loss + sigmoid_loss
tf.scalar_summary("l2_loss", l2_loss)
tf.scalar_summary("sigmoid_loss", sigmoid_loss)
tf.scalar_summary("loss", self.loss)
# Add optimizer.
self.global_step = tf.Variable(0, name='global_step')
opt = tf.train.AdagradOptimizer(config.learning_rate)
self.train_op = opt.minimize(self.loss, global_step=self.global_step)
self.saver = tf.train.Saver(sharded=True)
def main(_):
# Create the output path. If this fails, it really ought to fail
# now. :)
if not os.path.isdir(FLAGS.output_base_path):
os.makedirs(FLAGS.output_base_path)
# Create and run model
with tf.Graph().as_default():
model = SwivelModel(FLAGS)
# Create a session for running Ops on the Graph.
gpu_options = tf.GPUOptions(
per_process_gpu_memory_fraction=FLAGS.per_process_gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
# Run the Op to initialize the variables.
sess.run(tf.initialize_all_variables())
# Start feeding input
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# Calculate how many steps each thread should run
n_total_steps = int(FLAGS.num_epochs * model.n_rows * model.n_cols) / (
FLAGS.submatrix_rows * FLAGS.submatrix_cols)
n_steps_per_thread = n_total_steps / FLAGS.num_concurrent_steps
n_submatrices_to_train = model.n_submatrices * FLAGS.num_epochs
t0 = [time.time()]
def TrainingFn():
for _ in range(n_steps_per_thread):
_, global_step = sess.run([model.train_op, model.global_step])
n_steps_between_status_updates = 100
if (global_step % n_steps_between_status_updates) == 0:
elapsed = float(time.time() - t0[0])
print '%d/%d submatrices trained (%.1f%%), %.1f submatrices/sec' % (
global_step, n_submatrices_to_train,
100.0 * global_step / n_submatrices_to_train,
n_steps_between_status_updates / elapsed)
t0[0] = time.time()
# Start training threads
train_threads = []
for _ in range(FLAGS.num_concurrent_steps):
t = threading.Thread(target=TrainingFn)
train_threads.append(t)
t.start()
# Wait for threads to finish.
for t in train_threads:
t.join()
coord.request_stop()
coord.join(threads)
# Write out vectors
write_embeddings_to_disk(FLAGS, model, sess)
#Shutdown
sess.close()
if __name__ == '__main__':
tf.app.run()
| models-master | swivel/swivel.py |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Prepare a corpus for processing by swivel.
Creates a sharded word co-occurrence matrix from a text file input corpus.
Usage:
prep.py --output_dir <output-dir> --input <text-file>
Options:
--input <filename>
The input text.
--output_dir <directory>
Specifies the output directory where the various Swivel data
files should be placed.
--shard_size <int>
Specifies the shard size; default 4096.
--min_count <int>
Specifies the minimum number of times a word should appear
to be included in the vocabulary; default 5.
--max_vocab <int>
Specifies the maximum vocabulary size; default shard size
times 1024.
--vocab <filename>
Use the specified unigram vocabulary instead of generating
it from the corpus.
--window_size <int>
Specifies the window size for computing co-occurrence stats;
default 10.
--bufsz <int>
The number of co-occurrences that are buffered; default 16M.
"""
import itertools
import math
import os
import struct
import sys
import tensorflow as tf
flags = tf.app.flags
flags.DEFINE_string('input', '', 'The input text.')
flags.DEFINE_string('output_dir', '/tmp/swivel_data',
'Output directory for Swivel data')
flags.DEFINE_integer('shard_size', 4096, 'The size for each shard')
flags.DEFINE_integer('min_count', 5,
'The minimum number of times a word should occur to be '
'included in the vocabulary')
flags.DEFINE_integer('max_vocab', 4096 * 64, 'The maximum vocabulary size')
flags.DEFINE_string('vocab', '', 'Vocabulary to use instead of generating one')
flags.DEFINE_integer('window_size', 10, 'The window size')
flags.DEFINE_integer('bufsz', 16 * 1024 * 1024,
'The number of co-occurrences to buffer')
FLAGS = flags.FLAGS
shard_cooc_fmt = struct.Struct('iif')
def words(line):
"""Splits a line of text into tokens."""
return line.strip().split()
def create_vocabulary(lines):
"""Reads text lines and generates a vocabulary."""
lines.seek(0, os.SEEK_END)
nbytes = lines.tell()
lines.seek(0, os.SEEK_SET)
vocab = {}
for lineno, line in enumerate(lines, start=1):
for word in words(line):
vocab.setdefault(word, 0)
vocab[word] += 1
if lineno % 100000 == 0:
pos = lines.tell()
sys.stdout.write('\rComputing vocabulary: %0.1f%% (%d/%d)...' % (
100.0 * pos / nbytes, pos, nbytes))
sys.stdout.flush()
sys.stdout.write('\n')
vocab = [(tok, n) for tok, n in vocab.iteritems() if n >= FLAGS.min_count]
vocab.sort(key=lambda kv: (-kv[1], kv[0]))
num_words = max(len(vocab), FLAGS.shard_size)
num_words = min(len(vocab), FLAGS.max_vocab)
if num_words % FLAGS.shard_size != 0:
num_words -= num_words % FLAGS.shard_size
if not num_words:
raise Exception('empty vocabulary')
print 'vocabulary contains %d tokens' % num_words
vocab = vocab[:num_words]
return [tok for tok, n in vocab]
def write_vocab_and_sums(vocab, sums, vocab_filename, sums_filename):
"""Writes vocabulary and marginal sum files."""
with open(os.path.join(FLAGS.output_dir, vocab_filename), 'w') as vocab_out:
with open(os.path.join(FLAGS.output_dir, sums_filename), 'w') as sums_out:
for tok, cnt in itertools.izip(vocab, sums):
print >> vocab_out, tok
print >> sums_out, cnt
def compute_coocs(lines, vocab):
"""Compute the co-occurrence statistics from the text.
This generates a temporary file for each shard that contains the intermediate
counts from the shard: these counts must be subsequently sorted and collated.
"""
word_to_id = {tok: idx for idx, tok in enumerate(vocab)}
lines.seek(0, os.SEEK_END)
nbytes = lines.tell()
lines.seek(0, os.SEEK_SET)
num_shards = len(vocab) / FLAGS.shard_size
shardfiles = {}
for row in range(num_shards):
for col in range(num_shards):
filename = os.path.join(
FLAGS.output_dir, 'shard-%03d-%03d.tmp' % (row, col))
shardfiles[(row, col)] = open(filename, 'w+')
def flush_coocs():
for (row_id, col_id), cnt in coocs.iteritems():
row_shard = row_id % num_shards
row_off = row_id / num_shards
col_shard = col_id % num_shards
col_off = col_id / num_shards
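      # Token ids are assigned to shards round-robin (shard = id % num_shards) with within-shard
      # offset id / num_shards; this matches the `row + num_shards * i` global indexing used when
      # the shards are serialized in write_shards below.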
# Since we only stored (a, b), we emit both (a, b) and (b, a).
shardfiles[(row_shard, col_shard)].write(
shard_cooc_fmt.pack(row_off, col_off, cnt))
shardfiles[(col_shard, row_shard)].write(
shard_cooc_fmt.pack(col_off, row_off, cnt))
coocs = {}
sums = [0.0] * len(vocab)
for lineno, line in enumerate(lines, start=1):
# Computes the word IDs for each word in the sentence. This has the effect
# of "stretching" the window past OOV tokens.
wids = filter(
lambda wid: wid is not None,
(word_to_id.get(w) for w in words(line)))
for pos in xrange(len(wids)):
lid = wids[pos]
window_extent = min(FLAGS.window_size + 1, len(wids) - pos)
for off in xrange(1, window_extent):
rid = wids[pos + off]
pair = (min(lid, rid), max(lid, rid))
count = 1.0 / off
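        # Harmonic weighting: a co-occurrence at offset `off` contributes 1 / off, so nearby
        # tokens count more than distant ones within the window.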
sums[lid] += count
sums[rid] += count
coocs.setdefault(pair, 0.0)
coocs[pair] += count
sums[lid] += 1.0
pair = (lid, lid)
coocs.setdefault(pair, 0.0)
coocs[pair] += 0.5 # Only add 1/2 since we output (a, b) and (b, a)
if lineno % 10000 == 0:
pos = lines.tell()
sys.stdout.write('\rComputing co-occurrences: %0.1f%% (%d/%d)...' % (
100.0 * pos / nbytes, pos, nbytes))
sys.stdout.flush()
if len(coocs) > FLAGS.bufsz:
flush_coocs()
coocs = {}
flush_coocs()
sys.stdout.write('\n')
return shardfiles, sums
def write_shards(vocab, shardfiles):
"""Processes the temporary files to generate the final shard data.
The shard data is stored as a tf.Example protos using a TFRecordWriter. The
temporary files are removed from the filesystem once they've been processed.
"""
num_shards = len(vocab) / FLAGS.shard_size
ix = 0
for (row, col), fh in shardfiles.iteritems():
ix += 1
sys.stdout.write('\rwriting shard %d/%d' % (ix, len(shardfiles)))
sys.stdout.flush()
# Read the entire binary co-occurrence and unpack it into an array.
fh.seek(0)
buf = fh.read()
os.unlink(fh.name)
fh.close()
coocs = [
shard_cooc_fmt.unpack_from(buf, off)
for off in range(0, len(buf), shard_cooc_fmt.size)]
# Sort and merge co-occurrences for the same pairs.
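# Duplicates can appear because the in-memory buffer is flushed to disk
# whenever it exceeds FLAGS.bufsz, so the same (row, col) pair may have been
# written several times with partial counts.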
coocs.sort()
if coocs:
current_pos = 0
current_row_col = (coocs[current_pos][0], coocs[current_pos][1])
for next_pos in range(1, len(coocs)):
next_row_col = (coocs[next_pos][0], coocs[next_pos][1])
if current_row_col == next_row_col:
coocs[current_pos] = (
coocs[current_pos][0],
coocs[current_pos][1],
coocs[current_pos][2] + coocs[next_pos][2])
else:
current_pos += 1
if current_pos < next_pos:
coocs[current_pos] = coocs[next_pos]
current_row_col = (coocs[current_pos][0], coocs[current_pos][1])
coocs = coocs[:(1 + current_pos)]
# Convert to a TF Example proto.
def _int64s(xs):
return tf.train.Feature(int64_list=tf.train.Int64List(value=list(xs)))
def _floats(xs):
return tf.train.Feature(float_list=tf.train.FloatList(value=list(xs)))
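# Feature layout for each shard proto: 'global_row'/'global_col' list the
# vocabulary IDs covered by this shard (row + num_shards * i for each of the
# shard_size positions), while the 'sparse_*' features encode the non-zero
# co-occurrence cells in coordinate (local row, local col, value) form.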
example = tf.train.Example(features=tf.train.Features(feature={
'global_row': _int64s(
row + num_shards * i for i in range(FLAGS.shard_size)),
'global_col': _int64s(
col + num_shards * i for i in range(FLAGS.shard_size)),
'sparse_local_row': _int64s(cooc[0] for cooc in coocs),
'sparse_local_col': _int64s(cooc[1] for cooc in coocs),
'sparse_value': _floats(cooc[2] for cooc in coocs),
}))
filename = os.path.join(FLAGS.output_dir, 'shard-%03d-%03d.pb' % (row, col))
with open(filename, 'w') as out:
out.write(example.SerializeToString())
sys.stdout.write('\n')
def main(_):
# Create the output directory, if necessary
if FLAGS.output_dir and not os.path.isdir(FLAGS.output_dir):
os.makedirs(FLAGS.output_dir)
# Read the file once to create the vocabulary.
if FLAGS.vocab:
with open(FLAGS.vocab, 'r') as lines:
vocab = [line.strip() for line in lines]
else:
with open(FLAGS.input, 'r') as lines:
vocab = create_vocabulary(lines)
# Now read the file again to determine the co-occurrence stats.
with open(FLAGS.input, 'r') as lines:
shardfiles, sums = compute_coocs(lines, vocab)
# Write each shard out as its own serialized tf.Example file.
write_shards(vocab, shardfiles)
# Now write the marginals. They're symmetric for this application.
write_vocab_and_sums(vocab, sums, 'row_vocab.txt', 'row_sums.txt')
write_vocab_and_sums(vocab, sums, 'col_vocab.txt', 'col_sums.txt')
print 'done!'
if __name__ == '__main__':
tf.app.run()
| models-master | swivel/prep.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mmap
import numpy as np
import os
import struct
class Vecs(object):
def __init__(self, vocab_filename, rows_filename, cols_filename=None):
"""Initializes the vectors from a text vocabulary and binary data."""
with open(vocab_filename, 'r') as lines:
self.vocab = [line.split()[0] for line in lines]
self.word_to_idx = {word: idx for idx, word in enumerate(self.vocab)}
n = len(self.vocab)
with open(rows_filename, 'r') as rows_fh:
rows_fh.seek(0, os.SEEK_END)
size = rows_fh.tell()
# Make sure that the file size seems reasonable.
if size % (4 * n) != 0:
raise IOError(
'unexpected file size for binary vector file %s' % rows_filename)
# Memory map the rows.
dim = size / (4 * n)
rows_mm = mmap.mmap(rows_fh.fileno(), 0, prot=mmap.PROT_READ)
rows = np.matrix(
np.frombuffer(rows_mm, dtype=np.float32).reshape(n, dim))
# If column vectors were specified, then open them and add them to the row
# vectors.
if cols_filename:
with open(cols_filename, 'r') as cols_fh:
cols_mm = mmap.mmap(cols_fh.fileno(), 0, prot=mmap.PROT_READ)
cols_fh.seek(0, os.SEEK_END)
if cols_fh.tell() != size:
raise IOError('row and column vector files have different sizes')
cols = np.matrix(
np.frombuffer(cols_mm, dtype=np.float32).reshape(n, dim))
rows += cols
cols_mm.close()
# Normalize so that dot products are just cosine similarity.
self.vecs = rows / np.linalg.norm(rows, axis=1).reshape(n, 1)
rows_mm.close()
def similarity(self, word1, word2):
"""Computes the similarity of two tokens."""
idx1 = self.word_to_idx.get(word1)
idx2 = self.word_to_idx.get(word2)
if idx1 is None or idx2 is None:  # index 0 is a valid ID, so test for None
return None
return float(self.vecs[idx1] * self.vecs[idx2].transpose())
def neighbors(self, query):
"""Returns the nearest neighbors to the query (a word or vector)."""
if isinstance(query, basestring):
idx = self.word_to_idx.get(query)
if idx is None:
return None
query = self.vecs[idx]
neighbors = self.vecs * query.transpose()
return sorted(
zip(self.vocab, neighbors.flat),
key=lambda kv: kv[1], reverse=True)
def lookup(self, word):
"""Returns the embedding for a token, or None if no embedding exists."""
idx = self.word_to_idx.get(word)
return None if idx is None else self.vecs[idx]
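# Example usage (a minimal sketch; it assumes 'vocab.txt' and 'vecs.bin' were
# produced by text2bin.py and live in the working directory):
#
#   from vecs import Vecs
#   vecs = Vecs('vocab.txt', 'vecs.bin')
#   print vecs.similarity('king', 'queen')
#   for word, sim in vecs.neighbors('king')[:10]:
#     print '%0.4f %s' % (sim, word)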
| models-master | swivel/vecs.py |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Computes Spearman's rho with respect to human judgements.
Given a set of row (and potentially column) embeddings, this computes Spearman's
rho between the rank ordering of predicted word similarity and human judgements.
Usage:
wordsim.py --embeddings=<binvecs> --vocab=<vocab> eval1.tab eval2.tab ...
Options:
--embeddings=<filename>: the vectors to test
--vocab=<filename>: the vocabulary file
Evaluation files are assumed to be tab-separated files with exactly three
columns. The first two columns contain the words, and the third column contains
the scored human judgement.
"""
import scipy.stats
import sys
from getopt import GetoptError, getopt
from vecs import Vecs
try:
opts, args = getopt(sys.argv[1:], '', ['embeddings=', 'vocab='])
except GetoptError, e:
print >> sys.stderr, e
sys.exit(2)
opt_embeddings = None
opt_vocab = None
for o, a in opts:
if o == '--embeddings':
opt_embeddings = a
if o == '--vocab':
opt_vocab = a
if not opt_vocab:
print >> sys.stderr, 'please specify a vocabulary file with "--vocab"'
sys.exit(2)
if not opt_embeddings:
print >> sys.stderr, 'please specify the embeddings with "--embeddings"'
sys.exit(2)
try:
vecs = Vecs(opt_vocab, opt_embeddings)
except IOError, e:
print >> sys.stderr, e
sys.exit(1)
def evaluate(lines):
"""Computes Spearman's rho for one open evaluation file."""
acts, preds = [], []
for line in lines:
w1, w2, act = line.strip().split('\t')
pred = vecs.similarity(w1, w2)
if pred is None:
continue
acts.append(float(act))
preds.append(pred)
rho, _ = scipy.stats.spearmanr(acts, preds)
return rho
for filename in args:
with open(filename, 'r') as lines:
print '%0.3f %s' % (evaluate(lines), filename)
| models-master | swivel/wordsim.py |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts a Glove binary co-occurrence matrix into Swivel shards.
Usage:
glove_to_shards.py --input <coocs> --vocab <vocab> --output_dir <output_dir>
Options
--input <coocs>
The Glove co-occurrence file.
--vocab <vocab>
Path to the vocabulary text file, one token per line.
--output_dir <directory>
Specifies the output directory where the various Swivel data
files should be placed.
--shard_size <int>
Specifies the shard size; default 4096.
"""
from __future__ import print_function
import itertools
import os
import struct
import sys
import tensorflow as tf
flags = tf.app.flags
flags.DEFINE_string('input', 'coocurrences.bin', 'Glove co-occurrence file')
flags.DEFINE_string('vocab', 'vocab.txt', 'Vocabulary file')
flags.DEFINE_string('output_dir', '/tmp/swivel_data', 'Output directory')
flags.DEFINE_integer('shard_size', 4096, 'Shard size')
FLAGS = tf.app.flags.FLAGS
glove_cooc_fmt = struct.Struct('iid')
shard_cooc_fmt = struct.Struct('if')
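# Record layouts: the Glove binary file stores one (int word1, int word2,
# double count) triple per co-occurrence ('iid'), while the temporary shard
# files store (int position-within-shard, float count) pairs ('if') that are
# later unpacked into sparse local row/column indices.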
def make_shard_files(coocs, nshards, vocab_sz):
"""Chops the binary Glove co-occurrence matrix into shards.
This reads the Glove binary co-occurrence file and assigns individual
co-occurrence counts to the appropriate Swivel shard.
Args:
coocs: the co-occurrence file to read
nshards: the number of shards along one dimension of the square matrix
vocab_sz: the vocabulary size
Returns:
A (shard_table, marginals) tuple. The shard_table maps the row and column
shard ID to a file handle containing the co-occurrences for that shard; the
marginals contain the marginal sums.
"""
row_sums = [0] * vocab_sz
col_sums = [0] * vocab_sz
coocs.seek(0, os.SEEK_END)
ncoocs = coocs.tell() / glove_cooc_fmt.size
coocs.seek(0, os.SEEK_SET)
shard_files = {}
for row in range(nshards):
for col in range(nshards):
filename = os.path.join(
FLAGS.output_dir, 'shard-%03d-%03d.bin' % (row, col))
shard_files[(row, col)] = open(filename, 'w+')
for ix in xrange(ncoocs):
if ix % 1000000 == 0:
sys.stdout.write('\rsharding co-occurrences: %0.1f%% (%d/%d)' % (
100.0 * ix / ncoocs, ix, ncoocs))
sys.stdout.flush()
bits = coocs.read(glove_cooc_fmt.size)
if not bits:
break
# Glove has 1-indexed IDs.
row_id, col_id, cnt = glove_cooc_fmt.unpack(bits)
if row_id > vocab_sz or col_id > vocab_sz:
continue
row_id -= 1
row_shard = row_id % nshards
row_off = row_id / nshards
col_id -= 1
col_shard = col_id % nshards
col_off = col_id / nshards
shard_pos = row_off * FLAGS.shard_size + col_off # row major
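# The flattened position is decoded later as (pos / shard_size,
# pos % shard_size) to recover the local (row, col) within the shard.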
shard_files[(row_shard, col_shard)].write(
shard_cooc_fmt.pack(shard_pos, cnt))
# Accumulate marginals.
row_sums[row_id] += cnt
col_sums[col_id] += cnt
sys.stdout.write('\n')
if any(abs(r - c) > 0.1 for r, c in itertools.izip(row_sums, col_sums)):
print('WARNING! Row and column marginals differ; is your matrix symmetric?',
file=sys.stderr)
return (shard_files, row_sums)
def main(_):
with open(FLAGS.vocab, 'r') as lines:
orig_vocab_sz = sum(1 for _ in lines)
shard_sz = FLAGS.shard_size
vocab_sz = orig_vocab_sz - orig_vocab_sz % shard_sz
nshards = vocab_sz / shard_sz
print('vocab size is %d (originally %d), %d %dx%d-element shards' % (
vocab_sz, orig_vocab_sz, nshards * nshards, shard_sz, shard_sz))
# Create the output directory, if necessary
if FLAGS.output_dir and not os.path.isdir(FLAGS.output_dir):
os.makedirs(FLAGS.output_dir)
with open(FLAGS.input, 'r') as coocs:
shard_files, marginals = make_shard_files(coocs, nshards, vocab_sz)
# Now sort the shards and write the TFRecords.
filename = os.path.join(FLAGS.output_dir, 'shards.recs')
with tf.python_io.TFRecordWriter(filename) as writer:
ix = 0
for (row, col), fh in shard_files.iteritems():
ix += 1
sys.stdout.write('\rwriting shard %d/%d' % (ix, len(shard_files)))
sys.stdout.flush()
fh.seek(0)
buf = fh.read()
os.unlink(fh.name)
fh.close()
coocs = [
shard_cooc_fmt.unpack_from(buf, off)
for off in range(0, len(buf), shard_cooc_fmt.size)]
# N.B. we assume that there aren't any duplicates here!
coocs.sort(key=lambda kv: kv[0])
def _int64s(xs):
return tf.train.Feature(int64_list=tf.train.Int64List(value=list(xs)))
def _floats(xs):
return tf.train.Feature(float_list=tf.train.FloatList(value=list(xs)))
example = tf.train.Example(features=tf.train.Features(feature={
'global_row': _int64s(row + nshards * i for i in range(shard_sz)),
'global_col': _int64s(col + nshards * i for i in range(shard_sz)),
'sparse_local_row': _int64s(pos / shard_sz for pos, _ in coocs),
'sparse_local_col': _int64s(pos % shard_sz for pos, _ in coocs),
'sparse_value': _floats(cnt for _, cnt in coocs)}))
writer.write(example.SerializeToString())
print('\nwriting marginals...')
with open(os.path.join(FLAGS.output_dir, 'marginals.txt'), 'w') as fh:
for cnt in marginals:
fh.write('%0.1f\n' % cnt)
print('done!')
if __name__ == '__main__':
tf.app.run()
| models-master | swivel/glove_to_shards.py |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts vectors from text to a binary format for quicker manipulation.
Usage:
text2bin.py -o <out> -v <vocab> vec1.txt [vec2.txt ...]
Options:
-o <filename>, --output <filename>
The name of the file into which the binary vectors are written.
-v <filename>, --vocab <filename>
The name of the file into which the vocabulary is written.
Description
This program merges one or more whitespace separated vector files into a single
binary vector file that can be used by downstream evaluation tools in this
directory ("wordsim.py" and "analogy").
If more than one vector file is specified, then the files must be aligned
row-wise (i.e., each line must correspond to the same embedding), and they must
have the same number of columns (i.e., be the same dimension).
"""
from itertools import izip
from getopt import GetoptError, getopt
import os
import struct
import sys
try:
opts, args = getopt(
sys.argv[1:], 'o:v:', ['output=', 'vocab='])
except GetoptError, e:
print >> sys.stderr, e
sys.exit(2)
opt_output = 'vecs.bin'
opt_vocab = 'vocab.txt'
for o, a in opts:
if o in ('-o', '--output'):
opt_output = a
if o in ('-v', '--vocab'):
opt_vocab = a
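# Output format: the vocabulary file gets one token per line, and the binary
# file gets the corresponding embeddings as consecutive rows of native-endian
# float32 values (dim floats per token), which is what vecs.py expects to
# memory-map.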
def go(fhs):
fmt = None
with open(opt_vocab, 'w') as vocab_out:
with open(opt_output, 'w') as vecs_out:
for lines in izip(*fhs):
parts = [line.split() for line in lines]
token = parts[0][0]
if any(part[0] != token for part in parts[1:]):
raise IOError('vector files must be aligned')
print >> vocab_out, token
vec = [sum(float(x) for x in xs) for xs in zip(*parts)[1:]]
if not fmt:
fmt = struct.Struct('%df' % len(vec))
vecs_out.write(fmt.pack(*vec))
if args:
fhs = [open(filename) for filename in args]
go(fhs)
for fh in fhs:
fh.close()
else:
go([sys.stdin])
| models-master | swivel/text2bin.py |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple tool for inspecting nearest neighbors and analogies."""
import re
import sys
from getopt import GetoptError, getopt
from vecs import Vecs
try:
opts, args = getopt(sys.argv[1:], 'v:e:', ['vocab=', 'embeddings='])
except GetoptError, e:
print >> sys.stderr, e
sys.exit(2)
opt_vocab = 'vocab.txt'
opt_embeddings = None
for o, a in opts:
if o in ('-v', '--vocab'):
opt_vocab = a
if o in ('-e', '--embeddings'):
opt_embeddings = a
vecs = Vecs(opt_vocab, opt_embeddings)
while True:
sys.stdout.write('query> ')
sys.stdout.flush()
query = sys.stdin.readline().strip()
if not query:
break
parts = re.split(r'\s+', query)
if len(parts) == 1:
res = vecs.neighbors(parts[0])
elif len(parts) == 3:
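# Three words a b c are treated as the analogy "a is to b as c is to ?",
# answered by ranking neighbours of the offset vector c - a + b.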
vs = [vecs.lookup(w) for w in parts]
if any(v is None for v in vs):
print 'not in vocabulary: %s' % (
', '.join(tok for tok, v in zip(parts, vs) if v is None))
continue
res = vecs.neighbors(vs[2] - vs[0] + vs[1])
else:
print 'use a single word to query neighbors, or three words for analogy'
continue
if not res:
continue
for word, sim in res[:20]:
print '%0.4f: %s' % (sim, word)
print
| models-master | swivel/nearest.py |
import tensorflow as tf
print("hi")
# Creates a graph.
a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
print("consts")
c = tf.matmul(a, b)
# Creates a session with log_device_placement set to True.
print("create session")
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
#sess = tf.Session()
# Runs the op.
print "starting"
print sess.run(c)
| models-master | inception/GPU-test.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""A binary to train Inception in a distributed manner using multiple systems.
Please see accompanying README.md for details and instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from inception import inception_distributed_train
from inception import alexnet_distributed_train
from inception.imagenet_data import ImagenetData
FLAGS = tf.app.flags.FLAGS
def main(unused_args):
assert FLAGS.job_name in ['ps', 'worker'], 'job_name must be ps or worker'
# Extract all the hostnames for the ps and worker jobs to construct the
# cluster spec.
ps_hosts = FLAGS.ps_hosts.split(',')
worker_hosts = FLAGS.worker_hosts.split(',')
tf.logging.info('PS hosts are: %s' % ps_hosts)
tf.logging.info('Worker hosts are: %s' % worker_hosts)
cluster_spec = tf.train.ClusterSpec({'ps': ps_hosts,
'worker': worker_hosts})
server = tf.train.Server(
{'ps': ps_hosts,
'worker': worker_hosts},
job_name=FLAGS.job_name,
task_index=FLAGS.task_id)
if FLAGS.job_name == 'ps':
# `ps` jobs wait for incoming connections from the workers.
server.join()
else:
# `worker` jobs will actually do the work.
dataset = ImagenetData(subset=FLAGS.subset)
assert dataset.data_files()
# Only the chief checks for or creates train_dir.
if FLAGS.task_id == 0:
if not tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.MakeDirs(FLAGS.train_dir)
if FLAGS.alexnet:
alexnet_distributed_train.train(server.target, dataset, cluster_spec)
else:
inception_distributed_train.train(server.target, dataset, cluster_spec)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
| models-master | inception/inception/imagenet_distributed_train.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Build the Inception v3 network on ImageNet data set.
The Inception v3 architecture is described in http://arxiv.org/abs/1512.00567
Summary of available functions:
inference: Compute inference on the model inputs to make a prediction
loss: Compute the loss of the prediction with respect to the labels
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
from inception.slim import slim, ops
FLAGS = tf.app.flags.FLAGS
# If a model is trained using multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
# Batch normalization. Constant governing the exponential moving average of
# the 'global' mean and variance for all activations.
BATCHNORM_MOVING_AVERAGE_DECAY = 0.9997
# The decay to use for the moving average.
MOVING_AVERAGE_DECAY = 0.9999
def inference(images, num_classes, for_training=False, restore_logits=True,
scope=None):
"""Build Inception v3 model architecture.
See here for reference: http://arxiv.org/abs/1512.00567
Args:
images: Images returned from inputs() or distorted_inputs().
num_classes: number of classes
for_training: If set to `True`, build the inference model for training.
Kernels that operate differently for inference during training
e.g. dropout, are appropriately configured.
restore_logits: whether or not the logits layers should be restored.
Useful for fine-tuning a model with different num_classes.
scope: optional prefix string identifying the ImageNet tower.
Returns:
Logits. 2-D float Tensor.
Auxiliary Logits. 2-D float Tensor of side-head. Used for training only.
"""
# Parameters for BatchNorm.
batch_norm_params = {
# Decay for the moving averages.
'decay': BATCHNORM_MOVING_AVERAGE_DECAY,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
}
# Set weight_decay for weights in Conv and FC layers.
with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004):
with slim.arg_scope([slim.ops.conv2d],
stddev=0.1,
activation=tf.nn.relu,
batch_norm_params=batch_norm_params):
logits, endpoints = slim.inception.inception_v3(
images,
dropout_keep_prob=0.8,
num_classes=num_classes,
is_training=for_training,
restore_logits=restore_logits,
scope=scope)
# Add summaries for viewing model statistics on TensorBoard.
_activation_summaries(endpoints)
# Grab the logits associated with the side head. Employed during training.
auxiliary_logits = endpoints['aux_logits']
return logits, auxiliary_logits
def inference_backup(images, num_classes, for_training=False, restore_logits=True,
scope=None):
"""Build Inception v3 model architecture.
See here for reference: http://arxiv.org/abs/1512.00567
Args:
images: Images returned from inputs() or distorted_inputs().
num_classes: number of classes
for_training: If set to `True`, build the inference model for training.
Kernels that operate differently for inference during training
e.g. dropout, are appropriately configured.
restore_logits: whether or not the logits layers should be restored.
Useful for fine-tuning a model with different num_classes.
scope: optional prefix string identifying the ImageNet tower.
Returns:
Logits. 2-D float Tensor.
"""
parameters = []
# conv1
with tf.name_scope('conv1') as scope:
kernel = tf.Variable(tf.truncated_normal([11, 11, 3, 64], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(images, kernel, [1, 4, 4, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
trainable=True, name='biases')
bias = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(bias, name=scope)
parameters += [kernel, biases]
# lrn1
# TODO(shlens, jiayq): Add a GPU version of local response normalization.
# pool1
pool1 = tf.nn.max_pool(conv1,
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding='VALID',
name='pool1')
# conv2
with tf.name_scope('conv2') as scope:
kernel = tf.Variable(tf.truncated_normal([5, 5, 64, 192], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32),
trainable=True, name='biases')
bias = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(bias, name=scope)
parameters += [kernel, biases]
# pool2
pool2 = tf.nn.max_pool(conv2,
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding='VALID',
name='pool2')
# conv3
with tf.name_scope('conv3') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 192, 384],
dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[384], dtype=tf.float32),
trainable=True, name='biases')
bias = tf.nn.bias_add(conv, biases)
conv3 = tf.nn.relu(bias, name=scope)
parameters += [kernel, biases]
# conv4
with tf.name_scope('conv4') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 384, 256],
dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
trainable=True, name='biases')
bias = tf.nn.bias_add(conv, biases)
conv4 = tf.nn.relu(bias, name=scope)
parameters += [kernel, biases]
# conv5
with tf.name_scope('conv5') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256],
dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(conv4, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
trainable=True, name='biases')
bias = tf.nn.bias_add(conv, biases)
conv5 = tf.nn.relu(bias, name=scope)
parameters += [kernel, biases]
# pool5
pool5 = tf.nn.max_pool(conv5,
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding='VALID',
name='pool5')
net = ops.flatten(pool5, scope='flatten')
with tf.name_scope('fc6') as scope:
vec_rep = ops.fc(net, 4096, activation=tf.nn.relu, scope=scope)
with tf.name_scope('fc7') as scope:
logits = ops.fc(vec_rep, num_classes, activation=tf.nn.relu, scope=scope)
return logits
def loss(logits, labels, batch_size=None):
"""Adds all losses for the model.
Note the final loss is not returned. Instead, the list of losses are collected
by slim.losses. The losses are accumulated in tower_loss() and summed to
calculate the total loss.
Args:
logits: List of logits from inference(). Each entry is a 2-D float Tensor.
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
batch_size: integer
"""
if not batch_size:
batch_size = FLAGS.batch_size
# Reshape the labels into a dense Tensor of
# shape [FLAGS.batch_size, num_classes].
sparse_labels = tf.reshape(labels, [batch_size, 1])
indices = tf.reshape(tf.range(batch_size), [batch_size, 1])
concated = tf.concat(1, [indices, sparse_labels])
num_classes = logits[0].get_shape()[-1].value
dense_labels = tf.sparse_to_dense(concated,
[batch_size, num_classes],
1.0, 0.0)
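# e.g. with batch_size=2, num_classes=4 and labels [1, 3], dense_labels is
# [[0, 1, 0, 0], [0, 0, 0, 1]] (one-hot rows, before label smoothing).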
# Cross entropy loss for the main softmax prediction.
slim.losses.cross_entropy_loss(logits[0],
dense_labels,
label_smoothing=0.1,
weight=1.0)
# Cross entropy loss for the auxiliary softmax head.
slim.losses.cross_entropy_loss(logits[1],
dense_labels,
label_smoothing=0.1,
weight=0.4,
scope='aux_loss')
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measure the sparsity of activations.
Args:
x: Tensor
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.histogram_summary(tensor_name + '/activations', x)
tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _activation_summaries(endpoints):
with tf.name_scope('summaries'):
for act in endpoints.values():
_activation_summary(act)
| models-master | inception/inception/alexnet_model.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to evaluate Inception on the flowers data set.
Note that using the supplied pre-trained inception checkpoint, the eval should
achieve:
precision @ 1 = 0.7874 recall @ 5 = 0.9436 [50000 examples]
See the README.md for more details.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from inception import inception_eval
from inception.imagenet_data import ImagenetData
FLAGS = tf.app.flags.FLAGS
def main(unused_argv=None):
dataset = ImagenetData(subset=FLAGS.subset)
assert dataset.data_files()
if tf.gfile.Exists(FLAGS.eval_dir):
tf.gfile.DeleteRecursively(FLAGS.eval_dir)
tf.gfile.MakeDirs(FLAGS.eval_dir)
inception_eval.evaluate(dataset)
if __name__ == '__main__':
tf.app.run()
| models-master | inception/inception/imagenet_eval.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Small library that points to the ImageNet data set.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from inception.dataset import Dataset
HACK = True
class ImagenetData(Dataset):
"""ImageNet data set."""
def __init__(self, subset):
super(ImagenetData, self).__init__('ImageNet', subset)
def num_classes(self):
"""Returns the number of classes in the data set."""
return 8
def num_examples_per_epoch(self):
"""Returns the number of examples in the data set."""
# Bounding box data consists of 615299 bounding boxes for 544546 images.
if self.subset == 'train':
if HACK:
return 10400
return 1281167
if self.subset == 'validation':
if HACK:
return 0
return 50000
def download_message(self):
"""Instruction to download and extract the tarball from Flowers website."""
print('Failed to find any ImageNet %s files'% self.subset)
print('')
print('If you have already downloaded and processed the data, then make '
'sure to set --data_dir to point to the directory containing the '
'location of the sharded TFRecords.\n')
print('If you have not downloaded and prepared the ImageNet data in the '
'TFRecord format, you will need to do this at least once. This '
'process could take several hours depending on the speed of your '
'computer and network connection\n')
print('Please see README.md for instructions on how to build '
'the ImageNet dataset using download_and_preprocess_imagenet.\n')
print('Note that the raw data size is 300 GB and the processed data size '
'is 150 GB. Please ensure you have at least 500GB disk space.')
| models-master | inception/inception/imagenet_data.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library to train Inception using multiple replicas with synchronous update.
Please see accompanying README.md for details and instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import time
import numpy as np
import tensorflow as tf
from inception import image_processing, compute_group_optimizer
from inception import inception_model as inception
from inception.slim import slim
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('job_name', '', 'One of "ps", "worker"')
tf.app.flags.DEFINE_string('ps_hosts', '',
"""Comma-separated list of hostname:port for the """
"""parameter server jobs. e.g. """
"""'machine1:2222,machine2:1111,machine2:2222'""")
tf.app.flags.DEFINE_string('worker_hosts', '',
"""Comma-separated list of hostname:port for the """
"""worker jobs. e.g. """
"""'machine1:2222,machine2:1111,machine2:2222'""")
tf.app.flags.DEFINE_string('train_dir', '/lfs/local/0/daniter/16-node-snapshot',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000, 'Number of batches to run.')
tf.app.flags.DEFINE_string('subset', 'train', 'Either "train" or "validation".')
tf.app.flags.DEFINE_boolean('log_device_placement', False,
'Whether to log device placement.')
# Task ID is used to select the chief and also to access the local_step for
# each replica to check staleness of the gradients in sync_replicas_optimizer.
tf.app.flags.DEFINE_integer(
'task_id', 0, 'Task ID of the worker/replica running the training.')
# More details can be found in the sync_replicas_optimizer class:
# tensorflow/python/training/sync_replicas_optimizer.py
tf.app.flags.DEFINE_integer('num_replicas_to_aggregate', -1,
"""Number of gradients to collect before """
"""updating the parameters.""")
tf.app.flags.DEFINE_integer('save_interval_secs', 25*60,
'Save interval seconds.')
tf.app.flags.DEFINE_integer('save_summaries_secs', 30000, #300,
'Save summaries interval seconds.')
# **IMPORTANT**
# Please note that this learning rate schedule is heavily dependent on the
# hardware architecture, batch size and any changes to the model architecture
# specification. Selecting a finely tuned learning rate schedule is an
# empirical process that requires some experimentation. Please see README.md
# more guidance and discussion.
#
# Learning rate decay factor selected from https://arxiv.org/abs/1604.00981
tf.app.flags.DEFINE_float('initial_learning_rate', 0.01,#0.045,
'Initial learning rate.')
tf.app.flags.DEFINE_float('num_epochs_per_decay', 10000.0,#2.0,
'Epochs after which learning rate decays.')
tf.app.flags.DEFINE_float('learning_rate_decay_factor', 1.0,#0.94,
'Learning rate decay factor.')
tf.app.flags.DEFINE_float('momentum', 0.9,'Momentum term')
tf.app.flags.DEFINE_boolean('sync', True, "Use synchronous replica updates (False runs async).")
# Constants dictating the learning rate schedule.
RMSPROP_DECAY = 0.9 # Decay term for RMSProp.
RMSPROP_MOMENTUM = 0.9 # Momentum in RMSProp.
RMSPROP_EPSILON = 1.0 # Epsilon term for RMSProp.
def train(target, dataset, cluster_spec):
"""Train Inception on a dataset for a number of steps."""
# The numbers of workers and parameter servers are inferred from the worker
# and ps host strings.
num_workers = len(cluster_spec.as_dict()['worker'])
num_parameter_servers = len(cluster_spec.as_dict()['ps'])
# If no value is given, num_replicas_to_aggregate defaults to the number of
# workers.
if FLAGS.num_replicas_to_aggregate == -1:
num_replicas_to_aggregate = num_workers
else:
num_replicas_to_aggregate = FLAGS.num_replicas_to_aggregate
# Both should be greater than 0 in a distributed training.
assert num_workers > 0 and num_parameter_servers > 0, (' num_workers and '
'num_parameter_servers'
' must be > 0.')
# Choose worker 0 as the chief. Note that any worker could be the chief
# but there should be only one chief.
is_chief = (FLAGS.task_id == 0)
# Ops are assigned to worker by default.
with tf.device('/job:worker/task:%d' % FLAGS.task_id):
tf.set_random_seed(FLAGS.DANITER_SEED)
# Variables and its related init/assign ops are assigned to ps.
with slim.scopes.arg_scope(
[slim.variables.variable, slim.variables.global_step],
device=slim.variables.VariableDeviceChooser(num_parameter_servers)):
# Create a variable to count the number of train() calls. This equals the
# number of updates applied to the variables.
global_step = slim.variables.global_step()
# Calculate the learning rate schedule.
num_batches_per_epoch = (dataset.num_examples_per_epoch() /
FLAGS.batch_size)
# Decay steps need to be divided by the number of replicas to aggregate.
decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay /
num_replicas_to_aggregate)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(FLAGS.initial_learning_rate,
global_step,
decay_steps,
FLAGS.learning_rate_decay_factor,
staircase=True)
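# With staircase=True this gives
#   lr = initial_learning_rate * decay_factor ** floor(global_step / decay_steps)
# e.g. the default decay_factor of 1.0 keeps the learning rate constant.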
# Add a summary to track the learning rate.
tf.scalar_summary('learning_rate', lr)
# Create an optimizer that performs gradient descent.
#opt = tf.train.RMSPropOptimizer(lr,
# RMSPROP_DECAY,
# momentum=RMSPROP_MOMENTUM,
# epsilon=RMSPROP_EPSILON)
opt = tf.train.MomentumOptimizer(lr,FLAGS.momentum) # Tuning done for these!
tf.logging.info("Learning rate: %f, momentum: %f" % (FLAGS.initial_learning_rate, FLAGS.momentum))
images, labels = image_processing.distorted_inputs(
dataset,
batch_size=FLAGS.batch_size,
num_preprocess_threads=FLAGS.num_preprocess_threads)
# Number of classes in the Dataset label set plus 1.
# Label 0 is reserved for an (unused) background class.
num_classes = dataset.num_classes() + 1
#with tf.control_dependencies([tf.Print(images, tf.split(0, 16, images), "images:", summarize=2)]):
logits = inception.inference(images, num_classes, for_training=True)
#with tf.control_dependencies([tf.Print(logits[0], [logits[0]], "logits", summarize=10)]):
# Add classification loss.
inception.loss(logits, labels)
#Accuracy
correct_prediction = tf.nn.in_top_k(logits[0], labels, 1)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# Gather all of the losses including regularization losses.
losses = tf.get_collection(slim.losses.LOSSES_COLLECTION)
losses += tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
total_loss = tf.add_n(losses, name='total_loss')
if is_chief:
# Compute the moving average of all individual losses and the
# total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summmary to all individual losses and the total loss;
# do the same for the averaged version of the losses.
for l in losses + [total_loss]:
loss_name = l.op.name
# Name each loss as '(raw)' and name the moving average version of the
# loss as the original loss name.
tf.scalar_summary(loss_name + ' (raw)', l)
tf.scalar_summary(loss_name, loss_averages.average(l))
# Add dependency to compute loss_averages.
with tf.control_dependencies([loss_averages_op]):
total_loss = tf.identity(total_loss)
# Track the moving averages of all trainable variables.
# Note that we maintain a 'double-average' of the BatchNormalization
# global statistics.
# This is not needed when the number of replicas are small but important
# for synchronous distributed training with tens of workers/replicas.
exp_moving_averager = tf.train.ExponentialMovingAverage(
inception.MOVING_AVERAGE_DECAY, global_step)
variables_to_average = (
tf.trainable_variables() + tf.moving_average_variables())
# Add histograms for model variables.
for var in variables_to_average:
tf.histogram_summary(var.op.name, var)
# Create synchronous replica optimizer.
if FLAGS.sync:
tf.logging.info("Sync mode!!!!!!")
opt = compute_group_optimizer.ComputeGroupOptimizer(
opt,
replicas_to_aggregate=num_replicas_to_aggregate,
replica_id=FLAGS.task_id,
total_num_replicas=num_workers,
variable_averages=exp_moving_averager,
variables_to_average=variables_to_average)
batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION)
assert batchnorm_updates, 'Batchnorm updates are missing'
batchnorm_updates_op = tf.group(*batchnorm_updates)
# Add dependency to compute batchnorm_updates.
with tf.control_dependencies([batchnorm_updates_op]):
total_loss = tf.identity(total_loss)
# Compute gradients with respect to the loss.
grads = opt.compute_gradients(total_loss)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
tf.histogram_summary(var.op.name + '/gradients', grad)
apply_gradients_op = opt.apply_gradients(grads, global_step=global_step)
with tf.control_dependencies([apply_gradients_op]):
train_op = tf.identity(total_loss, name='train_op')
# Get chief queue_runners, init_tokens and clean_up_op, which is used to
# synchronize replicas.
# More details can be found in sync_replicas_optimizer.
if FLAGS.sync:
chief_queue_runners = [opt.get_chief_queue_runner()]
init_tokens_op = opt.get_init_tokens_op()
clean_up_op = opt.get_clean_up_op()
# Create a saver.
saver = tf.train.Saver(max_to_keep=24)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.merge_all_summaries()
# Build an initialization operation to run below.
init_op = tf.initialize_all_variables()
# We run the summaries in the same thread as the training operations by
# passing in None for summary_op to avoid a summary_thread being started.
# Running summaries and training operations in parallel could run out of
# GPU memory.
sv = tf.train.Supervisor(is_chief=is_chief,
logdir=FLAGS.train_dir,
init_op=init_op,
summary_op=None,
global_step=global_step,
saver=saver,
save_model_secs=0)#FLAGS.save_interval_secs)
tf.logging.info('%s Supervisor' % datetime.now())
sess_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement)
# Get a session.
sess = sv.prepare_or_wait_for_session(target, config=sess_config)
# Start the queue runners.
queue_runners = tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS)
sv.start_queue_runners(sess, queue_runners)
tf.logging.info('Started %d queues for processing input data.',
len(queue_runners))
if is_chief and FLAGS.sync:
sv.start_queue_runners(sess, chief_queue_runners)
sess.run(init_tokens_op)
# Train, checking for Nans. Concurrently run the summary operation at a
# specified interval. Note that the summary_op and train_op never run
# simultaneously in order to prevent running out of GPU memory.
tf.set_random_seed(FLAGS.DANITER_SEED)
next_summary_time = time.time() + FLAGS.save_summaries_secs
while not sv.should_stop():
try:
start_time = time.time()
loss_value, step = sess.run([train_op, global_step])
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step > FLAGS.max_steps:
break
duration = time.time() - start_time
#Print Accuracy
tf.logging.info("Step: %d, Accuracy: %f, Loss: %f" %(step, sess.run(accuracy), loss_value))
if step % 3 == 0:
examples_per_sec = FLAGS.batch_size / float(duration)
format_str = ('Worker %d: %s: step %d, loss = %.2f'
' (%.1f examples/sec; %.3f sec/batch)')
tf.logging.info(format_str %
(FLAGS.task_id, datetime.now(), step, loss_value,
examples_per_sec, duration))
# Determine if the summary_op should be run on the chief worker.
if is_chief and next_summary_time < time.time():
tf.logging.info('Running Summary operation on the chief.')
summary_str = sess.run(summary_op)
sv.summary_computed(sess, summary_str)
tf.logging.info('Finished running Summary operation.')
# Determine the next time for running the summary.
next_summary_time += FLAGS.save_summaries_secs
except:
if is_chief:
tf.logging.info('About to execute sync_clean_up_op!')
sess.run(clean_up_op)
raise
# Stop the supervisor. This also waits for service threads to finish.
sv.stop()
# Save after the training ends.
#if is_chief:
# saver.save(sess,
# os.path.join(FLAGS.train_dir, 'model.ckpt'),
# global_step=global_step)
| models-master | inception/inception/inception_distributed_train.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train Inception on the flowers data set.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from inception import inception_train
from inception.flowers_data import FlowersData
FLAGS = tf.app.flags.FLAGS
def main(_):
dataset = FlowersData(subset=FLAGS.subset)
assert dataset.data_files()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
inception_train.train(dataset)
if __name__ == '__main__':
tf.app.run()
| models-master | inception/inception/flowers_train.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train Inception on the ImageNet data set.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from inception import inception_train
from inception.imagenet_data import ImagenetData
FLAGS = tf.app.flags.FLAGS
def main(_):
dataset = ImagenetData(subset=FLAGS.subset)
assert dataset.data_files()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
inception_train.train(dataset)
if __name__ == '__main__':
tf.app.run()
| models-master | inception/inception/imagenet_train.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library to train Inception using multiple GPU's with synchronous updates.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from datetime import datetime
import os.path
import re
import time
import numpy as np
import tensorflow as tf
from inception import image_processing
from inception import inception_model as inception
from inception.slim import slim
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/imagenet_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 10000000,
"""Number of batches to run.""")
tf.app.flags.DEFINE_string('subset', 'train',
"""Either 'train' or 'validation'.""")
# Flags governing the hardware employed for running TensorFlow.
tf.app.flags.DEFINE_integer('num_gpus', 1,
"""How many GPUs to use.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
# Flags governing the type of training.
tf.app.flags.DEFINE_boolean('fine_tune', False,
"""If set, randomly initialize the final layer """
"""of weights in order to train the network on a """
"""new task.""")
tf.app.flags.DEFINE_string('pretrained_model_checkpoint_path', '',
"""If specified, restore this pretrained model """
"""before beginning any training.""")
# **IMPORTANT**
# Please note that this learning rate schedule is heavily dependent on the
# hardware architecture, batch size and any changes to the model architecture
# specification. Selecting a finely tuned learning rate schedule is an
# empirical process that requires some experimentation. Please see README.md
# more guidance and discussion.
#
# With 8 Tesla K40's and a batch size = 256, the following setup achieves
# precision@1 = 73.5% after 100 hours and 100K steps (20 epochs).
# Learning rate decay factor selected from http://arxiv.org/abs/1404.5997.
tf.app.flags.DEFINE_float('initial_learning_rate', 0.1,
"""Initial learning rate.""")
tf.app.flags.DEFINE_float('num_epochs_per_decay', 30.0,
"""Epochs after which learning rate decays.""")
tf.app.flags.DEFINE_float('learning_rate_decay_factor', 0.16,
"""Learning rate decay factor.""")
# Constants dictating the learning rate schedule.
RMSPROP_DECAY = 0.9 # Decay term for RMSProp.
RMSPROP_MOMENTUM = 0.9 # Momentum in RMSProp.
RMSPROP_EPSILON = 1.0 # Epsilon term for RMSProp.
def _tower_loss(images, labels, num_classes, scope):
"""Calculate the total loss on a single tower running the ImageNet model.
We perform 'batch splitting'. This means that we cut up a batch across
multiple GPU's. For instance, if the batch size = 32 and num_gpus = 2,
then each tower will operate on an batch of 16 images.
Args:
images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
FLAGS.image_size, 3].
labels: 1-D integer Tensor of [batch_size].
num_classes: number of classes
scope: unique prefix string identifying the ImageNet tower, e.g.
'tower_0'.
Returns:
Tensor of shape [] containing the total loss for a batch of data
"""
# When fine-tuning a model, we do not restore the logits but instead we
# randomly initialize the logits. The number of classes in the output of the
# logit is the number of classes in specified Dataset.
restore_logits = not FLAGS.fine_tune
# Build inference Graph.
logits = inception.inference(images, num_classes, for_training=True,
restore_logits=restore_logits,
scope=scope)
# Build the portion of the Graph calculating the losses. Note that we will
# assemble the total_loss using a custom function below.
split_batch_size = images.get_shape().as_list()[0]
inception.loss(logits, labels, batch_size=split_batch_size)
# Assemble all of the losses for the current tower only.
losses = tf.get_collection(slim.losses.LOSSES_COLLECTION, scope)
# Calculate the total loss for the current tower.
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
total_loss = tf.add_n(losses + regularization_losses, name='total_loss')
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summmary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on TensorBoard.
loss_name = re.sub('%s_[0-9]*/' % inception.TOWER_NAME, '', l.op.name)
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.scalar_summary(loss_name +' (raw)', l)
tf.scalar_summary(loss_name, loss_averages.average(l))
with tf.control_dependencies([loss_averages_op]):
total_loss = tf.identity(total_loss)
return total_loss
def _average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(0, grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
def train(dataset):
"""Train on dataset for a number of steps."""
with tf.Graph().as_default(), tf.device('/cpu:0'):
# Create a variable to count the number of train() calls. This equals the
# number of batches processed * FLAGS.num_gpus.
global_step = tf.get_variable(
'global_step', [],
initializer=tf.constant_initializer(0), trainable=False)
# Calculate the learning rate schedule.
num_batches_per_epoch = (dataset.num_examples_per_epoch() /
FLAGS.batch_size)
decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(FLAGS.initial_learning_rate,
global_step,
decay_steps,
FLAGS.learning_rate_decay_factor,
staircase=True)
# Create an optimizer that performs gradient descent.
opt = tf.train.RMSPropOptimizer(lr, RMSPROP_DECAY,
momentum=RMSPROP_MOMENTUM,
epsilon=RMSPROP_EPSILON)
# Get images and labels for ImageNet and split the batch across GPUs.
assert FLAGS.batch_size % FLAGS.num_gpus == 0, (
'Batch size must be divisible by number of GPUs')
split_batch_size = int(FLAGS.batch_size / FLAGS.num_gpus)
# Override the number of preprocessing threads to account for the increased
# number of GPU towers.
num_preprocess_threads = FLAGS.num_preprocess_threads * FLAGS.num_gpus
images, labels = image_processing.distorted_inputs(
dataset,
num_preprocess_threads=num_preprocess_threads)
input_summaries = copy.copy(tf.get_collection(tf.GraphKeys.SUMMARIES))
# Number of classes in the Dataset label set plus 1.
# Label 0 is reserved for an (unused) background class.
num_classes = dataset.num_classes() + 1
# Split the batch of images and labels for towers.
images_splits = tf.split(0, FLAGS.num_gpus, images)
labels_splits = tf.split(0, FLAGS.num_gpus, labels)
# Calculate the gradients for each model tower.
tower_grads = []
for i in xrange(FLAGS.num_gpus):
with tf.device('/gpu:%d' % i):
with tf.name_scope('%s_%d' % (inception.TOWER_NAME, i)) as scope:
# Force all Variables to reside on the CPU.
with slim.arg_scope([slim.variables.variable], device='/cpu:0'):
# Calculate the loss for one tower of the ImageNet model. This
# function constructs the entire ImageNet model but shares the
# variables across all towers.
loss = _tower_loss(images_splits[i], labels_splits[i], num_classes,
scope)
# Reuse variables for the next tower.
tf.get_variable_scope().reuse_variables()
# Retain the summaries from the final tower.
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
          # Retain the Batch Normalization update operations only from the
# final tower. Ideally, we should grab the updates from all towers
# but these stats accumulate extremely fast so we can ignore the
# other stats from the other towers without significant detriment.
batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION,
scope)
# Calculate the gradients for the batch of data on this ImageNet
# tower.
grads = opt.compute_gradients(loss)
# Keep track of the gradients across all towers.
tower_grads.append(grads)
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers.
grads = _average_gradients(tower_grads)
    # Add summaries for the input processing and global_step.
summaries.extend(input_summaries)
# Add a summary to track the learning rate.
summaries.append(tf.scalar_summary('learning_rate', lr))
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
summaries.append(
tf.histogram_summary(var.op.name + '/gradients', grad))
# Apply the gradients to adjust the shared variables.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
summaries.append(tf.histogram_summary(var.op.name, var))
# Track the moving averages of all trainable variables.
# Note that we maintain a "double-average" of the BatchNormalization
    # global statistics. This is more complicated than necessary, but we employ
# this for backward-compatibility with our previous models.
variable_averages = tf.train.ExponentialMovingAverage(
inception.MOVING_AVERAGE_DECAY, global_step)
    # Another possibility is to use tf.slim.get_variables().
variables_to_average = (tf.trainable_variables() +
tf.moving_average_variables())
variables_averages_op = variable_averages.apply(variables_to_average)
    # Group all updates into a single train op.
batchnorm_updates_op = tf.group(*batchnorm_updates)
train_op = tf.group(apply_gradient_op, variables_averages_op,
batchnorm_updates_op)
# Create a saver.
saver = tf.train.Saver(tf.all_variables())
# Build the summary operation from the last tower summaries.
summary_op = tf.merge_summary(summaries)
# Build an initialization operation to run below.
init = tf.initialize_all_variables()
# Start running operations on the Graph. allow_soft_placement must be set to
# True to build towers on GPU, as some of the ops do not have GPU
# implementations.
sess = tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement))
sess.run(init)
if FLAGS.pretrained_model_checkpoint_path:
assert tf.gfile.Exists(FLAGS.pretrained_model_checkpoint_path)
variables_to_restore = tf.get_collection(
slim.variables.VARIABLES_TO_RESTORE)
restorer = tf.train.Saver(variables_to_restore)
restorer.restore(sess, FLAGS.pretrained_model_checkpoint_path)
print('%s: Pre-trained model restored from %s' %
(datetime.now(), FLAGS.pretrained_model_checkpoint_path))
# Start the queue runners.
tf.train.start_queue_runners(sess=sess)
summary_writer = tf.train.SummaryWriter(
FLAGS.train_dir,
graph_def=sess.graph.as_graph_def(add_shapes=True))
for step in xrange(FLAGS.max_steps):
start_time = time.time()
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step % 10 == 0:
examples_per_sec = FLAGS.batch_size / float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print(format_str % (datetime.now(), step, loss_value,
examples_per_sec, duration))
if step % 100 == 0:
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
# Save the model checkpoint periodically.
if step % 5000 == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
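# Editor's note: minimal pure-Python sketch of the staircase exponential decay
# schedule built above with tf.train.exponential_decay(); added for
# illustration only, and the helper name and example numbers are hypothetical.
def _staircase_lr_sketch(step, initial_lr=0.1, decay_steps=1000,
                         decay_factor=0.16):
  """Learning rate used at `step` under a staircase exponential schedule."""
  return initial_lr * (decay_factor ** (step // decay_steps))
# Example: with decay every 1000 steps, step 2500 has decayed exactly twice.
assert abs(_staircase_lr_sketch(2500) - 0.1 * 0.16 ** 2) < 1e-12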
| models-master | inception/inception/inception_train.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Small library that points to a data set.
Methods of the Dataset class:
data_files: Returns a python list of all (sharded) data set files.
num_examples_per_epoch: Returns the number of examples in the data set.
num_classes: Returns the number of classes in the data set.
reader: Return a reader for a single entry from the data set.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
import os
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_string('data_dir', '/tmp/mydata',
"""Path to the processed data, i.e. """
"""TFRecord of Example protos.""")
class Dataset(object):
"""A simple class for handling data sets."""
__metaclass__ = ABCMeta
def __init__(self, name, subset):
"""Initialize dataset using a subset and the path to the data."""
assert subset in self.available_subsets(), self.available_subsets()
self.name = name
self.subset = subset
@abstractmethod
def num_classes(self):
"""Returns the number of classes in the data set."""
pass
# return 10
@abstractmethod
def num_examples_per_epoch(self):
"""Returns the number of examples in the data subset."""
pass
# if self.subset == 'train':
# return 10000
# if self.subset == 'validation':
# return 1000
@abstractmethod
def download_message(self):
"""Prints a download message for the Dataset."""
pass
def available_subsets(self):
"""Returns the list of available subsets."""
return ['train', 'validation']
def data_files(self):
"""Returns a python list of all (sharded) data subset files.
Returns:
python list of all (sharded) data set files.
Raises:
      ValueError: if there are no data_files matching the subset.
"""
tf_record_pattern = os.path.join(FLAGS.data_dir, '%s-*' % self.subset)
data_files = tf.gfile.Glob(tf_record_pattern)
if not data_files:
print('No files found for dataset %s/%s at %s' % (self.name,
self.subset,
FLAGS.data_dir))
self.download_message()
exit(-1)
return data_files
def reader(self):
"""Return a reader for a single entry from the data set.
See io_ops.py for details of Reader class.
Returns:
Reader object that reads the data set.
"""
return tf.TFRecordReader()
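# Editor's note: illustrative sketch of a minimal Dataset subclass, assuming a
# hypothetical toy data set; it only shows which abstract methods a concrete
# data set must provide (compare FlowersData in flowers_data.py).
class _ToyDataSketch(Dataset):
  """Hypothetical two-class data set used purely as a usage example."""
  def num_classes(self):
    return 2
  def num_examples_per_epoch(self):
    return 1000 if self.subset == 'train' else 100
  def download_message(self):
    print('Point --data_dir at TFRecord shards named %s-*' % self.subset)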
| models-master | inception/inception/dataset.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library to train Inception using multiple replicas with synchronous update.
Please see accompanying README.md for details and instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import time
import numpy as np
import tensorflow as tf
from inception import image_processing, compute_group_optimizer
from inception import alexnet_model as alexnet
from inception.slim import slim
FLAGS = tf.app.flags.FLAGS
'''
tf.app.flags.DEFINE_string('job_name', '', 'One of "ps", "worker"')
tf.app.flags.DEFINE_string('ps_hosts', '',
"""Comma-separated list of hostname:port for the """
"""parameter server jobs. e.g. """
"""'machine1:2222,machine2:1111,machine2:2222'""")
tf.app.flags.DEFINE_string('worker_hosts', '',
"""Comma-separated list of hostname:port for the """
"""worker jobs. e.g. """
"""'machine1:2222,machine2:1111,machine2:2222'""")
tf.app.flags.DEFINE_string('train_dir', '/tmp/imagenet_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000, 'Number of batches to run.')
tf.app.flags.DEFINE_string('subset', 'train', 'Either "train" or "validation".')
tf.app.flags.DEFINE_boolean('log_device_placement', False,
'Whether to log device placement.')
'''
tf.app.flags.DEFINE_boolean('alexnet', False, 'Use alexnet')
# Task ID is used to select the chief and also to access the local_step for
# each replica to check staleness of the gradients in sync_replicas_optimizer.
'''
tf.app.flags.DEFINE_integer(
'task_id', 0, 'Task ID of the worker/replica running the training.')
'''
# More details can be found in the sync_replicas_optimizer class:
# tensorflow/python/training/sync_replicas_optimizer.py
'''
tf.app.flags.DEFINE_integer('num_replicas_to_aggregate', -1,
"""Number of gradients to collect before """
"""updating the parameters.""")
tf.app.flags.DEFINE_integer('save_interval_secs', 10 * 60,
'Save interval seconds.')
tf.app.flags.DEFINE_integer('save_summaries_secs', 180,
'Save summaries interval seconds.')
'''
# **IMPORTANT**
# Please note that this learning rate schedule is heavily dependent on the
# hardware architecture, batch size and any changes to the model architecture
# specification. Selecting a finely tuned learning rate schedule is an
# empirical process that requires some experimentation. Please see README.md
# more guidance and discussion.
#
# Learning rate decay factor selected from https://arxiv.org/abs/1604.00981
'''
tf.app.flags.DEFINE_float('initial_learning_rate', 0.045,
'Initial learning rate.')
tf.app.flags.DEFINE_float('num_epochs_per_decay', 2.0,
'Epochs after which learning rate decays.')
tf.app.flags.DEFINE_float('learning_rate_decay_factor', 0.94,
'Learning rate decay factor.')
'''
# Constants dictating the learning rate schedule.
RMSPROP_DECAY = 0.9 # Decay term for RMSProp.
RMSPROP_MOMENTUM = 0.9 # Momentum in RMSProp.
RMSPROP_EPSILON = 1.0 # Epsilon term for RMSProp.
def train(target, dataset, cluster_spec):
"""Train Inception on a dataset for a number of steps."""
  # The numbers of workers and parameter servers are inferred from the worker
  # and ps host strings.
num_workers = len(cluster_spec.as_dict()['worker'])
num_parameter_servers = len(cluster_spec.as_dict()['ps'])
  # If no value is given, num_replicas_to_aggregate defaults to the number of
  # workers.
if FLAGS.num_replicas_to_aggregate == -1:
num_replicas_to_aggregate = num_workers
else:
num_replicas_to_aggregate = FLAGS.num_replicas_to_aggregate
# Both should be greater than 0 in a distributed training.
assert num_workers > 0 and num_parameter_servers > 0, (' num_workers and '
'num_parameter_servers'
' must be > 0.')
# Choose worker 0 as the chief. Note that any worker could be the chief
# but there should be only one chief.
is_chief = (FLAGS.task_id == 0)
# Ops are assigned to worker by default.
with tf.device('/job:worker/task:%d' % FLAGS.task_id):
# Variables and its related init/assign ops are assigned to ps.
with slim.scopes.arg_scope(
[slim.variables.variable, slim.variables.global_step],
device=slim.variables.VariableDeviceChooser(num_parameter_servers)):
# Create a variable to count the number of train() calls. This equals the
# number of updates applied to the variables.
global_step = slim.variables.global_step()
# Calculate the learning rate schedule.
num_batches_per_epoch = (dataset.num_examples_per_epoch() /
FLAGS.batch_size)
# Decay steps need to be divided by the number of replicas to aggregate.
decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay /
num_replicas_to_aggregate)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(FLAGS.initial_learning_rate,
global_step,
decay_steps,
FLAGS.learning_rate_decay_factor,
staircase=True)
# Add a summary to track the learning rate.
tf.scalar_summary('learning_rate', lr)
# Create an optimizer that performs gradient descent.
#opt = tf.train.RMSPropOptimizer(lr,
# RMSPROP_DECAY,
# momentum=RMSPROP_MOMENTUM,
# epsilon=RMSPROP_EPSILON)
opt = tf.train.MomentumOptimizer(lr,0.9) # Tuning done for these!
images, labels = image_processing.distorted_inputs(
dataset,
batch_size=FLAGS.batch_size,
num_preprocess_threads=FLAGS.num_preprocess_threads)
# Number of classes in the Dataset label set plus 1.
# Label 0 is reserved for an (unused) background class.
num_classes = dataset.num_classes() + 1
logits = alexnet.inference(images, num_classes, for_training=True)
# Add classification loss.
alexnet.loss(logits, labels)
#Accuracy
correct_prediction = tf.nn.in_top_k(logits[0], labels, 1)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# Gather all of the losses including regularization losses.
losses = tf.get_collection(slim.losses.LOSSES_COLLECTION)
losses += tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
total_loss = tf.add_n(losses, name='total_loss')
if is_chief:
# Compute the moving average of all individual losses and the
# total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
loss_averages_op = loss_averages.apply(losses + [total_loss])
        # Attach a scalar summary to all individual losses and the total loss;
# do the same for the averaged version of the losses.
for l in losses + [total_loss]:
loss_name = l.op.name
# Name each loss as '(raw)' and name the moving average version of the
# loss as the original loss name.
tf.scalar_summary(loss_name + ' (raw)', l)
tf.scalar_summary(loss_name, loss_averages.average(l))
# Add dependency to compute loss_averages.
with tf.control_dependencies([loss_averages_op]):
total_loss = tf.identity(total_loss)
# Track the moving averages of all trainable variables.
# Note that we maintain a 'double-average' of the BatchNormalization
# global statistics.
      # This is not needed when the number of replicas is small but is important
# for synchronous distributed training with tens of workers/replicas.
exp_moving_averager = tf.train.ExponentialMovingAverage(
alexnet.MOVING_AVERAGE_DECAY, global_step)
variables_to_average = (
tf.trainable_variables() + tf.moving_average_variables())
# Add histograms for model variables.
for var in variables_to_average:
tf.histogram_summary(var.op.name, var)
# Create synchronous replica optimizer.
opt = compute_group_optimizer.ComputeGroupOptimizer(
opt,
replicas_to_aggregate=num_replicas_to_aggregate,
replica_id=FLAGS.task_id,
total_num_replicas=num_workers,
variable_averages=exp_moving_averager,
variables_to_average=variables_to_average)
batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION)
assert batchnorm_updates, 'Batchnorm updates are missing'
batchnorm_updates_op = tf.group(*batchnorm_updates)
# Add dependency to compute batchnorm_updates.
with tf.control_dependencies([batchnorm_updates_op]):
total_loss = tf.identity(total_loss)
# Compute gradients with respect to the loss.
grads = opt.compute_gradients(total_loss)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
tf.histogram_summary(var.op.name + '/gradients', grad)
apply_gradients_op = opt.apply_gradients(grads, global_step=global_step)
with tf.control_dependencies([apply_gradients_op]):
train_op = tf.identity(total_loss, name='train_op')
# Get chief queue_runners, init_tokens and clean_up_op, which is used to
# synchronize replicas.
# More details can be found in sync_replicas_optimizer.
chief_queue_runners = [opt.get_chief_queue_runner()]
init_tokens_op = opt.get_init_tokens_op()
clean_up_op = opt.get_clean_up_op()
# Create a saver.
saver = tf.train.Saver()
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.merge_all_summaries()
# Build an initialization operation to run below.
init_op = tf.initialize_all_variables()
# We run the summaries in the same thread as the training operations by
# passing in None for summary_op to avoid a summary_thread being started.
# Running summaries and training operations in parallel could run out of
# GPU memory.
sv = tf.train.Supervisor(is_chief=is_chief,
logdir=FLAGS.train_dir,
init_op=init_op,
summary_op=None,
global_step=global_step,
saver=saver,
save_model_secs=FLAGS.save_interval_secs)
tf.logging.info('%s Supervisor' % datetime.now())
sess_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement)
# Get a session.
sess = sv.prepare_or_wait_for_session(target, config=sess_config)
# Start the queue runners.
queue_runners = tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS)
sv.start_queue_runners(sess, queue_runners)
tf.logging.info('Started %d queues for processing input data.',
len(queue_runners))
if is_chief:
sv.start_queue_runners(sess, chief_queue_runners)
sess.run(init_tokens_op)
# Train, checking for Nans. Concurrently run the summary operation at a
# specified interval. Note that the summary_op and train_op never run
# simultaneously in order to prevent running out of GPU memory.
next_summary_time = time.time() + FLAGS.save_summaries_secs
while not sv.should_stop():
try:
start_time = time.time()
loss_value, step = sess.run([train_op, global_step])
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step > FLAGS.max_steps:
break
duration = time.time() - start_time
#Print Accuracy
print(sess.run(accuracy))
if step % 3 == 0:
examples_per_sec = FLAGS.batch_size / float(duration)
            format_str = ('Worker %d: %s: step %d, loss = %.2f '
'(%.1f examples/sec; %.3f sec/batch)')
tf.logging.info(format_str %
(FLAGS.task_id, datetime.now(), step, loss_value,
examples_per_sec, duration))
# Determine if the summary_op should be run on the chief worker.
if is_chief and next_summary_time < time.time():
tf.logging.info('Running Summary operation on the chief.')
summary_str = sess.run(summary_op)
sv.summary_computed(sess, summary_str)
tf.logging.info('Finished running Summary operation.')
# Determine the next time for running the summary.
next_summary_time += FLAGS.save_summaries_secs
except:
if is_chief:
tf.logging.info('About to execute sync_clean_up_op!')
sess.run(clean_up_op)
raise
# Stop the supervisor. This also waits for service threads to finish.
sv.stop()
# Save after the training ends.
if is_chief:
saver.save(sess,
os.path.join(FLAGS.train_dir, 'model.ckpt'),
global_step=global_step)
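# Editor's note: small sketch of how the decay schedule above is adjusted for
# synchronous training: every variable update aggregates
# num_replicas_to_aggregate worker steps, so the decay interval measured in
# updates shrinks by that factor. Helper name and example numbers are
# illustrative only.
def _sync_decay_steps_sketch(num_examples_per_epoch, batch_size,
                             num_epochs_per_decay, num_replicas_to_aggregate):
  batches_per_epoch = num_examples_per_epoch / batch_size
  return int(batches_per_epoch * num_epochs_per_decay /
             num_replicas_to_aggregate)
# Example: 1,281,167 ImageNet images, batch 32, decay every 2 epochs, 8 replicas.
assert _sync_decay_steps_sketch(1281167, 32, 2.0, 8) == 10009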
| models-master | inception/inception/alexnet_distributed_train.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Build the Inception v3 network on ImageNet data set.
The Inception v3 architecture is described in http://arxiv.org/abs/1512.00567
Summary of available functions:
inference: Compute inference on the model inputs to make a prediction
loss: Compute the loss of the prediction with respect to the labels
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
from inception.slim import slim
FLAGS = tf.app.flags.FLAGS
# If a model is trained using multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
# Batch normalization. Constant governing the exponential moving average of
# the 'global' mean and variance for all activations.
BATCHNORM_MOVING_AVERAGE_DECAY = 0.9997
# The decay to use for the moving average.
MOVING_AVERAGE_DECAY = 1.0#0.9999
def inference(images, num_classes, for_training=False, restore_logits=True,
scope=None):
"""Build Inception v3 model architecture.
See here for reference: http://arxiv.org/abs/1512.00567
Args:
images: Images returned from inputs() or distorted_inputs().
num_classes: number of classes
for_training: If set to `True`, build the inference model for training.
Kernels that operate differently for inference during training
e.g. dropout, are appropriately configured.
restore_logits: whether or not the logits layers should be restored.
Useful for fine-tuning a model with different num_classes.
scope: optional prefix string identifying the ImageNet tower.
Returns:
Logits. 2-D float Tensor.
Auxiliary Logits. 2-D float Tensor of side-head. Used for training only.
"""
# Parameters for BatchNorm.
batch_norm_params = {
# Decay for the moving averages.
'decay': BATCHNORM_MOVING_AVERAGE_DECAY,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
}
# Set weight_decay for weights in Conv and FC layers.
with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004):
with slim.arg_scope([slim.ops.conv2d],
stddev=0.1,
activation=tf.nn.relu,
batch_norm_params=batch_norm_params):
logits, endpoints = slim.inception.inception_v3(
images,
dropout_keep_prob=0.8,
num_classes=num_classes,
is_training=for_training,
restore_logits=restore_logits,
scope=scope)
# Add summaries for viewing model statistics on TensorBoard.
_activation_summaries(endpoints)
# Grab the logits associated with the side head. Employed during training.
auxiliary_logits = endpoints['aux_logits']
return logits, auxiliary_logits
def loss(logits, labels, batch_size=None):
"""Adds all losses for the model.
Note the final loss is not returned. Instead, the list of losses are collected
by slim.losses. The losses are accumulated in tower_loss() and summed to
calculate the total loss.
Args:
logits: List of logits from inference(). Each entry is a 2-D float Tensor.
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
batch_size: integer
"""
if not batch_size:
batch_size = FLAGS.batch_size
# Reshape the labels into a dense Tensor of
# shape [FLAGS.batch_size, num_classes].
sparse_labels = tf.reshape(labels, [batch_size, 1])
indices = tf.reshape(tf.range(batch_size), [batch_size, 1])
concated = tf.concat(1, [indices, sparse_labels])
num_classes = logits[0].get_shape()[-1].value
dense_labels = tf.sparse_to_dense(concated,
[batch_size, num_classes],
1.0, 0.0)
# Cross entropy loss for the main softmax prediction.
slim.losses.cross_entropy_loss(logits[0],
dense_labels,
label_smoothing=0.1,
weight=1.0)
# Cross entropy loss for the auxiliary softmax head.
slim.losses.cross_entropy_loss(logits[1],
dense_labels,
label_smoothing=0.1,
weight=0.4,
scope='aux_loss')
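# Editor's note: NumPy sketch of the sparse-to-dense label conversion performed
# in loss() above (tf.sparse_to_dense over a [batch_size, num_classes] matrix).
# The label-smoothing weights themselves are applied later inside
# slim.losses.cross_entropy_loss; the helper name here is hypothetical.
def _dense_labels_sketch(labels, num_classes):
  """Turn a 1-D list of class ids into a dense one-hot [batch, classes] array."""
  import numpy as np
  dense = np.zeros((len(labels), num_classes), dtype=np.float32)
  dense[np.arange(len(labels)), labels] = 1.0
  return dense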
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measure the sparsity of activations.
Args:
x: Tensor
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.histogram_summary(tensor_name + '/activations', x)
tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _activation_summaries(endpoints):
with tf.name_scope('summaries'):
for act in endpoints.values():
_activation_summary(act)
| models-master | inception/inception/inception_model.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Small library that points to the flowers data set.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from inception.dataset import Dataset
class FlowersData(Dataset):
"""Flowers data set."""
def __init__(self, subset):
super(FlowersData, self).__init__('Flowers', subset)
def num_classes(self):
"""Returns the number of classes in the data set."""
return 5
def num_examples_per_epoch(self):
"""Returns the number of examples in the data subset."""
if self.subset == 'train':
return 3170
if self.subset == 'validation':
return 500
def download_message(self):
"""Instruction to download and extract the tarball from Flowers website."""
print('Failed to find any Flowers %s files'% self.subset)
print('')
print('If you have already downloaded and processed the data, then make '
'sure to set --data_dir to point to the directory containing the '
'location of the sharded TFRecords.\n')
print('Please see README.md for instructions on how to build '
'the flowers dataset using download_and_preprocess_flowers.\n')
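# Editor's note: illustrative usage sketch only; it assumes --data_dir points
# at the sharded TFRecords produced by download_and_preprocess_flowers.
if __name__ == '__main__':
  dataset = FlowersData('train')
  print('%d classes, %d training examples' %
        (dataset.num_classes(), dataset.num_examples_per_epoch()))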
| models-master | inception/inception/flowers_data.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library to evaluate Inception on a single GPU.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import math
import os.path
import time
import numpy as np
import tensorflow as tf
from inception import image_processing
from inception import inception_model as inception
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('eval_dir', '/tmp/imagenet_eval',
"""Directory where to write event logs.""")
tf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/imagenet_train',
"""Directory where to read model checkpoints.""")
# Flags governing the frequency of the eval.
tf.app.flags.DEFINE_integer('eval_interval_secs', 60 * 5,
"""How often to run the eval.""")
tf.app.flags.DEFINE_boolean('run_once', False,
"""Whether to run eval only once.""")
# Flags governing the data used for the eval.
tf.app.flags.DEFINE_integer('num_examples', 50000,
"""Number of examples to run. Note that the eval """
"""ImageNet dataset contains 50000 examples.""")
tf.app.flags.DEFINE_string('subset', 'validation',
"""Either 'validation' or 'train'.""")
def _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op):
"""Runs Eval once.
Args:
saver: Saver.
summary_writer: Summary writer.
top_1_op: Top 1 op.
top_5_op: Top 5 op.
summary_op: Summary op.
"""
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
if os.path.isabs(ckpt.model_checkpoint_path):
# Restores from checkpoint with absolute path.
saver.restore(sess, ckpt.model_checkpoint_path)
else:
# Restores from checkpoint with relative path.
saver.restore(sess, os.path.join(FLAGS.checkpoint_dir,
ckpt.model_checkpoint_path))
# Assuming model_checkpoint_path looks something like:
# /my-favorite-path/imagenet_train/model.ckpt-0,
# extract global_step from it.
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
      print('Successfully loaded model from %s at step=%s.' %
(ckpt.model_checkpoint_path, global_step))
else:
print('No checkpoint file found')
return
# Start the queue runners.
coord = tf.train.Coordinator()
try:
threads = []
for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
start=True))
num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))
# Counts the number of correct predictions.
count_top_1 = 0.0
count_top_5 = 0.0
total_sample_count = num_iter * FLAGS.batch_size
step = 0
print('%s: starting evaluation on (%s).' % (datetime.now(), FLAGS.subset))
start_time = time.time()
while step < num_iter and not coord.should_stop():
top_1, top_5 = sess.run([top_1_op, top_5_op])
count_top_1 += np.sum(top_1)
count_top_5 += np.sum(top_5)
step += 1
if step % 20 == 0:
duration = time.time() - start_time
sec_per_batch = duration / 20.0
examples_per_sec = FLAGS.batch_size / sec_per_batch
          print('%s: [%d batches out of %d] (%.1f examples/sec; %.3f '
'sec/batch)' % (datetime.now(), step, num_iter,
examples_per_sec, sec_per_batch))
start_time = time.time()
# Compute precision @ 1.
precision_at_1 = count_top_1 / total_sample_count
recall_at_5 = count_top_5 / total_sample_count
print('%s: precision @ 1 = %.4f recall @ 5 = %.4f [%d examples]' %
(datetime.now(), precision_at_1, recall_at_5, total_sample_count))
summary = tf.Summary()
summary.ParseFromString(sess.run(summary_op))
summary.value.add(tag='Precision @ 1', simple_value=precision_at_1)
summary.value.add(tag='Recall @ 5', simple_value=recall_at_5)
summary_writer.add_summary(summary, global_step)
except Exception as e: # pylint: disable=broad-except
coord.request_stop(e)
coord.request_stop()
coord.join(threads, stop_grace_period_secs=10)
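# Editor's note: NumPy sketch of the metric computed in _eval_once() above:
# boolean tf.nn.in_top_k indicator batches are summed over the whole eval run
# and divided by the number of evaluated examples. Names are illustrative only.
def _topk_metrics_sketch(top_1_batches, top_5_batches):
  """Return (precision @ 1, recall @ 5) from per-batch indicator arrays."""
  count_top_1 = sum(np.sum(b) for b in top_1_batches)
  count_top_5 = sum(np.sum(b) for b in top_5_batches)
  total_sample_count = sum(len(b) for b in top_1_batches)
  return count_top_1 / total_sample_count, count_top_5 / total_sample_count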
def evaluate(dataset):
"""Evaluate model on Dataset for a number of steps."""
with tf.Graph().as_default():
# Get images and labels from the dataset.
images, labels = image_processing.inputs(dataset)
# Number of classes in the Dataset label set plus 1.
# Label 0 is reserved for an (unused) background class.
num_classes = dataset.num_classes() + 1
# Build a Graph that computes the logits predictions from the
# inference model.
logits, _ = inception.inference(images, num_classes)
# Calculate predictions.
top_1_op = tf.nn.in_top_k(logits, labels, 1)
top_5_op = tf.nn.in_top_k(logits, labels, 5)
# Restore the moving average version of the learned variables for eval.
variable_averages = tf.train.ExponentialMovingAverage(
inception.MOVING_AVERAGE_DECAY)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.merge_all_summaries()
graph_def = tf.get_default_graph().as_graph_def()
summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
graph_def=graph_def)
while True:
_eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op)
if FLAGS.run_once:
break
time.sleep(FLAGS.eval_interval_secs)
| models-master | inception/inception/inception_eval.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Synchronize replicas for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
#from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import optimizer
from tensorflow.python.training import queue_runner
import tensorflow as tf
class ComputeGroupOptimizer(optimizer.Optimizer):
"""Class to synchronize, aggregate gradients and pass them to the optimizer.
In a typical asynchronous training environment, it's common to have some
stale gradients. For example, with a N-replica asynchronous training,
gradients will be applied to the variables N times independently. Depending
on each replica's training speed, some gradients might be calculated from
copies of the variable from several steps back (N-1 steps on average). This
optimizer avoids stale gradients by collecting gradients from all replicas,
summing them, then applying them to the variables in one shot, after
which replicas can fetch the new variables and continue.
The following queues are created:
* N `gradient` queues, one per variable to train. Gradients are pushed to
these queues and the chief worker will dequeue_many and then sum them
before applying to variables.
* 1 `token` queue where the optimizer pushes the new global_step value after
all gradients have been applied.
The following variables are created:
* N `local_step`, one per replica. Compared against global step to check for
staleness of the gradients.
This adds nodes to the graph to collect gradients and pause the trainers until
variables are updated.
For the PS:
1. A queue is created for each variable, and each replica now pushes the
gradients into the queue instead of directly applying them to the
variables.
2. For each gradient_queue, pop and sum the gradients once enough
replicas (replicas_to_aggregate) have pushed gradients to the queue.
3. Apply the aggregated gradients to the variables.
4. Only after all variables have been updated, increment the global step.
5. Only after step 4, clear all the gradients in the queues as they are
stale now (could happen when replicas are restarted and push to the queues
multiple times, or from the backup replicas).
6. Only after step 5, pushes `global_step` in the `token_queue`, once for
each worker replica. The workers can now fetch it to its local_step variable
and start the next batch.
For the replicas:
1. Start a step: fetch variables and compute gradients.
2. Once the gradients have been computed, push them into `gradient_queue` only
if local_step equals global_step, otherwise the gradients are just dropped.
This avoids stale gradients.
3. After pushing all the gradients, dequeue an updated value of global_step
from the token queue and record that step to its local_step variable. Note
that this is effectively a barrier.
4. Start the next batch.
### Usage
```python
# Create any optimizer to update the variables, say a simple SGD:
opt = GradientDescentOptimizer(learning_rate=0.1)
# Wrap the optimizer with sync_replicas_optimizer with 50 replicas: at each
# step the optimizer collects 50 gradients before applying to variables.
opt = tf.SyncReplicasOptimizer(opt, replicas_to_aggregate=50,
replica_id=task_id, total_num_replicas=50)
# Note that if you want to have 2 backup replicas, you can change
# total_num_replicas=52 and make sure this number matches how many physical
# replicas you started in your job.
# Some models have startup_delays to help stablize the model but when using
# sync_replicas training, set it to 0.
# Now you can call `minimize()` or `compute_gradients()` and
# `apply_gradients()` normally
grads = opt.minimize(total_loss, global_step=self.global_step)
# You can now call get_init_tokens_op() and get_chief_queue_runner().
# Note that get_init_tokens_op() must be called before creating session
# because it modifies the graph.
init_token_op = opt.get_init_tokens_op()
chief_queue_runner = opt.get_chief_queue_runner()
```
In the training program, every worker will run the train_op as if not
synchronized. But one worker (usually the chief) will need to execute the
chief_queue_runner and get_init_tokens_op generated from this optimizer.
```python
# After the session is created by the superviser and before the main while
# loop:
if is_chief and FLAGS.sync_replicas:
sv.start_queue_runners(sess, [chief_queue_runner])
# Insert initial tokens to the queue.
sess.run(init_token_op)
```
@@__init__
@@compute_gradients
@@apply_gradients
@@get_chief_queue_runner
@@get_init_tokens_op
"""
def __init__(self,
opt,
replicas_to_aggregate,
variable_averages=None,
variables_to_average=None,
replica_id=None,
total_num_replicas=0,
use_locking=False,
name="sync_replicas"):
"""Construct a sync_replicas optimizer.
Args:
opt: The actual optimizer that will be used to compute and apply the
gradients. Must be one of the Optimizer classes.
replicas_to_aggregate: number of replicas to aggregate for each variable
update.
variable_averages: Optional `ExponentialMovingAverage` object, used to
maintain moving averages for the variables passed in
`variables_to_average`.
variables_to_average: a list of variables that need to be averaged. Only
needed if variable_averages is passed in.
replica_id: This is the task/worker/replica ID. Needed as index to access
local_steps to check staleness. Must be in the interval:
[0, total_num_replicas)
total_num_replicas: Total number of tasks/workers/replicas, could be
different from replicas_to_aggregate.
If total_num_replicas > replicas_to_aggregate: it is backup_replicas +
replicas_to_aggregate.
If total_num_replicas < replicas_to_aggregate: Replicas compute
multiple batches per update to variables.
use_locking: If True use locks for update operation.
name: string. Optional name of the returned operation.
"""
if total_num_replicas == 0:
total_num_replicas = replicas_to_aggregate
super(ComputeGroupOptimizer, self).__init__(use_locking, name)
tf.logging.info(
"SyncReplicas enabled: replicas_to_aggregate=%s; total_num_replicas=%s",
replicas_to_aggregate, total_num_replicas)
self._opt = opt
self._replicas_to_aggregate = replicas_to_aggregate
self._gradients_applied = False
self._variable_averages = variable_averages
self._variables_to_average = variables_to_average
self._replica_id = replica_id
self._total_num_replicas = total_num_replicas
self._tokens_per_step = max(total_num_replicas, replicas_to_aggregate)
self._global_step = None
self._sync_token_queue = None
# This will be executed in a queue runner and includes the synchronization
# operations done by the chief.
self._chief_queue_runner = None
# Remember which queue is on which device for the "clear" operation.
# This list contains list of the following format: (grad_queue, device).
self._one_element_queue_list = []
# Sparse gradients queue has both value and index
self._sparse_grad_queues_and_devs = []
# clean_up_op will be executed when the chief is about to restart.
# If chief restarts, it is possible that some variables have already been
# updated before and when chief comes back, these variables will not be
# updated again as the workers have already computed the gradients for
# them.
# But chief still waits for all variables to be updated, which will hang
# the training.
# To avoid such hang, every time the chief is about to die, it will call
# abort_op to kill the PS with the token_queue so all replicas will also
# restart.
# TODO(jmchen): When training restarts, the variables are restored from the
# previous checkpoint. As such all the gradients in all the queues should be
# removed as they are computed from potentially different variables.
# Currently this is not done.
self._clean_up_op = None
def compute_gradients(self, *args, **kwargs):
"""Compute gradients of "loss" for the variables in "var_list".
This simply wraps the compute_gradients() from the real optimizer. The
gradients will be aggregated in the apply_gradients() so that user can
modify the gradients like clipping with per replica global norm if needed.
The global norm with aggregated gradients can be bad as one replica's huge
gradients can hurt the gradients from other replicas.
Args:
*args: Arguments for compute_gradients().
**kwargs: Keyword arguments for compute_gradients().
Returns:
A list of (gradient, variable) pairs.
"""
return self._opt.compute_gradients(*args, **kwargs)
def _aggregate_sparse_grad(self, grad, var, train_ops):
"""Aggregate sparse gradients.
Args:
grad: The sparse gradient to aggregate.
var: The variable to apply this gradient to.
train_ops: The train_ops for the worker to run.
Returns:
aggregated_grad: Aggregated grad.
"""
    # Sparse gradients have to be inserted as one (value, index) pair per
    # element, instead of as a whole IndexedSlices, because their shapes are
    # not deterministic.
sparse_grad_queue = (data_flow_ops.FIFOQueue(
-1,
(grad.values.dtype, grad.indices.dtype),
shapes=(var.get_shape().as_list()[1:], ()),
shared_name="sparse_grad_q_%s" % var.name))
self._sparse_grad_queues_and_devs.append((sparse_grad_queue, var.device))
# Sparse token is inserted after the "enqueue_many" finishes. This
# is needed to make sure enough sparse gradients have been enqueued
# before applying them to the variables.
sparse_token_queue = (data_flow_ops.FIFOQueue(
self._replicas_to_aggregate * 2,
types_pb2.DT_INT32,
shapes=(),
shared_name="sparse_token_q_%s" % var.name))
self._one_element_queue_list.append((sparse_token_queue, var.device))
enqueue_spares_op = sparse_grad_queue.enqueue_many([grad.values,
grad.indices])
with ops.control_dependencies([enqueue_spares_op]):
train_ops.append(sparse_token_queue.enqueue((1,)))
with ops.control_dependencies([sparse_token_queue.dequeue_many(
self._replicas_to_aggregate)]):
values, indices = sparse_grad_queue.dequeue_many(sparse_grad_queue.size())
concat_grad = ops.IndexedSlices(values, indices, grad.dense_shape)
# Sum the gradients of the same variables in the sparse layers so
# that each variable is only updated once. Note that with 2
# gradients g1 and g2 from 2 replicas for the same variable,
# apply(g1+g2) is different from apply(g1) and then apply(g2) when
# the optimizer is complex like Momentum or Adagrad.
values = concat_grad.values
indices = concat_grad.indices
new_indices, indx = array_ops.unique(indices)
num_indices = array_ops.shape(new_indices)[0]
sum_values = math_ops.unsorted_segment_sum(values, indx, num_indices)
return ops.IndexedSlices(sum_values, new_indices, concat_grad.dense_shape)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients to variables.
This contains most of the synchronization implementation and also wraps the
apply_gradients() from the real optimizer.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
compute_gradients().
global_step: Optional Variable to increment by one after the
variables have been updated.
name: Optional name for the returned operation. Default to the
name passed to the Optimizer constructor.
Returns:
train_op: The op to dequeue a token so the replicas can exit this batch
and start the next one. This is executed by each replica.
Raises:
ValueError: If the grads_and_vars is empty.
ValueError: If global step is not provided, the staleness cannot be
checked.
"""
if not grads_and_vars:
raise ValueError("Must supply at least one variable")
if global_step is None:
raise ValueError("Global step is required to check staleness")
self._global_step = global_step
train_ops = []
aggregated_grad = []
inputs = []
var_list = []
for x in grads_and_vars:
inputs.extend(list(x))
with ops.device(global_step.device):
self._local_steps = variables.Variable(
array_ops.zeros(
[self._total_num_replicas],
dtype=global_step.dtype),
trainable=False,
name="local_steps")
    # Check staleness. Note that this has to be ref(); otherwise the identity
    # would be read and return stale values.
local_step = array_ops.slice(self._local_steps.ref(),
array_ops.reshape(self._replica_id, (1,)),
[1],
name="get_local_step")
local_step = array_ops.reshape(local_step, ())
is_stale = math_ops.less(local_step, global_step)
with ops.op_scope(inputs, None, self._name):
for grad, var in grads_and_vars:
var_list.append(var)
with ops.device(var.device):
if isinstance(grad, ops.Tensor):
gradient_queue = (data_flow_ops.FIFOQueue(self._tokens_per_step * 2,
grad.dtype,
shapes=var.get_shape(),
shared_name=var.name))
self._one_element_queue_list.append((gradient_queue, var.device))
train_ops.append(gradient_queue.enqueue([grad]))
# Aggregate all gradients
gradients = gradient_queue.dequeue_many(
self._replicas_to_aggregate)
aggregated_grad.append(math_ops.reduce_sum(gradients, [0]))
elif grad is None:
aggregated_grad.append(None) # pass-through.
else:
if not isinstance(grad, ops.IndexedSlices):
raise ValueError("Unknown grad type!")
aggregated_grad.append(self._aggregate_sparse_grad(grad, var,
train_ops))
aggregated_grads_and_vars = zip(aggregated_grad, var_list)
# sync_op will be assigned to the same device as the global step.
with ops.device(global_step.device), ops.name_scope(""):
with tf.control_dependencies([tf.Print(global_step, [global_step], "~~~~~ daniter~~~ GLobalstep:")]):
update_op = self._opt.apply_gradients(aggregated_grads_and_vars,
global_step)
# Create token queue.
with ops.device(global_step.device), ops.name_scope(""):
sync_token_queue = (
data_flow_ops.FIFOQueue(-1,
global_step.dtype.base_dtype,
shapes=(),
shared_name="sync_token_q"))
self._sync_token_queue = sync_token_queue
      # dummy_queue is passed to the queue runner. Don't use the real queues
      # because the queue runner doesn't automatically reopen queues on the PS
      # devices once it has closed them.
dummy_queue = (
data_flow_ops.FIFOQueue(1,
types_pb2.DT_INT32,
shapes=(),
shared_name="dummy_queue"))
# Clear all the gradients queues in case there are stale gradients.
clear_queue_ops = []
with ops.control_dependencies([update_op]):
for queue, dev in self._one_element_queue_list:
with ops.device(dev):
stale_grads = queue.dequeue_many(queue.size())
clear_queue_ops.append(stale_grads)
for queue, dev in self._sparse_grad_queues_and_devs:
with ops.device(dev):
_, stale_indices = queue.dequeue_many(queue.size())
clear_queue_ops.append(stale_indices)
with ops.device(global_step.device):
self._clean_up_op = control_flow_ops.abort(
error_msg="From sync_replicas")
# According to the staleness, select between the enqueue op (real_grad)
# or no-op (no_op_grad). Effectively dropping all the stale gradients.
no_op_grad = lambda: [control_flow_ops.no_op(name="no_grad_enqueue")]
real_grad = lambda: [control_flow_ops.group(*train_ops)]
final_train_ops = control_flow_ops.cond(is_stale, no_op_grad, real_grad)
with ops.device(global_step.device), ops.name_scope(""):
# Replicas have to wait until they can get a token from the token queue.
with ops.control_dependencies([final_train_ops]):
token = sync_token_queue.dequeue()
train_op = state_ops.scatter_update(self._local_steps,
self._replica_id, token)
with ops.control_dependencies(clear_queue_ops):
#with ops.control_dependencies([tf.Print(tf.constant(2), [tf.constant(2)], "~~~~~ daniter~~~~~~ sync")]):
# Sync_op needs to insert tokens to the token queue at the end of the
# step so the replicas can fetch them to start the next step.
        # Note that ref() is used to avoid reading from the identity, which
        # would return a stale value of the step.
tokens = array_ops.fill([self._tokens_per_step], global_step.ref())
sync_op = sync_token_queue.enqueue_many((tokens,))
if self._variable_averages is not None:
with ops.control_dependencies([sync_op]), ops.name_scope(""):
sync_op = self._variable_averages.apply(
self._variables_to_average)
self._chief_queue_runner = queue_runner.QueueRunner(dummy_queue,
[sync_op])
self._gradients_applied = True
#train_op = tf.Print(train_op, [train_op], "~~~~~ daniter for the love of god1")
return train_op
def get_chief_queue_runner(self):
"""Returns the QueueRunner for the chief to execute.
This includes the operations to synchronize replicas: aggregate gradients,
apply to variables, increment global step, insert tokens to token queue.
Note that this can only be called after calling apply_gradients() which
actually generates this queuerunner.
Returns:
A `QueueRunner` for chief to execute.
Raises:
ValueError: If this is called before apply_gradients().
"""
if self._gradients_applied is False:
raise ValueError("Should be called after apply_gradients().")
return self._chief_queue_runner
def get_slot(self, *args, **kwargs):
"""Return a slot named "name" created for "var" by the Optimizer.
This simply wraps the get_slot() from the actual optimizer.
Args:
*args: Arguments for get_slot().
**kwargs: Keyword arguments for get_slot().
Returns:
The `Variable` for the slot if it was created, `None` otherwise.
"""
return self._opt.get_slot(*args, **kwargs)
def get_slot_names(self, *args, **kwargs):
"""Return a list of the names of slots created by the `Optimizer`.
This simply wraps the get_slot_names() from the actual optimizer.
Args:
*args: Arguments for get_slot().
**kwargs: Keyword arguments for get_slot().
Returns:
A list of strings.
"""
return self._opt.get_slot_names(*args, **kwargs)
def get_clean_up_op(self):
"""Returns the clean up op for the chief to execute before exit.
This includes the operation to abort the device with the token queue so all
other replicas can also restart. This can avoid potential hang when chief
restarts.
Note that this can only be called after calling apply_gradients().
Returns:
A clean_up_op for chief to execute before exits.
Raises:
ValueError: If this is called before apply_gradients().
"""
if self._gradients_applied is False:
raise ValueError(
"get_clean_up_op() should be called after apply_gradients().")
return self._clean_up_op
def get_init_tokens_op(self, num_tokens=-1):
"""Returns the op to fill the sync_token_queue with the tokens.
This is supposed to be executed in the beginning of the chief/sync thread
so that even if the total_num_replicas is less than replicas_to_aggregate,
the model can still proceed as the replicas can compute multiple steps per
variable update. Make sure:
`num_tokens >= replicas_to_aggregate - total_num_replicas`.
Args:
num_tokens: Number of tokens to add to the queue.
Returns:
An op for the chief/sync replica to fill the token queue.
Raises:
ValueError: If this is called before apply_gradients().
ValueError: If num_tokens are smaller than replicas_to_aggregate -
total_num_replicas.
"""
if self._gradients_applied is False:
raise ValueError(
"get_init_tokens_op() should be called after apply_gradients().")
tokens_needed = self._replicas_to_aggregate - self._total_num_replicas
if num_tokens == -1:
num_tokens = self._replicas_to_aggregate
elif num_tokens < tokens_needed:
raise ValueError(
"Too few tokens to finish the first step: %d (given) vs %d (needed)" %
(num_tokens, tokens_needed))
if num_tokens > 0:
with ops.device(self._global_step.device), ops.name_scope(""):
tokens = array_ops.fill([num_tokens],
self._global_step.ref())
init_tokens = self._sync_token_queue.enqueue_many((tokens,))
else:
init_tokens = control_flow_ops.no_op(name="no_init_tokens")
return init_tokens
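# Editor's note: NumPy sketch of the index de-duplication done at the end of
# ComputeGroupOptimizer._aggregate_sparse_grad() above (array_ops.unique plus
# math_ops.unsorted_segment_sum), using plain arrays. Illustrative only.
def _sum_duplicate_indices_sketch(values, indices):
  """Sum rows of `values` that share the same entry in `indices`."""
  import numpy as np
  values = np.asarray(values, dtype=np.float64)
  indices = np.asarray(indices)
  new_indices, inverse = np.unique(indices, return_inverse=True)
  summed = np.zeros((len(new_indices),) + values.shape[1:], dtype=values.dtype)
  np.add.at(summed, inverse, values)  # unbuffered scatter-add, like segment_sum
  return summed, new_indices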
| models-master | inception/inception/compute_group_optimizer.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Read and preprocess image data.
Image processing occurs on a single image at a time. Images are read and
preprocessed in parallel across multiple threads. The resulting images
are concatenated together to form a single batch for training or evaluation.
-- Provide processed image data for a network:
inputs: Construct batches of evaluation examples of images.
distorted_inputs: Construct batches of training examples of images.
batch_inputs: Construct batches of training or evaluation examples of images.
-- Data processing:
parse_example_proto: Parses an Example proto containing a training example
of an image.
-- Image decoding:
decode_jpeg: Decode a JPEG encoded string into a 3-D float32 Tensor.
-- Image preprocessing:
image_preprocessing: Decode and preprocess one image for evaluation or training
distort_image: Distort one image for training a network.
eval_image: Prepare one image for evaluation.
distort_color: Distort the color in one image for training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
HACK=True
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('batch_size', 32,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_integer('image_size', 256,#299,
"""Provide square images of this size.""")
tf.app.flags.DEFINE_integer('num_preprocess_threads', 4,
"""Number of preprocessing threads per tower. """
"""Please make this a multiple of 4.""")
tf.app.flags.DEFINE_integer('num_readers', 4,
"""Number of parallel readers during train.""")
# Images are preprocessed asynchronously using multiple threads specified by
# --num_preprocess_threads and the resulting processed images are stored in a
# random shuffling queue. The shuffling queue dequeues --batch_size images
# for processing on a given Inception tower. A larger shuffling queue guarantees
# better mixing across examples within a batch and results in slightly higher
# predictive performance in a trained model. Empirically,
# --input_queue_memory_factor=16 works well. A value of 16 implies a queue size
# of 1024*16 images. Assuming RGB 299x299 images, this implies a queue size of
# 16GB. If the machine is memory limited, then decrease this factor to
# decrease the CPU memory footprint, accordingly.
tf.app.flags.DEFINE_integer('input_queue_memory_factor', 16,
"""Size of the queue of preprocessed images. """
"""Default is ideal but try smaller values, e.g. """
"""4, 2 or 1, if host memory is constrained. See """
"""comments in code for more details.""")
tf.app.flags.DEFINE_integer('DANITER_SEED', 8732, 'random seed')
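# Editor's note: rough sketch of the shuffling-queue memory estimate described
# in the comment above, assuming float32 RGB images and the 1024 examples per
# shard mentioned there; helper name and defaults are illustrative only.
def _queue_memory_gb_sketch(image_size=299, examples_per_shard=1024,
                            input_queue_memory_factor=16):
  bytes_per_image = image_size * image_size * 3 * 4  # height * width * RGB * float32
  queue_images = examples_per_shard * input_queue_memory_factor
  return queue_images * bytes_per_image / (1024.0 ** 3)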
def inputs(dataset, batch_size=None, num_preprocess_threads=None):
"""Generate batches of ImageNet images for evaluation.
Use this function as the inputs for evaluating a network.
Note that some (minimal) image preprocessing occurs during evaluation
including central cropping and resizing of the image to fit the network.
Args:
dataset: instance of Dataset class specifying the dataset.
batch_size: integer, number of examples in batch
num_preprocess_threads: integer, total number of preprocessing threads but
None defaults to FLAGS.num_preprocess_threads.
Returns:
images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
image_size, 3].
labels: 1-D integer Tensor of [FLAGS.batch_size].
"""
if not batch_size:
batch_size = FLAGS.batch_size
# Force all input processing onto CPU in order to reserve the GPU for
# the forward inference and back-propagation.
with tf.device('/cpu:0'):
images, labels = batch_inputs(
dataset, batch_size, train=False,
num_preprocess_threads=num_preprocess_threads,
num_readers=1)
return images, labels
def distorted_inputs(dataset, batch_size=None, num_preprocess_threads=None):
"""Generate batches of distorted versions of ImageNet images.
Use this function as the inputs for training a network.
Distorting images provides a useful technique for augmenting the data
set during training in order to make the network invariant to aspects
  of the image that do not affect the label.
Args:
dataset: instance of Dataset class specifying the dataset.
batch_size: integer, number of examples in batch
num_preprocess_threads: integer, total number of preprocessing threads;
if None, defaults to FLAGS.num_preprocess_threads.
Returns:
images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
FLAGS.image_size, 3].
labels: 1-D integer Tensor of [batch_size].
"""
if not batch_size:
batch_size = FLAGS.batch_size
# Force all input processing onto CPU in order to reserve the GPU for
# the forward inference and back-propagation.
with tf.device('/cpu:0'):
images, labels = batch_inputs(
dataset, batch_size, train=True,
num_preprocess_threads=num_preprocess_threads,
num_readers=FLAGS.num_readers)
return images, labels
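# Example usage of the two input functions above (a minimal sketch, not part of
# the original pipeline). It assumes a Dataset subclass exposing data_files()
# and reader(), as elsewhere in this repo, plus the queue runners that the
# TF 0.x input pipeline requires:
#
#   dataset = SomeDataset(subset='train')   # hypothetical Dataset subclass
#   images, labels = distorted_inputs(dataset, batch_size=32)
#   with tf.Session() as sess:
#     coord = tf.train.Coordinator()
#     threads = tf.train.start_queue_runners(sess=sess, coord=coord)
#     image_batch, label_batch = sess.run([images, labels])
#     coord.request_stop()
#     coord.join(threads)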
def decode_jpeg(image_buffer, scope=None):
"""Decode a JPEG string into one 3-D float image Tensor.
Args:
image_buffer: scalar string Tensor.
scope: Optional scope for op_scope.
Returns:
3-D float Tensor with values ranging from [0, 1).
"""
with tf.op_scope([image_buffer], scope, 'decode_jpeg'):
# Decode the string as an RGB JPEG.
# Note that the resulting image contains an unknown height and width
# that is set dynamically by decode_jpeg. In other words, the height
# and width of image is unknown at compile-time.
image = tf.image.decode_jpeg(image_buffer, channels=3)
# After this point, all image pixels reside in [0,1)
# until the very end, when they're rescaled to (-1, 1). The various
# adjust_* ops all require this range for dtype float.
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
return image
def distort_color(image, thread_id=0, scope=None):
"""Distort the color of the image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
Rather than adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.
Args:
image: Tensor containing single image.
thread_id: preprocessing thread ID.
scope: Optional scope for op_scope.
Returns:
color-distorted image
"""
with tf.op_scope([image], scope, 'distort_color'):
color_ordering = thread_id % 2
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
# The random_* ops do not necessarily clamp.
image = tf.clip_by_value(image, 0.0, 1.0)
return image
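# Example (sketch): different preprocessing threads apply the color ops in a
# different order, so the same decoded image (a float Tensor in [0, 1]) yields
# slightly different augmentations per thread:
#
#   distorted_a = distort_color(image, thread_id=0)  # brightness, saturation, hue, contrast
#   distorted_b = distort_color(image, thread_id=1)  # brightness, contrast, saturation, hue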
def distort_image(image, height, width, bbox, thread_id=0, scope=None):
"""Distort one image for training a network.
Distorting images provides a useful technique for augmenting the data
set during training in order to make the network invariant to aspects
of the image that do not affect the label.
Args:
image: 3-D float Tensor of image
height: integer
width: integer
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax].
thread_id: integer indicating the preprocessing thread.
scope: Optional scope for op_scope.
Returns:
3-D float Tensor of distorted image used for training.
"""
with tf.op_scope([image, height, width, bbox], scope, 'distort_image'):
tf.set_random_seed(FLAGS.DANITER_SEED)
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
# Display the bounding box in the first thread only.
if not thread_id:
image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
bbox)
tf.image_summary('image_with_bounding_boxes', image_with_box)
# A large fraction of image datasets contain a human-annotated bounding
# box delineating the region of the image containing the object of interest.
# We choose to create a new bounding box for the object which is a randomly
# distorted version of the human-annotated bounding box that obeys an allowed
# range of aspect ratios, sizes and overlap with the human-annotated
# bounding box. If no box is supplied, then we assume the bounding box is
# the entire image.
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bbox,
min_object_covered=0.1,
aspect_ratio_range=[0.75, 1.33],
area_range=[0.05, 1.0],
max_attempts=100,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box
if not thread_id:
image_with_distorted_box = tf.image.draw_bounding_boxes(
tf.expand_dims(image, 0), distort_bbox)
tf.image_summary('images_with_distorted_bounding_box',
image_with_distorted_box)
# Crop the image to the specified bounding box.
distorted_image = tf.slice(image, bbox_begin, bbox_size)
# This resizing operation may distort the images because the aspect
# ratio is not respected. We select a resize method in a round robin
# fashion based on the thread number.
# Note that ResizeMethod contains 4 enumerated resizing methods.
resize_method = thread_id % 4
distorted_image = tf.image.resize_images(distorted_image, height, width,
resize_method)
# Restore the shape since the dynamic slice based upon the bbox_size loses
# the third dimension.
distorted_image.set_shape([height, width, 3])
if not thread_id:
tf.image_summary('cropped_resized_image',
tf.expand_dims(distorted_image, 0))
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
# Randomly distort the colors.
distorted_image = distort_color(distorted_image, thread_id)
if not thread_id:
tf.image_summary('final_distorted_image',
tf.expand_dims(distorted_image, 0))
return distorted_image
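# Example of the bbox layout distort_image expects (sketch; the box values are
# illustrative): shape [1, num_boxes, 4], ordered [ymin, xmin, ymax, xmax] with
# coordinates in [0, 1). A single box covering the central half of the image:
#
#   bbox = tf.constant([[[0.25, 0.25, 0.75, 0.75]]])
#   distorted = distort_image(image, FLAGS.image_size, FLAGS.image_size, bbox)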
def eval_image(image, height, width, scope=None):
"""Prepare one image for evaluation.
Args:
image: 3-D float Tensor
height: integer
width: integer
scope: Optional scope for op_scope.
Returns:
3-D float Tensor of prepared image.
"""
with tf.op_scope([image, height, width], scope, 'eval_image'):
# Crop the central region of the image with an area containing 87.5% of
# the original image.
image = tf.image.central_crop(image, central_fraction=0.875)
# Resize the image to the original height and width.
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width],
align_corners=False)
image = tf.squeeze(image, [0])
return image
def image_preprocessing(image_buffer, bbox, train, thread_id=0):
"""Decode and preprocess one image for evaluation or training.
Args:
image_buffer: JPEG encoded string Tensor
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
train: boolean
thread_id: integer indicating preprocessing thread
Returns:
3-D float Tensor containing an appropriately scaled image
Raises:
ValueError: if user does not provide bounding box
"""
if not HACK:
if bbox is None:
raise ValueError('Please supply a bounding box.')
if HACK:
image = image_buffer
#image = image.reshape([256, 256, 3])
else:
image = decode_jpeg(image_buffer)
height = FLAGS.image_size
width = FLAGS.image_size
if not HACK:
if train:
image = distort_image(image, height, width, bbox, thread_id)
else:
image = eval_image(image, height, width)
# Finally, rescale to [-1,1] instead of [0, 1)
image = tf.sub(image, 0.5)
image = tf.mul(image, 2.0)
return image
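# Worked example of the final rescaling above: a pixel at 0.75 in [0, 1) maps
# to (0.75 - 0.5) * 2.0 = 0.5 in [-1, 1), and 0.0 maps to -1.0. Note that when
# HACK is enabled, the tensor coming from parse_daniter_example (below) is
# already shifted to [-0.5, 0.5), so the values leaving this function actually
# lie in [-2, 0) rather than [-1, 1).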
def parse_daniter_example(example_serialized):
# Dense features in Example proto.
feature_map = {
'height': tf.FixedLenFeature([], dtype=tf.int64,
default_value=0),
'width': tf.FixedLenFeature([1], dtype=tf.int64,
default_value=0),
'depth': tf.FixedLenFeature([], dtype=tf.int64,
default_value=0),
'label': tf.FixedLenFeature([], dtype=tf.int64,
default_value=-1),
'image_raw': tf.FixedLenFeature([], dtype=tf.string,
default_value=''),
}
features = tf.parse_single_example(example_serialized, feature_map)
label = tf.cast(features['label'], dtype=tf.int32)
image = tf.decode_raw(features['image_raw'], tf.uint8)
image = tf.reshape(image, [256, 256, 3])
print("not cropping. Imagesize: %d" % FLAGS.image_size)
#image = tf.image.resize_image_with_crop_or_pad(image, FLAGS.image_size, FLAGS.image_size) # put this back!
image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
return image, label
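# Sketch of a record that this parser accepts (hypothetical writer code; the
# feature names mirror the feature_map above, and image_array is assumed to be
# a uint8 numpy array of shape [256, 256, 3]):
#
#   writer = tf.python_io.TFRecordWriter('/tmp/train.tfrecords')
#   example = tf.train.Example(features=tf.train.Features(feature={
#       'height': tf.train.Feature(int64_list=tf.train.Int64List(value=[256])),
#       'width': tf.train.Feature(int64_list=tf.train.Int64List(value=[256])),
#       'depth': tf.train.Feature(int64_list=tf.train.Int64List(value=[3])),
#       'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),
#       'image_raw': tf.train.Feature(
#           bytes_list=tf.train.BytesList(value=[image_array.tostring()])),
#   }))
#   writer.write(example.SerializeToString())
#   writer.close()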
def parse_example_proto(example_serialized):
"""Parses an Example proto containing a training example of an image.
The output of the build_image_data.py image preprocessing script is a dataset
containing serialized Example protocol buffers. Each Example proto contains
the following fields:
image/height: 462
image/width: 581
image/colorspace: 'RGB'
image/channels: 3
image/class/label: 615
image/class/synset: 'n03623198'
image/class/text: 'knee pad'
image/object/bbox/xmin: 0.1
image/object/bbox/xmax: 0.9
image/object/bbox/ymin: 0.2
image/object/bbox/ymax: 0.6
image/object/bbox/label: 615
image/format: 'JPEG'
image/filename: 'ILSVRC2012_val_00041207.JPEG'
image/encoded: <JPEG encoded string>
Args:
example_serialized: scalar Tensor tf.string containing a serialized
Example protocol buffer.
Returns:
image_buffer: Tensor tf.string containing the contents of a JPEG file.
label: Tensor tf.int32 containing the label.
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
text: Tensor tf.string containing the human-readable label.
"""
# Dense features in Example proto.
feature_map = {
'image/encoded': tf.FixedLenFeature([], dtype=tf.string,
default_value=''),
'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64,
default_value=-1),
'image/class/text': tf.FixedLenFeature([], dtype=tf.string,
default_value=''),
}
sparse_float32 = tf.VarLenFeature(dtype=tf.float32)
# Sparse features in Example proto.
feature_map.update(
{k: sparse_float32 for k in ['image/object/bbox/xmin',
'image/object/bbox/ymin',
'image/object/bbox/xmax',
'image/object/bbox/ymax']})
features = tf.parse_single_example(example_serialized, feature_map)
label = tf.cast(features['image/class/label'], dtype=tf.int32)
xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)
xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)
ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)
# Note that the coordinates are ordered (y, x) to match the
# [ymin, xmin, ymax, xmax] convention expected by TensorFlow's bounding-box ops.
bbox = tf.concat(0, [ymin, xmin, ymax, xmax])
# Force the variable number of bounding boxes into the shape
# [1, num_boxes, coords].
bbox = tf.expand_dims(bbox, 0)
bbox = tf.transpose(bbox, [0, 2, 1])
return features['image/encoded'], label, bbox, features['image/class/text']
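# Sketch of an Example proto that this parser accepts (values are illustrative
# and taken from the docstring above; _float_feature is a hypothetical helper
# and jpeg_bytes is assumed to hold the raw contents of a JPEG file):
#
#   def _float_feature(values):
#     return tf.train.Feature(float_list=tf.train.FloatList(value=values))
#
#   example = tf.train.Example(features=tf.train.Features(feature={
#       'image/encoded': tf.train.Feature(
#           bytes_list=tf.train.BytesList(value=[jpeg_bytes])),
#       'image/class/label': tf.train.Feature(
#           int64_list=tf.train.Int64List(value=[615])),
#       'image/class/text': tf.train.Feature(
#           bytes_list=tf.train.BytesList(value=[b'knee pad'])),
#       'image/object/bbox/xmin': _float_feature([0.1]),
#       'image/object/bbox/ymin': _float_feature([0.2]),
#       'image/object/bbox/xmax': _float_feature([0.9]),
#       'image/object/bbox/ymax': _float_feature([0.6]),
#   }))
#   image_buffer, label, bbox, text = parse_example_proto(
#       tf.constant(example.SerializeToString()))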
def batch_inputs(dataset, batch_size, train, num_preprocess_threads=None,
num_readers=1):
"""Contruct batches of training or evaluation examples from the image dataset.
Args:
dataset: instance of Dataset class specifying the dataset.
See dataset.py for details.
batch_size: integer
train: boolean
num_preprocess_threads: integer, total number of preprocessing threads
num_readers: integer, number of parallel readers
Returns:
images: 4-D float Tensor of a batch of images
labels: 1-D integer Tensor of [batch_size].
Raises:
ValueError: if data is not found
"""
with tf.name_scope('batch_processing'):
tf.set_random_seed(FLAGS.DANITER_SEED)
data_files = dataset.data_files()
if data_files is None:
raise ValueError('No data files found for this dataset')
# Create filename_queue
if train:
filename_queue = tf.train.string_input_producer(data_files,
shuffle=True,
capacity=16,
seed=FLAGS.DANITER_SEED)
else:
filename_queue = tf.train.string_input_producer(data_files,
shuffle=False,
capacity=1,
seed=FLAGS.DANITER_SEED)
if num_preprocess_threads is None:
num_preprocess_threads = FLAGS.num_preprocess_threads
#if num_preprocess_threads % 4:
# raise ValueError('Please make num_preprocess_threads a multiple '
# 'of 4 (%d % 4 != 0).', num_preprocess_threads)
if num_readers is None:
num_readers = FLAGS.num_readers
if num_readers < 1:
raise ValueError('Please make num_readers at least 1')
# Approximate number of examples per shard.
examples_per_shard = 1024
# Size the random shuffle queue to balance between good global
# mixing (more examples) and memory use (fewer examples).
# 1 image uses 299*299*3*4 bytes ~= 1.07MB
# The default input_queue_memory_factor is 16, implying a shuffling queue
# size of examples_per_shard * 16 * 1.07MB ~= 17.6GB
min_queue_examples = examples_per_shard * FLAGS.input_queue_memory_factor
if train:
examples_queue = tf.RandomShuffleQueue(
capacity=min_queue_examples + 3 * batch_size,
min_after_dequeue=min_queue_examples,
dtypes=[tf.string], seed=FLAGS.DANITER_SEED)
# examples_queue = tf.FIFOQueue(
# capacity=examples_per_shard + 3 * batch_size,
# dtypes=[tf.string])
else:
examples_queue = tf.FIFOQueue(
capacity=examples_per_shard + 3 * batch_size,
dtypes=[tf.string])
# Create multiple readers to populate the queue of examples.
if num_readers > 1:
enqueue_ops = []
for _ in range(num_readers):
reader = dataset.reader()
_, value = reader.read(filename_queue)
enqueue_ops.append(examples_queue.enqueue([value]))
tf.train.queue_runner.add_queue_runner(
tf.train.queue_runner.QueueRunner(examples_queue, enqueue_ops))
example_serialized = examples_queue.dequeue()
else:
reader = dataset.reader()
_, example_serialized = reader.read(filename_queue)
images_and_labels = []
if HACK:
for thread_id in range(num_preprocess_threads):
# Parse a serialized Example proto to extract the image and metadata.
image_buffer, label_index = parse_daniter_example(example_serialized)
image = image_preprocessing(image_buffer, None, train, thread_id)
images_and_labels.append([image, label_index])
else:
for thread_id in range(num_preprocess_threads):
# Parse a serialized Example proto to extract the image and metadata.
image_buffer, label_index, bbox, _ = parse_example_proto(
example_serialized)
image = image_preprocessing(image_buffer, bbox, train, thread_id)
images_and_labels.append([image, label_index])
images, label_index_batch = tf.train.batch_join(
images_and_labels,
batch_size=batch_size,
capacity=2 * num_preprocess_threads * batch_size)
# Reshape images into these desired dimensions.
height = FLAGS.image_size
width = FLAGS.image_size
depth = 3
images = tf.cast(images, tf.float32)
images = tf.reshape(images, shape=[batch_size, height, width, depth])
# Display the training images in the visualizer.
tf.image_summary('images', images)
return images, tf.reshape(label_index_batch, [batch_size])
| models-master | inception/inception/image_processing.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to evaluate Inception on the flowers data set.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from inception import inception_eval
from inception.flowers_data import FlowersData
FLAGS = tf.app.flags.FLAGS
def main(unused_argv=None):
dataset = FlowersData(subset=FLAGS.subset)
assert dataset.data_files()
if tf.gfile.Exists(FLAGS.eval_dir):
tf.gfile.DeleteRecursively(FLAGS.eval_dir)
tf.gfile.MakeDirs(FLAGS.eval_dir)
inception_eval.evaluate(dataset)
if __name__ == '__main__':
tf.app.run()
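# Example invocation (sketch; the flag names are the ones referenced in main()
# above and in inception_eval, the path is illustrative):
#
#   python flowers_eval.py --eval_dir=/tmp/flowers_eval --subset=validation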
| models-master | inception/inception/flowers_eval.py |