python_code (string, 0–4.04M chars) | repo_name (string, 7–58 chars) | file_path (string, 5–147 chars)
---|---|---|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing
import os
import pdb
import sys
__all__ = ["set_trace"]
_stdin = [None]
_stdin_lock = multiprocessing.Lock()
try:
_stdin_fd = sys.stdin.fileno()
except Exception:
_stdin_fd = None
class MultiprocessingPdb(pdb.Pdb):
"""A Pdb wrapper that works in a multiprocessing environment.
Usage: `from classy_vision.generic import pdb; pdb.set_trace()`
"""
def __init__(self):
pdb.Pdb.__init__(self, nosigint=True)
def _cmdloop(self):
stdin_bak = sys.stdin
with _stdin_lock:
try:
if _stdin_fd is not None:
if not _stdin[0]:
_stdin[0] = os.fdopen(_stdin_fd)
sys.stdin = _stdin[0]
self.cmdloop()
finally:
sys.stdin = stdin_bak
def set_trace():
pdb = MultiprocessingPdb()
pdb.set_trace(sys._getframe().f_back)
| ClassyVision-main | classy_vision/generic/pdb.py |
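# Illustrative usage sketch (not part of the original file): a minimal example of
# driving the MultiprocessingPdb above from spawned workers; assumes Classy Vision is
# importable, and the worker body below is hypothetical. Only the process that calls
# set_trace() holds the shared stdin lock, so a single debugger prompt is active.
import multiprocessing

from classy_vision.generic.pdb import set_trace


def _debug_worker(rank):
    if rank == 0:
        set_trace()  # drops only rank 0 into the debugger prompt


if __name__ == "__main__":
    procs = [multiprocessing.Process(target=_debug_worker, args=(r,)) for r in range(2)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()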
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
def debug_info(type, value, tb):
if hasattr(sys, "ps1") or not sys.stderr.isatty():
sys.__excepthook__(type, value, tb)
else:
import pdb
import traceback
traceback.print_exception(type, value, tb)
print()
pdb.post_mortem(tb)
| ClassyVision-main | classy_vision/generic/debug.py |
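# Illustrative usage sketch (not part of the original file): installing debug_info
# above as the global excepthook, mirroring what opts.py does for --debug further
# below; assumes Classy Vision is importable, and the failing function is hypothetical.
import sys

from classy_vision.generic.debug import debug_info

if __name__ == "__main__":
    sys.excepthook = debug_info

    def fail():
        return 1 / 0  # uncaught -> traceback plus a pdb post-mortem when run in a TTY

    fail()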
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from collections import defaultdict, deque
from time import perf_counter
from typing import List, Mapping, Optional, Tuple
import torch
from torch.cuda import Event as CudaEvent
class PerfTimer:
"""
Very simple timing wrapper, with context manager wrapping.
Typical usage:
with PerfTimer('forward_pass', perf_stats):
model.forward(data)
# ...
with PerfTimer('backward_pass', perf_stats):
model.backward(loss)
# ...
print(perf_stats.report_str())
Note that timer stats accumulate by name, so re-using a name effectively
resumes that timer's accumulated stats.
You can also use it without context manager, i.e. via start() / stop() directly.
If supplied PerfStats is constructed with use_cuda_events=True (which is default),
then Cuda events will be added to correctly track time of async execution
of Cuda kernels:
with PerfTimer('foobar', perf_stats):
some_cpu_work()
schedule_some_cuda_work()
In example above, the "Host" column will capture elapsed time from the perspective
of the Python process, and "CudaEvent" column will capture elapsed time between
scheduling of Cuda work (within the PerfTimer scope) and completion of this work,
some of which might happen outside the PerfTimer scope.
If perf_stats is None, using PerfTimer does nothing.
"""
def __init__(self, timer_name: str, perf_stats: Optional["PerfStats"]):
self.skip: bool = False
if perf_stats is None:
self.skip = True
return
self.name: str = timer_name
self.elapsed: float = 0.0
self._last_interval: float = 0.0
self._perf_stats: PerfStats = perf_stats
self._is_running: bool = False
if perf_stats.use_cuda_events():
self._cuda_event_intervals: List[Tuple[CudaEvent, CudaEvent]] = []
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exception, traceback):
self.stop()
if exc_type is None:
# Only record timer value if with-context finished without error
self.record()
return False # re-raise if there was exception
def start(self):
if self.skip or self._is_running:
return
self._last_interval = 0.0
self._is_running = True
self._start_time: float = perf_counter()
if self._perf_stats.use_cuda_events():
self._start_event = torch.cuda.Event(enable_timing=True)
self._start_event.record()
def stop(self):
if self.skip or not self._is_running:
return
self._last_interval = perf_counter() - self._start_time
self.elapsed += self._last_interval
if self._perf_stats.use_cuda_events():
# Two cuda events will measure real GPU time within PerfTimer scope:
end_event = torch.cuda.Event(enable_timing=True)
end_event.record()
self._cuda_event_intervals.append((self._start_event, end_event))
self._is_running = False
def record(self):
if self.skip:
return
assert not self._is_running
self._perf_stats.update_with_timer(self)
class PerfMetric:
"""
Encapsulates numerical tracking of a single metric, with a `.update(value)` API.
Under-the-hood this can additionally keep track of sums, (exp.) moving averages,
sum of squares (e.g. for stdev), filtered values, etc.
"""
# Coefficient for exponential moving average (EMA).
# Value of 0.1 means last 8 values account for ~50% of weight.
EMA_FACTOR = 0.1
def __init__(self):
self.last_value: Optional[float] = None
self.smoothed_value: Optional[float] = None
self.sum_values: float = 0.0
self.num_updates: int = 0
def update(self, value: float):
self.last_value = value
if self.smoothed_value is None:
self.smoothed_value = value
else:
# TODO (T47970762): correct for initialization bias
self.smoothed_value = (
PerfMetric.EMA_FACTOR * value
+ (1.0 - PerfMetric.EMA_FACTOR) * self.smoothed_value
)
self.sum_values += value
self.num_updates += 1
def get_avg(self):
if self.num_updates == 0:
return 0.0
else:
return self.sum_values / self.num_updates
class PerfStats:
"""
Accumulate stats (from timers) over many iterations
"""
MAX_PENDING_TIMERS = 1000
def __init__(self, use_cuda_events=True):
self._host_stats: Mapping[str, PerfMetric] = defaultdict(PerfMetric)
self._cuda_stats: Mapping[str, PerfMetric] = defaultdict(PerfMetric)
if use_cuda_events:
if torch.cuda.is_available():
self._cuda_pending_timers = deque(maxlen=PerfStats.MAX_PENDING_TIMERS)
else:
logging.warning("CUDA unavailable: CUDA events are not logged.")
self._cuda_pending_timers = None
else:
self._cuda_pending_timers = None
def update_with_timer(self, timer: PerfTimer):
self._host_stats[timer.name].update(timer._last_interval)
if self.use_cuda_events():
if len(self._cuda_pending_timers) >= self._cuda_pending_timers.maxlen:
logging.error(
"Too many pending timers. CudaEvent-based stats will be inaccurate!"
)
else:
self._cuda_pending_timers.append(timer)
self._process_cuda_events()
def _process_cuda_events(self):
"""
Service pending timers. Dequeue timers and aggregate Cuda time intervals,
until the first "pending" timer (i.e. dependent on a not-yet-ready cuda event).
"""
while len(self._cuda_pending_timers) > 0:
timer = self._cuda_pending_timers[0]
elapsed_cuda = 0.0
for ev_start, ev_end in timer._cuda_event_intervals:
if not ev_start.query() or not ev_end.query():
# Cuda events associated with this timer aren't ready yet,
# stop servicing the queue.
return
# Use seconds (instead of ms) for consistency with "host" timers
elapsed_cuda += ev_start.elapsed_time(ev_end) / 1000.0
# All time intervals for this timer are now accounted for.
# Aggregate stats and pop from pending queue.
self._cuda_stats[timer.name].update(elapsed_cuda)
self._cuda_pending_timers.popleft()
def report_str(self):
"""
Fancy column-aligned human-readable report.
If using Cuda events, calling this invokes cuda.synchronize(), which is needed
to capture pending Cuda work in the report.
"""
if self.use_cuda_events():
torch.cuda.synchronize()
self._process_cuda_events()
name_width = max(len(k) for k in self._host_stats.keys())
header = ("{:>" + str(name_width + 4) + "s} {:>7s} {:>7s}").format(
"Timer", "Host", "CudaEvent"
)
row_fmt = "{:>" + str(name_width + 4) + "s}: {:>7.2f} ms {:>7.2f} ms"
rows = []
rows.append(header)
for name, metric in self._host_stats.items():
rows.append(
row_fmt.format(
name,
metric.get_avg() * 1000.0,
self._cuda_stats[name].get_avg() * 1000.0,
)
)
return "\n".join(rows)
def use_cuda_events(self):
return torch.cuda.is_available() and self._cuda_pending_timers is not None
def __str__(self):
return str((self._host_stats, self._cuda_stats))
| ClassyVision-main | classy_vision/generic/perf_stats.py |
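# Illustrative usage sketch (not part of the original file): exercising the
# PerfTimer/PerfStats API documented in the docstrings above on a synthetic workload;
# assumes PyTorch and Classy Vision are installed. The timer names are arbitrary.
import torch

from classy_vision.generic.perf_stats import PerfStats, PerfTimer

if __name__ == "__main__":
    perf_stats = PerfStats(use_cuda_events=torch.cuda.is_available())
    x = torch.randn(256, 256)
    for _ in range(5):
        with PerfTimer("matmul", perf_stats):
            y = x @ x
        with PerfTimer("softmax", perf_stats):
            torch.softmax(y, dim=-1)
    # Stats accumulate by timer name across iterations.
    print(perf_stats.report_str())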
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
from classy_vision.generic.util import is_pos_int
def add_generic_args(parser):
"""
Adds generic command-line arguments for convnet training / testing to parser.
"""
parser.add_argument(
"--config_file", type=str, help="path to config file for model", required=True
)
parser.add_argument(
"--checkpoint_folder",
default="",
type=str,
help="""folder to use for saving checkpoints:
epochal checkpoints are stored as model_<epoch>.torch,
latest epoch checkpoint is at checkpoint.torch""",
)
parser.add_argument(
"--checkpoint_load_path",
default="",
type=str,
help="""path to load a checkpoint from, which can be a file or a directory:
If the path is a directory, the checkpoint file is assumed to be
checkpoint.torch""",
)
parser.add_argument(
"--pretrained_checkpoint_path",
default="",
type=str,
help="""path to load a pre-trained checkpoints from, which can be a file or a
directory:
If the path is a directory, the checkpoint file is assumed to be
checkpoint.torch. This checkpoint is only used for fine-tuning
tasks, and training will not resume from this checkpoint.""",
)
parser.add_argument(
"--checkpoint_period",
default=1,
type=int,
help="""Checkpoint every x phases (default 1)""",
)
parser.add_argument(
"--show_progress",
default=False,
action="store_true",
help="shows progress bar during training / testing",
)
parser.add_argument(
"--skip_tensorboard",
default=False,
action="store_true",
help="do not perform tensorboard visualization",
)
parser.add_argument(
"--visdom_server",
default="",
type=str,
help="visdom server to use (default None)",
)
parser.add_argument(
"--visdom_port",
default=8097,
type=int,
help="port of visdom server (default = 8097)",
)
parser.add_argument(
"--profiler",
default=False,
action="store_true",
help="specify this argument to profile training code",
)
parser.add_argument(
"--debug",
default=False,
action="store_true",
help="specify this argument for debugging mode",
)
parser.add_argument(
"--ignore_checkpoint_config",
default=False,
action="store_true",
help="""specify this argument to ignore
the compatibility of the config (or lack of config) attached
to the checkpoint; this will allow mismatches between
the training specified in the config and the
actual training of the model""",
)
parser.add_argument(
"--log_freq",
default=5,
type=int,
help="Logging frequency for LossLrMeterLoggingHook (default 5)",
)
parser.add_argument(
"--image_backend",
default="PIL",
type=str,
help="torchvision image decoder backend (PIL or accimage). Default PIL",
)
parser.add_argument(
"--video_backend",
default="pyav",
type=str,
help="torchvision video decoder backend (pyav or video_reader). Default pyav",
)
parser.add_argument(
"--distributed_backend",
default="none",
type=str,
help="""Distributed backend: either 'none' (for non-distributed runs)
or 'ddp' (for distributed runs). Default none.""",
)
return parser
def check_generic_args(args):
"""
Perform assertions on generic command-line arguments.
"""
# check types and values:
assert is_pos_int(args.visdom_port), "incorrect visdom port"
# create checkpoint folder if it does not exist:
if args.checkpoint_folder != "" and not os.path.exists(args.checkpoint_folder):
os.makedirs(args.checkpoint_folder, exist_ok=True)
assert os.path.exists(args.checkpoint_folder), (
"could not create folder %s" % args.checkpoint_folder
)
# when in debugging mode, enter debugger upon error:
if args.debug:
import sys
from classy_vision.generic.debug import debug_info
sys.excepthook = debug_info
# check visdom server name:
if args.visdom_server != "":
if args.visdom_server.startswith("https://"):
print("WARNING: Visdom does not work over HTTPS.")
args.visdom_server = args.visdom_server[8:]
if not args.visdom_server.startswith("http://"):
args.visdom_server = "http://%s" % args.visdom_server
# return input arguments:
return args
def get_parser():
"""
Return a standard command-line parser.
"""
parser = argparse.ArgumentParser(
description="""Start a Classy Vision training job.
This can be used for training on your local machine, using CPU or GPU, and
for distributed training. This script also supports Tensorboard, Visdom and
checkpointing."""
)
parser = add_generic_args(parser)
return parser
def parse_train_arguments(parser=None):
"""
Assert and parse the command-line arguments of a given (or default) parser.
"""
# set input arguments:
if parser is None:
parser = get_parser()
# parse input arguments:
args = parser.parse_args()
# assertions:
args = check_generic_args(args)
return args
| ClassyVision-main | classy_vision/generic/opts.py |
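# Illustrative usage sketch (not part of the original file): driving the parser above
# from a training script; assumes Classy Vision is installed and the script is invoked
# with at least --config_file <path> on the command line (the flag is required).
from classy_vision.generic.opts import parse_train_arguments

if __name__ == "__main__":
    args = parse_train_arguments()  # builds the default parser, parses, and validates
    print(args.config_file, args.checkpoint_folder, args.distributed_backend)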
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections.abc as abc
import logging
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from classy_vision.generic.util import (
eval_model,
get_batchsize_per_replica,
get_model_dummy_input,
is_leaf,
is_on_gpu,
)
from torch.cuda import cudart
class ClassyProfilerError(Exception):
pass
class ClassyProfilerNotImplementedError(ClassyProfilerError):
def __init__(self, module: nn.Module):
self.module = module
super().__init__(f"Profiling not implemented for module: {self.module}")
def profile(
model: nn.Module,
batchsize_per_replica: int = 32,
input_shape: Tuple[int] = (3, 224, 224),
use_nvprof: bool = False,
input_key: Optional[Union[str, List[str]]] = None,
):
"""
Performs CPU or GPU profiling of the specified model on the specified input.
"""
# assertions:
if use_nvprof:
raise ClassyProfilerError("Profiling not supported with nvprof")
# FIXME (mannatsingh): in case of use_nvprof, exit() is called at the end
# and we do not return a profile.
assert is_on_gpu(model), "can only nvprof model that lives on GPU"
logging.info("CUDA profiling: Make sure you are running under nvprof!")
# input for model:
input = get_model_dummy_input(
model,
input_shape,
input_key,
batchsize=batchsize_per_replica,
non_blocking=False,
)
# perform profiling in eval mode
with eval_model(model), torch.no_grad():
model(input) # warm up CUDA memory allocator and profiler
if use_nvprof: # nvprof profiling (TODO: Can we infer this?)
cudart().cudaProfilerStart()
model(input)
cudart().cudaProfilerStop()
exit() # exit gracefully
else: # regular profiling
with torch.autograd.profiler.profile(use_cuda=True) as profiler:
model(input)
return profiler
def get_shape(x: Union[Tuple, List, Dict]) -> Union[Tuple, List, Dict]:
"""
Some layers may take/generate a tuple/list/dict/list[dict] as input/output in their forward function.
We recursively query the tensor shapes.
"""
if isinstance(x, (list, tuple)):
assert len(x) > 0, "x of tuple/list type must have at least one element"
return [get_shape(xi) for xi in x]
elif isinstance(x, dict):
return {k: get_shape(v) for k, v in x.items()}
else:
assert isinstance(x, torch.Tensor), "x is expected to be a torch tensor"
return x.size()
def _layer_flops(layer: nn.Module, layer_args: List[Any], y: Any) -> int:
"""
Computes the number of FLOPs required for a single layer.
For common layers, such as Conv1d, the flop compute is implemented in this
centralized place.
For other layers, if it defines a method to compute flops with the signature
below, we will use it to compute flops.
Class MyModule(nn.Module):
def flops(self, x):
...
"""
x = layer_args[0]
# get layer type:
typestr = layer.__repr__()
layer_type = typestr[: typestr.find("(")].strip()
batchsize_per_replica = get_batchsize_per_replica(x)
flops = None
# 1D convolution:
if layer_type in ["Conv1d"]:
# x shape is N x C x W
out_w = int(
(x.size()[2] + 2 * layer.padding[0] - layer.kernel_size[0])
/ layer.stride[0]
+ 1
)
flops = (
batchsize_per_replica
* layer.in_channels
* layer.out_channels
* layer.kernel_size[0]
* out_w
/ layer.groups
)
# 2D convolution:
elif layer_type in ["Conv2d"]:
out_h = int(
(x.size()[2] + 2 * layer.padding[0] - layer.kernel_size[0])
/ layer.stride[0]
+ 1
)
out_w = int(
(x.size()[3] + 2 * layer.padding[1] - layer.kernel_size[1])
/ layer.stride[1]
+ 1
)
flops = (
batchsize_per_replica
* layer.in_channels
* layer.out_channels
* layer.kernel_size[0]
* layer.kernel_size[1]
* out_h
* out_w
/ layer.groups
)
# learned group convolution:
elif layer_type in ["LearnedGroupConv"]:
conv = layer.conv
out_h = int(
(x.size()[2] + 2 * conv.padding[0] - conv.kernel_size[0]) / conv.stride[0]
+ 1
)
out_w = int(
(x.size()[3] + 2 * conv.padding[1] - conv.kernel_size[1]) / conv.stride[1]
+ 1
)
# relu/norm preserve the input shape, so pass x as both the layer args and the output
count1 = _layer_flops(layer.relu, [x], x) + _layer_flops(layer.norm, [x], x)
count2 = (
batchsize_per_replica
* conv.in_channels
* conv.out_channels
* conv.kernel_size[0]
* conv.kernel_size[1]
* out_h
* out_w
/ layer.condense_factor
)
flops = count1 + count2
# non-linearities:
elif layer_type in ["ReLU", "ReLU6", "Tanh", "Sigmoid", "Softmax", "SiLU"]:
flops = x.numel()
# 2D pooling layers:
elif layer_type in ["AvgPool2d", "MaxPool2d"]:
in_h = x.size()[2]
in_w = x.size()[3]
if isinstance(layer.kernel_size, int):
layer.kernel_size = (layer.kernel_size, layer.kernel_size)
kernel_ops = layer.kernel_size[0] * layer.kernel_size[1]
out_h = 1 + int(
(in_h + 2 * layer.padding - layer.kernel_size[0]) / layer.stride
)
out_w = 1 + int(
(in_w + 2 * layer.padding - layer.kernel_size[1]) / layer.stride
)
flops = x.size()[0] * x.size()[1] * out_w * out_h * kernel_ops
# adaptive avg pool2d
# This is approximate and works only for downsampling without padding
# based on aten/src/ATen/native/AdaptiveAveragePooling.cpp
elif layer_type in ["AdaptiveAvgPool2d"]:
in_h = x.size()[2]
in_w = x.size()[3]
if isinstance(layer.output_size, int):
out_h, out_w = layer.output_size, layer.output_size
elif len(layer.output_size) == 1:
out_h, out_w = layer.output_size[0], layer.output_size[0]
else:
out_h, out_w = layer.output_size
if out_h > in_h or out_w > in_w:
raise ClassyProfilerNotImplementedError(layer)
batchsize_per_replica = x.size()[0]
num_channels = x.size()[1]
kh = in_h - out_h + 1
kw = in_w - out_w + 1
kernel_ops = kh * kw
flops = batchsize_per_replica * num_channels * out_h * out_w * kernel_ops
# linear layer:
elif layer_type in ["Linear"]:
weight_ops = layer.weight.numel()
bias_ops = layer.bias.numel() if layer.bias is not None else 0
flops = ((x.numel() / x.size(-1)) if x.ndim > 2 else x.size(0)) * (
weight_ops + bias_ops
)
# batch normalization / layer normalization:
elif layer_type in [
"BatchNorm1d",
"BatchNorm2d",
"BatchNorm3d",
"SyncBatchNorm",
"LayerNorm",
]:
flops = 2 * x.numel()
# 3D convolution
elif layer_type in ["Conv3d"]:
out_t = int(
(x.size()[2] + 2 * layer.padding[0] - layer.kernel_size[0])
// layer.stride[0]
+ 1
)
out_h = int(
(x.size()[3] + 2 * layer.padding[1] - layer.kernel_size[1])
// layer.stride[1]
+ 1
)
out_w = int(
(x.size()[4] + 2 * layer.padding[2] - layer.kernel_size[2])
// layer.stride[2]
+ 1
)
flops = (
batchsize_per_replica
* layer.in_channels
* layer.out_channels
* layer.kernel_size[0]
* layer.kernel_size[1]
* layer.kernel_size[2]
* out_t
* out_h
* out_w
/ layer.groups
)
# 3D pooling layers
elif layer_type in ["AvgPool3d", "MaxPool3d"]:
in_t = x.size()[2]
in_h = x.size()[3]
in_w = x.size()[4]
if isinstance(layer.kernel_size, int):
layer.kernel_size = (
layer.kernel_size,
layer.kernel_size,
layer.kernel_size,
)
if isinstance(layer.padding, int):
layer.padding = (layer.padding, layer.padding, layer.padding)
if isinstance(layer.stride, int):
layer.stride = (layer.stride, layer.stride, layer.stride)
kernel_ops = layer.kernel_size[0] * layer.kernel_size[1] * layer.kernel_size[2]
out_t = 1 + int(
(in_t + 2 * layer.padding[0] - layer.kernel_size[0]) / layer.stride[0]
)
out_h = 1 + int(
(in_h + 2 * layer.padding[1] - layer.kernel_size[1]) / layer.stride[1]
)
out_w = 1 + int(
(in_w + 2 * layer.padding[2] - layer.kernel_size[2]) / layer.stride[2]
)
flops = batchsize_per_replica * x.size()[1] * out_t * out_h * out_w * kernel_ops
# adaptive avg pool3d
# This is approximate and works only for downsampling without padding
# based on aten/src/ATen/native/AdaptiveAveragePooling3d.cpp
elif layer_type in ["AdaptiveAvgPool3d"]:
in_t = x.size()[2]
in_h = x.size()[3]
in_w = x.size()[4]
out_t = layer.output_size[0]
out_h = layer.output_size[1]
out_w = layer.output_size[2]
if out_t > in_t or out_h > in_h or out_w > in_w:
raise ClassyProfilerNotImplementedError(layer)
batchsize_per_replica = x.size()[0]
num_channels = x.size()[1]
kt = in_t - out_t + 1
kh = in_h - out_h + 1
kw = in_w - out_w + 1
kernel_ops = kt * kh * kw
flops = (
batchsize_per_replica * num_channels * out_t * out_w * out_h * kernel_ops
)
elif layer_type in ["Dropout", "Identity"]:
flops = 0
elif hasattr(layer, "flops"):
# If the module already defines a method to compute flops with the signature
# below, we use it to compute flops
#
# Class MyModule(nn.Module):
# def flops(self, x):
# ...
# or
#
# Class MyModule(nn.Module):
# def flops(self, x1, x2):
# ...
flops = layer.flops(*layer_args)
if flops is None:
raise ClassyProfilerNotImplementedError(layer)
message = [
f"module type: {typestr}",
f"input size: {get_shape(x)}",
f"output size: {get_shape(y)}",
f"params(M): {count_params(layer) / 1e6}",
f"flops(M): {int(flops) / 1e6}",
]
logging.debug("\t".join(message))
return int(flops)
def _layer_activations(layer: nn.Module, layer_args: List[Any], out: Any) -> int:
"""
Computes the number of activations produced by a single layer.
Activations are counted only for convolutional and linear layers. To override this
behavior, a layer can define a method to compute activations with the signature
below, which will be used to compute the activations instead.
Class MyModule(nn.Module):
def activations(self, out, *layer_args):
...
"""
typestr = layer.__repr__()
if hasattr(layer, "activations"):
activations = layer.activations(out, *layer_args)
elif isinstance(layer, (nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d)):
activations = out.numel()
else:
return 0
message = [f"module: {typestr}", f"activations: {activations}"]
logging.debug("\t".join(message))
return int(activations)
def summarize_profiler_info(prof: torch.autograd.profiler.profile) -> str:
"""
Summarizes the statistics in the specified profiler.
"""
# create sorted list of times per operator:
op2time = {}
for item in prof.key_averages():
op2time[item.key] = (
item.cpu_time_total / 1000.0,
item.cuda_time_total / 1000.0,
) # to milliseconds
op2time = sorted(op2time.items(), key=operator.itemgetter(1), reverse=True)
# create a string containing the information:
str = "\n%s\tCPU Time\tCUDA Time\n" % ("Key".rjust(20))
for (key, value) in op2time:
str += "%s\t%2.5f ms\t%2.5f ms\n" % (key.rjust(20), value[0], value[1])
return str
class ComplexityComputer:
def __init__(self, compute_fn: Callable, count_unique: bool):
self.compute_fn = compute_fn
self.count_unique = count_unique
self.count = 0
self.seen_modules = set()
def compute(
self, layer: nn.Module, layer_args: List[Any], out: Any, module_name: str
):
if self.count_unique and module_name in self.seen_modules:
return
self.count += self.compute_fn(layer, layer_args, out)
logging.debug(f"module name: {module_name}, count {self.count}")
self.seen_modules.add(module_name)
def reset(self):
self.count = 0
self.seen_modules.clear()
def _patched_computation_module(
module: nn.Module, complexity_computer: ComplexityComputer, module_name: str
):
"""
Patch the module to compute a module's parameters, like FLOPs.
Calls compute_fn and passes the results to the complexity computer.
"""
ty = type(module)
typestring = module.__repr__()
class ComputeModule(ty):
orig_type = ty
def _original_forward(self, *args, **kwargs):
return ty.forward(self, *args, **kwargs)
def forward(self, *args, **kwargs):
out = self._original_forward(*args, **kwargs)
complexity_computer.compute(self, list(args), out, module_name)
return out
def __repr__(self):
return typestring
return ComputeModule
def modify_forward(
model: nn.Module,
complexity_computer: ComplexityComputer,
prefix: str = "",
patch_attr: str = None,
) -> nn.Module:
"""
Modify forward pass to measure a module's parameters, like FLOPs.
"""
# Recursively update all the modules in the model. A module is patched if it
# contains the patch_attr (like the flops() function for FLOPs computation) or it is
# a leaf. We stop recursing if we patch a module since that module is supposed
# to return the results for all its children as well.
# Since this recursion can lead to the same module being patched through different
# paths, we make sure we only patch un-patched modules.
if hasattr(model, "orig_type"):
return model
if is_leaf(model) or (patch_attr is not None and hasattr(model, patch_attr)):
model.__class__ = _patched_computation_module(
model, complexity_computer, prefix
)
else:
for name, child in model.named_children():
modify_forward(
child,
complexity_computer,
prefix=f"{prefix}.{name}",
patch_attr=patch_attr,
)
return model
def restore_forward(model: nn.Module, patch_attr: str = None) -> nn.Module:
"""
Restore original forward in model.
"""
for module in model.modules():
if hasattr(module, "orig_type"):
# module has been patched; un-patch it
module.__class__ = module.orig_type
return model
def compute_complexity(
model: nn.Module,
compute_fn: Callable,
input_shape: Tuple[int],
input_key: Optional[Union[str, List[str]]] = None,
patch_attr: str = None,
compute_unique: bool = False,
) -> int:
"""
Compute the complexity of a forward pass.
Args:
compute_unique: If True, the complexity for a given module is only calculated
once. Otherwise, it is counted every time the module is called.
TODO(@mannatsingh): We have some assumptions about only modules which are leaves
or have patch_attr defined. This should be fixed and generalized if possible.
"""
# assertions, input, and upvalue in which we will perform the count:
assert isinstance(model, nn.Module)
if not isinstance(input_shape, abc.Sequence) and not isinstance(input_shape, dict):
return None
else:
input = get_model_dummy_input(model, input_shape, input_key)
complexity_computer = ComplexityComputer(compute_fn, compute_unique)
# measure FLOPs:
modify_forward(model, complexity_computer, patch_attr=patch_attr)
try:
# compute complexity in eval mode
with eval_model(model), torch.no_grad():
model.forward(input)
finally:
restore_forward(model, patch_attr=patch_attr)
return complexity_computer.count
def compute_flops(
model: nn.Module,
input_shape: Tuple[int] = (3, 224, 224),
input_key: Optional[Union[str, List[str]]] = None,
) -> int:
"""
Compute the number of FLOPs needed for a forward pass.
"""
return compute_complexity(
model,
_layer_flops,
input_shape,
input_key,
patch_attr="flops",
)
def compute_activations(
model: nn.Module,
input_shape: Tuple[int] = (3, 224, 224),
input_key: Optional[Union[str, List[str]]] = None,
) -> int:
"""
Compute the number of activations created in a forward pass.
"""
return compute_complexity(
model,
_layer_activations,
input_shape,
input_key,
patch_attr="activations",
)
def count_params(model: nn.Module) -> int:
"""
Count the number of parameters in a model.
"""
assert isinstance(model, nn.Module)
return sum((parameter.nelement() for parameter in model.parameters()))
| ClassyVision-main | classy_vision/generic/profiler.py |
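# Illustrative usage sketch (not part of the original file): running the complexity
# helpers above on a small hand-built CNN; assumes Classy Vision and PyTorch are
# installed, and the architecture below is arbitrary (it only uses layer types that
# _layer_flops handles).
import torch.nn as nn

from classy_vision.generic.profiler import compute_activations, compute_flops, count_params

if __name__ == "__main__":
    model = nn.Sequential(
        nn.Conv2d(3, 16, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.MaxPool2d(2),
        nn.Conv2d(16, 32, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.AdaptiveAvgPool2d((1, 1)),
    )
    # Each helper runs a dummy forward pass with the given per-sample input shape.
    print("params:", count_params(model))
    print("MFLOPs:", compute_flops(model, input_shape=(3, 32, 32)) / 1e6)
    print("activations:", compute_activations(model, input_shape=(3, 32, 32)))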
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import io
import tempfile
from typing import Any, Callable, List, Tuple
import torch
# Default to GPU 0
_cuda_device_index: int = 0
# Setting _cuda_device_index to -1 internally implies that we should use CPU
_CPU_DEVICE_INDEX = -1
_PRIMARY_RANK = 0
def convert_to_distributed_tensor(tensor: torch.Tensor) -> Tuple[torch.Tensor, str]:
"""
For some backends, such as NCCL, communication only works if the
tensor is on the GPU. This helper function converts to the correct
device and returns the tensor + original device.
"""
orig_device = "cpu" if not tensor.is_cuda else "gpu"
if (
torch.distributed.is_available()
and torch.distributed.get_backend() == torch.distributed.Backend.NCCL
and not tensor.is_cuda
):
tensor = tensor.cuda()
return (tensor, orig_device)
def convert_to_normal_tensor(tensor: torch.Tensor, orig_device: str) -> torch.Tensor:
"""
For some backends, such as NCCL, communication only works if the
tensor is on the GPU. This converts the tensor back to original device.
"""
if tensor.is_cuda and orig_device == "cpu":
tensor = tensor.cpu()
return tensor
def is_distributed_training_run() -> bool:
return (
torch.distributed.is_available()
and torch.distributed.is_initialized()
and (torch.distributed.get_world_size() > 1)
)
def is_primary() -> bool:
"""
Returns True if this is rank 0 of a distributed training job OR if it is
a single trainer job. Otherwise False.
"""
return get_rank() == _PRIMARY_RANK
def all_reduce_mean(tensor: torch.Tensor) -> torch.Tensor:
"""
Wrapper over torch.distributed.all_reduce for performing mean reduction
of tensor over all processes.
"""
return all_reduce_op(
tensor,
torch.distributed.ReduceOp.SUM,
lambda t: t / torch.distributed.get_world_size(),
)
def all_reduce_sum(tensor: torch.Tensor) -> torch.Tensor:
"""
Wrapper over torch.distributed.all_reduce for performing sum
reduction of tensor over all processes in both distributed /
non-distributed scenarios.
"""
return all_reduce_op(tensor, torch.distributed.ReduceOp.SUM)
def all_reduce_min(tensor: torch.Tensor) -> torch.Tensor:
"""
Wrapper over torch.distributed.all_reduce for performing min
reduction of tensor over all processes in both distributed /
non-distributed scenarios.
"""
return all_reduce_op(tensor, torch.distributed.ReduceOp.MIN)
def all_reduce_max(tensor: torch.Tensor) -> torch.Tensor:
"""
Wrapper over torch.distributed.all_reduce for performing max
reduction of tensor over all processes in both distributed /
non-distributed scenarios.
"""
return all_reduce_op(tensor, torch.distributed.ReduceOp.MAX)
def all_reduce_op(
tensor: torch.Tensor,
op: torch.distributed.ReduceOp,
after_op_func: Callable[[torch.Tensor], torch.Tensor] = None,
) -> torch.Tensor:
"""
Wrapper over torch.distributed.all_reduce for performing
reduction of tensor over all processes in both distributed /
non-distributed scenarios.
"""
if is_distributed_training_run():
tensor, orig_device = convert_to_distributed_tensor(tensor)
torch.distributed.all_reduce(tensor, op)
if after_op_func is not None:
tensor = after_op_func(tensor)
tensor = convert_to_normal_tensor(tensor, orig_device)
return tensor
def gather_tensors_from_all(tensor: torch.Tensor) -> List[torch.Tensor]:
"""
Wrapper over torch.distributed.all_gather for performing
'gather' of 'tensor' over all processes in both distributed /
non-distributed scenarios.
"""
if tensor.ndim == 0:
# 0 dim tensors cannot be gathered. so unsqueeze
tensor = tensor.unsqueeze(0)
if is_distributed_training_run():
tensor, orig_device = convert_to_distributed_tensor(tensor)
gathered_tensors = [
torch.zeros_like(tensor) for _ in range(torch.distributed.get_world_size())
]
torch.distributed.all_gather(gathered_tensors, tensor)
gathered_tensors = [
convert_to_normal_tensor(_tensor, orig_device)
for _tensor in gathered_tensors
]
else:
gathered_tensors = [tensor]
return gathered_tensors
def gather_from_all(tensor: torch.Tensor) -> torch.Tensor:
gathered_tensors = gather_tensors_from_all(tensor)
gathered_tensor = torch.cat(gathered_tensors, 0)
return gathered_tensor
def broadcast(tensor: torch.Tensor, src: int = 0) -> torch.Tensor:
"""
Wrapper over torch.distributed.broadcast for broadcasting a tensor from the source
to all processes in both distributed / non-distributed scenarios.
"""
if is_distributed_training_run():
tensor, orig_device = convert_to_distributed_tensor(tensor)
torch.distributed.broadcast(tensor, src)
tensor = convert_to_normal_tensor(tensor, orig_device)
return tensor
def barrier() -> None:
"""
Wrapper over torch.distributed.barrier; returns immediately (instead of
raising an error) if the distributed process group is not initialized.
"""
if not torch.distributed.is_available() or not torch.distributed.is_initialized():
return
torch.distributed.barrier()
def get_world_size() -> int:
"""
Simple wrapper for correctly getting the world size in both distributed
/ non-distributed settings
"""
return (
torch.distributed.get_world_size()
if torch.distributed.is_available() and torch.distributed.is_initialized()
else 1
)
def get_rank() -> int:
"""
Simple wrapper for correctly getting rank in both distributed
/ non-distributed settings
"""
return (
torch.distributed.get_rank()
if torch.distributed.is_available() and torch.distributed.is_initialized()
else 0
)
def get_primary_rank() -> int:
return _PRIMARY_RANK
def set_cuda_device_index(idx: int) -> None:
global _cuda_device_index
_cuda_device_index = idx
torch.cuda.set_device(_cuda_device_index)
def set_cpu_device() -> None:
global _cuda_device_index
_cuda_device_index = _CPU_DEVICE_INDEX
def get_cuda_device_index() -> int:
return _cuda_device_index
def init_distributed_data_parallel_model(
model: torch.nn.Module,
broadcast_buffers: bool = False,
find_unused_parameters: bool = True,
bucket_cap_mb: int = 25,
) -> torch.nn.parallel.DistributedDataParallel:
global _cuda_device_index
if _cuda_device_index == _CPU_DEVICE_INDEX:
# CPU-only model, don't specify device
return torch.nn.parallel.DistributedDataParallel(
model,
broadcast_buffers=broadcast_buffers,
find_unused_parameters=find_unused_parameters,
bucket_cap_mb=bucket_cap_mb,
)
else:
# GPU model
return torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[_cuda_device_index],
output_device=_cuda_device_index,
broadcast_buffers=broadcast_buffers,
find_unused_parameters=find_unused_parameters,
bucket_cap_mb=bucket_cap_mb,
)
def broadcast_object(obj: Any, src: int = _PRIMARY_RANK, use_disk: bool = True) -> Any:
"""Broadcast an object from a source to all workers.
Args:
obj: Object to broadcast, must be serializable
src: Source rank for broadcast (default is primary)
use_disk: If enabled, removes redundant CPU memory copies by writing to
disk
"""
# Either broadcast from primary to the fleet (default),
# or use the src setting as the original rank
if get_rank() == src:
# Emit data
buffer = io.BytesIO()
torch.save(obj, buffer)
data_view = buffer.getbuffer()
length_tensor = torch.LongTensor([len(data_view)])
length_tensor = broadcast(length_tensor, src=src)
data_tensor = torch.ByteTensor(data_view)
data_tensor = broadcast(data_tensor, src=src)
else:
# Fetch from the source
length_tensor = torch.LongTensor([0])
length_tensor = broadcast(length_tensor, src=src)
data_tensor = torch.empty([length_tensor.item()], dtype=torch.uint8)
data_tensor = broadcast(data_tensor, src=src)
if use_disk:
with tempfile.TemporaryFile("r+b") as f:
f.write(data_tensor.numpy())
# remove reference to the data tensor and hope that Python garbage
# collects it
del data_tensor
f.seek(0)
obj = torch.load(f)
else:
buffer = io.BytesIO(data_tensor.numpy())
obj = torch.load(buffer)
return obj
| ClassyVision-main | classy_vision/generic/distributed_util.py |
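# Illustrative usage sketch (not part of the original file): the wrappers above degrade
# gracefully when no process group is initialized, so this runs unchanged in a single
# process; under a distributed launcher the same calls reduce/broadcast across ranks.
# Assumes Classy Vision and PyTorch are installed; the "config" payload is arbitrary.
import torch

from classy_vision.generic.distributed_util import (
    all_reduce_mean,
    broadcast_object,
    get_rank,
    get_world_size,
    is_primary,
)

if __name__ == "__main__":
    print(f"rank {get_rank()} / world size {get_world_size()}, primary={is_primary()}")
    local_metric = torch.tensor([float(get_rank() + 1)])
    print("mean over ranks:", all_reduce_mean(local_metric))
    config = {"lr": 0.1} if is_primary() else None
    config = broadcast_object(config)  # every rank ends up with the primary's object
    print("config:", config)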
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .classy_hub_interface import ClassyHubInterface
__all__ = ["ClassyHubInterface"]
| ClassyVision-main | classy_vision/hub/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Callable, Iterator, List, Optional, Union
import torch
import torch.nn as nn
from classy_vision.dataset import ClassyDataset
from classy_vision.dataset.image_path_dataset import ImagePathDataset
from classy_vision.dataset.transforms import ClassyTransform
from classy_vision.dataset.transforms.util import build_field_transform_default_imagenet
from classy_vision.models import ClassyModel
from classy_vision.tasks import ClassyTask
class ClassyHubInterface:
"""PyTorch Hub interface for classy vision tasks and models.
The task is optional, but a model is guaranteed to be present. Do
not use the constructor directly; instead use from_task() or
from_model() to instantiate the class.
See the examples folder for an example of how to use this class
Attributes:
task: If present, task that can be used to train the torchhub model
model: torchhub model
"""
def __init__(
self, task: Optional[ClassyTask] = None, model: Optional[ClassyModel] = None
) -> None:
"""Constructor for ClassyHubInterface.
Only one of task or model can be specified at construction
time. If task is specified then task.model is used to populate
the model attribute.
Do not use the constructor directly, instead use from_task()
or from_model() to instantiate the class.
Args:
task: task that can be used to train torchhub model,
task.model is used to populate the model attribute
model: torchhub model
"""
self.task = task
if task is None:
assert model is not None, "Need to specify a model if task is None"
self.model = model
else:
assert model is None, "Cannot pass a model if task is not None"
self.model = task.model
@classmethod
def from_task(cls, task: ClassyTask) -> "ClassyHubInterface":
"""Instantiates the ClassyHubInterface from a task.
This function returns a hub interface based on a ClassyTask.
Args:
task: ClassyTask that contains hub model
"""
return cls(task=task)
@classmethod
def from_model(cls, model: Union[nn.Module, ClassyModel]) -> "ClassyHubInterface":
"""Instantiates the ClassyHubInterface from a model.
This function returns a hub interface based on a ClassyModel
Args:
model: torchhub model
"""
if not isinstance(model, ClassyModel):
model = ClassyModel.from_model(model)
return cls(model=model)
def create_image_dataset(
self,
batchsize_per_replica: int = 32,
shuffle: bool = True,
transform: Optional[Union[ClassyTransform, Callable]] = None,
num_samples: Optional[int] = None,
image_folder: Optional[str] = None,
image_files: Optional[List[str]] = None,
phase_type: str = "train",
) -> ClassyDataset:
"""Create a ClassyDataset which reads images from image_paths.
See :class:`dataset.ImagePathDataset` for documentation on image_folder and
image_files
Args:
batchsize_per_replica: Minibatch size per replica (i.e. samples per GPU)
shuffle: If true, data is shuffled between epochs
transform: Transform to apply to sample. If left as None, the dataset's
phase_type is used to determine the transform to apply. The transform
for the phase_type is searched for in self.task, falling back to
imagenet transformations if it is not found there.
num_samples: If specified, limits the number of samples returned by
the dataset
phase_type: String specifying the phase_type, e.g. "train" or "test"
"""
if transform is None:
if self.task is not None and phase_type in self.task.datasets:
# use the transform from the dataset for the phase_type
dataset = self.task.datasets[phase_type]
transform = dataset.transform
assert transform is not None, "Cannot infer transform from the task"
else:
transform = build_field_transform_default_imagenet(
config=None, split=phase_type, key_map_transform=None
)
return ImagePathDataset(
batchsize_per_replica,
shuffle,
transform,
num_samples,
image_folder=image_folder,
image_files=image_files,
)
@staticmethod
def get_data_iterator(dataset: ClassyDataset) -> Iterator[Any]:
"""Returns an iterator that can be used to retrieve training / testing samples.
Args:
dataset: Dataset to iterate over
"""
return iter(dataset.iterator())
def train(self) -> None:
"""Sets the model to train mode and enables torch gradient calculation"""
torch.autograd.set_grad_enabled(True)
self.model.train()
def eval(self) -> None:
"""Sets the model to eval mode and disables torch gradient calculation"""
torch.autograd.set_grad_enabled(False)
self.model.eval()
def predict(self, sample):
"""Returns the model's prediction for a sample.
Args:
sample: Must contain "input" key, model calculates prediction over input.
"""
output = self.model(sample["input"])
# squeeze the output in case the batch size is 1
return output.squeeze()
def extract_features(self, sample):
"""Calculates feature embeddings of sample.
Args:
sample: Must contain "input" key, model calculates prediction over input.
"""
output = self.model.extract_features(sample["input"])
# squeeze the output in case the batch size is 1
return output.squeeze()
| ClassyVision-main | classy_vision/hub/classy_hub_interface.py |
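# Illustrative usage sketch (not part of the original file): wrapping a plain
# torchvision model with the hub interface above; assumes Classy Vision and torchvision
# are installed (older torchvision uses pretrained=False instead of weights=None). The
# random batch below stands in for real images.
import torch
import torchvision.models as models

from classy_vision.hub import ClassyHubInterface

if __name__ == "__main__":
    hub = ClassyHubInterface.from_model(models.resnet18(weights=None))
    hub.eval()  # eval mode + gradients disabled
    sample = {"input": torch.randn(1, 3, 224, 224)}
    print(hub.predict(sample).shape)  # 1000-way logits with the batch dim squeezed out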
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .classy_trainer import ClassyTrainer
from .distributed_trainer import DistributedTrainer
from .local_trainer import LocalTrainer
__all__ = ["ClassyTrainer", "DistributedTrainer", "LocalTrainer"]
| ClassyVision-main | classy_vision/trainer/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from classy_vision.generic.distributed_util import barrier
from classy_vision.tasks import ClassyTask
class ClassyTrainer:
"""Base class for shared training code.
A trainer is responsible for setting up the environment for
training, for instance: configuring rendezvous for distributed
training, deciding what GPU to use and so on. Trainers also
control the outer portion of the training loop, but delegate to
the task to decide how exactly to perform inference, compute loss
etc. That allows combining tasks with different trainers depending
on whether you want to train on your current machine, AWS cluster
etc.
"""
def train(self, task: ClassyTask):
"""Runs training phases, phases are generated from the config.
Args:
task: Task to be used in training. It should contain
everything that is needed for training
"""
task.prepare()
assert isinstance(task, ClassyTask)
# make sure all the workers start training at the same time
# this helps catch hangs which would have happened elsewhere
barrier()
task.on_start()
while not task.done_training():
task.on_phase_start()
while True:
try:
task.step()
except StopIteration:
break
task.on_phase_end()
task.on_end()
| ClassyVision-main | classy_vision/trainer/classy_trainer.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import torch
from classy_vision.generic.distributed_util import (
get_rank,
get_world_size,
set_cpu_device,
set_cuda_device_index,
)
from .classy_trainer import ClassyTrainer
def _init_env_vars(use_gpu: bool):
"""Function sets up default environment variables for distributed training.
Args:
use_gpu: If true, set NCCL environment for GPUs
"""
if "WORLD_SIZE" not in os.environ or "RANK" not in os.environ:
os.environ["WORLD_SIZE"] = "1"
os.environ["RANK"] = "0"
os.environ["LOCAL_RANK"] = "0"
if "MASTER_ADDR" not in os.environ or "MASTER_PORT" not in os.environ:
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "29500"
if use_gpu:
# From https://github.com/pytorch/elastic/blob/4175e9ec3ac346b89dab13eeca00e8f00b6daa8f/examples/imagenet/main.py#L156 # noqa B950
# when using NCCL, on failures, surviving nodes will deadlock on NCCL ops
# because NCCL uses a spin-lock on the device. Set this env var to enable a
# watchdog thread that will destroy stale NCCL communicators, and
# asynchronously handle NCCL errors and timed out collectives.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
def _init_distributed(use_gpu: bool):
"""Function perform distributed setup for DDP.
Requires the script to be started with torch.distributed.launch
script and uses environment variables for node finding.
Args:
use_gpu: If true, use distributed GPU training, else use CPU
"""
distributed_world_size = int(os.environ["WORLD_SIZE"])
distributed_rank = int(os.environ["RANK"])
backend = "nccl" if use_gpu else "gloo"
torch.distributed.init_process_group(
backend=backend,
init_method="env://",
world_size=distributed_world_size,
rank=distributed_rank,
)
class DistributedTrainer(ClassyTrainer):
"""Distributed trainer for using multiple training processes"""
def train(self, task):
_init_env_vars(task.use_gpu)
_init_distributed(task.use_gpu)
logging.info(
f"Done setting up distributed process_group with rank {get_rank()}"
+ f", world_size {get_world_size()}"
)
local_rank = int(os.environ["LOCAL_RANK"])
if task.use_gpu:
logging.info("Using GPU, CUDA device index: {}".format(local_rank))
set_cuda_device_index(local_rank)
else:
logging.info("Using CPU")
set_cpu_device()
super().train(task)
| ClassyVision-main | classy_vision/trainer/distributed_trainer.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Optional
from classy_vision.generic.distributed_util import set_cpu_device, set_cuda_device_index
from .classy_trainer import ClassyTrainer
class LocalTrainer(ClassyTrainer):
"""Trainer to be used if you want want use only a single training process."""
def train(self, task):
if task.use_gpu:
logging.info("Using GPU, CUDA device index: {}".format(0))
set_cuda_device_index(0)
else:
logging.info("Using CPU")
set_cpu_device()
super().train(task)
| ClassyVision-main | classy_vision/trainer/local_trainer.py |
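# Illustrative usage sketch (not part of the original file): the typical way a trainer
# above is driven; assumes Classy Vision is installed and that the JSON file holds a
# valid task configuration -- the path below is hypothetical.
import json

from classy_vision.tasks import build_task
from classy_vision.trainer import LocalTrainer

if __name__ == "__main__":
    with open("configs/template_config.json") as f:
        config = json.load(f)
    task = build_task(config)  # builds model, loss, optimizer and datasets from config
    LocalTrainer().train(task)  # single-process run of the shared training loop above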
| ClassyVision-main | classy_vision/hydra/__init__.py |
| ClassyVision-main | classy_vision/hydra/conf/__init__.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import json,sys,os
output_path = 'output/'
def json_save(path, obj):
with open(path, 'w') as f:
json.dump(obj, f)
def os_mkdir(path):
if not os.path.exists(path):
os.mkdir(path)
def makePartialJsons(obj):
for k in obj['data']:
os_mkdir(output_path)
output_f = os.path.join(output_path, k+'.json')
json_save(output_f, obj['data'][k])
print(k)
if __name__=='__main__' and len(sys.argv) > 1:
path = sys.argv[1]
with open(path, 'r') as f:
obj = json.load(f)
makePartialJsons(obj)
| CoDraw-master | script/preprocess.py |
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='torchhalp',
version='0.0.1',
description='PyTorch implementation of High-Accuracy Low-Precision Training',
author= 'Megan Leszczynski',
packages=find_packages(),
)
| torchhalp-master | setup.py |
import numpy as np
import pytest
import torch
from torch.autograd import Variable
from examples import regression
from torchhalp.optim import SVRG
from utils import *
np.random.seed(0xdeadbeef)
#========================================================================================
# SVRG implementations
#========================================================================================
def baseline_svrg(x, y, w, lr, n, T=1, K=1, calc_gradient=None):
iters = 0
for k in range(K):
for idx in range(n):
if iters % T == 0:
w_prev = w
full_grad = calc_gradient(x, y, w, avg=True)
xi, yi = x[[idx],:], y[idx:idx+1]
w_grad = calc_gradient(xi, yi, w)
w_prev_grad = calc_gradient(xi, yi, w_prev)
adjusted_grad = w_grad - w_prev_grad + full_grad
w = w - (lr*adjusted_grad)
iters += 1
return w
def pytorch_svrg(x, y, w, lr, T, K=1, n_features=None, n_classes=1):
model = regression.utils.build_model(n_features, n_classes, initial_value=w)
x = torch.from_numpy(x).float()
# linear regression
if n_classes == 1:
y = torch.from_numpy(y).float().view(-1,1)
loss = torch.nn.MSELoss()
else: # multiclass logistic
y = torch.from_numpy(y).long()
loss = torch.nn.CrossEntropyLoss()
synth_dataset = regression.utils.SynthDataset(x, y)
train_loader = torch.utils.data.DataLoader(synth_dataset)
svrg_opt = SVRG(model.parameters(), lr=lr, T=T, data_loader=train_loader)
for k in range(K):
for i, (data, target) in enumerate(train_loader):
def closure(data=data, target=target):
data = Variable(data, requires_grad=False)
target = Variable(target, requires_grad=False)
output = model(data)
cost = loss(output, target)
cost.backward()
return cost
svrg_opt.step(closure)
w = np.asarray([p.data.numpy() for p in
list(model.parameters())]).reshape(n_classes, n_features)
return w
#========================================================================================
# Tests
#========================================================================================
@pytest.mark.parametrize("n_samples,n_features,lr,K,T",
[
(1, 1, 1, 1, 1),
(1, 4, 0.1, 1, 1),
(1, 4, 0.1, 2, 1),
(10, 4, 0.1, 1, 1),
(10, 10, 0.1, 1, 1),
(10, 10, 0.5, 1, 1),
(10, 10, 0.5, 2, 1),
(1, 1, 1, 1, 2),
(1, 4, 0.1, 1, 2),
(1, 4, 0.1, 2, 2),
(10, 4, 0.1, 1, 2),
(10, 10, 0.1, 1, 10),
(10, 10, 0.5, 2, 10),
(10, 10, 0.1, 1, 20),
(10, 10, 0.5, 1, 20),
(10, 10, 0.5, 2, 20)
])
def test_linear_regress(n_samples, n_features, lr, K, T):
x = np.random.rand(n_samples, n_features)
y = np.random.uniform(0,1, size=(n_samples,))
w = np.random.uniform(0, 0.1, (1, n_features))
print "K", K
np_value = baseline_svrg(x, y, w, lr, n=n_samples, T=T, K=K, calc_gradient=linear_grad)
pytorch_value = pytorch_svrg(x, y, w, lr, T=T, K=K, n_features=n_features)
np.testing.assert_allclose(np_value, pytorch_value, rtol=1e-4)
@pytest.mark.parametrize("n_samples,n_features,n_classes,lr,K",
[
(1, 1, 3, 1, 1),
(1, 4, 3, 0.1, 1),
(1, 4, 3, 0.1, 2),
(2, 4, 3, 0.1, 1),
(2, 4, 3, 0.5, 1),
(2, 4, 3, 0.5, 2),
(2, 4, 4, 0.5, 2),
])
def test_logistic_regress(n_samples, n_features, n_classes, lr, K):
x = np.random.rand(n_samples, n_features)
y = np.random.randint(0, n_classes, size=(n_samples,))
w = np.random.uniform(0, 0.1, (n_classes, n_features))
np_value = baseline_svrg(x, y, w, lr, n=n_samples, T=n_samples, K=K, calc_gradient=logistic_grad)
pytorch_value = pytorch_svrg(x, y, w, lr, T=n_samples, K=K, n_features=n_features,
n_classes=n_classes)
np.testing.assert_allclose(np_value, pytorch_value, rtol=1e-4)
| torchhalp-master | test/test_svrg.py |
| torchhalp-master | test/__init__.py |
import pytest
import numpy as np
import torch
from torch.autograd import Variable
from utils import *
from torchhalp.optim import HALP
from examples import regression
np.random.seed(0xdeadbeef)
#========================================================================================
# Helpers
#========================================================================================
def quantize(vect, b, scale_factor, biased=False):
if not biased:
random_vect = np.random.uniform(0, 1, size=vect.shape)
vect = np.floor((vect/float(scale_factor)) + random_vect)
else:
vect = np.floor(vect/float(scale_factor) + 0.5)
min_value = -1 * (2**(b-1))
max_value = 2**(b-1) - 1
vect = np.clip(vect, min_value, max_value)
return vect
def dequantize(vect, scale_factor):
return vect*scale_factor
#========================================================================================
# HALP implementations
#========================================================================================
def baseline_halp(x, y, w, lr, b, mu, n, T=1, K=1, calc_gradient=None):
s_k = 1.0 # s_k needs an initial value to complete w addition
z = np.zeros(w.shape)
iters = 0
for k in range(K):
for idx in range(n):
if iters % T == 0:
# Recenter
w = w + z # w is full precision
g_k = calc_gradient(x, y, w, avg=True)
# Rescale
s_k = float(np.linalg.norm(g_k)) / (mu * (2**(b-1) - 1))
z = np.zeros(w.shape)
xi, yi = x[[idx],:], y[idx:idx+1]
z = z - (lr*(calc_gradient(xi, yi, w + z) - calc_gradient(xi, yi, w) + g_k))
z = quantize(z, b, s_k, biased=True)
z = dequantize(z, s_k)
iters += 1
return w + z
def pytorch_halp(x, y, w, lr, b, mu, T=1, K=1, n_features=None, n_classes=1):
model = regression.utils.build_model(n_features, n_classes, initial_value=w)
x = torch.from_numpy(x).float()
# Linear regression
if n_classes == 1:
y = torch.from_numpy(y).float().view(-1,1)
loss = torch.nn.MSELoss()
else: # Multiclass logistic
y = torch.from_numpy(y).long()
loss = torch.nn.CrossEntropyLoss()
synth_dataset = regression.utils.SynthDataset(x, y)
train_loader = torch.utils.data.DataLoader(synth_dataset)
halp_opt = HALP(model.parameters(), lr=lr, T=T, data_loader=train_loader, bits=b, mu=mu, biased=True)
for k in range(K):
for i, (data, target) in enumerate(train_loader):
def closure(data=data, target=target):
data = Variable(data, requires_grad=False)
target = Variable(target, requires_grad=False)
output = model(data)
cost = loss(output, target)
cost.backward()
return cost
halp_opt.step(closure)
w = np.asarray([p.data.numpy() for p in
list(model.parameters())]).reshape(n_classes, n_features)
return w
#========================================================================================
# Tests
#========================================================================================
@pytest.mark.parametrize("n_samples,n_features,lr,K,b,mu,T",
[
(1, 1, 1, 1, 8, 1, 1),
(1, 4, 0.1, 1, 8, 1, 1),
(1, 4, 0.1, 4, 8, 1, 2),
(10, 4, 0.1, 1, 8, 1, 10),
(10, 4, 0.1, 1, 8, 1, 10),
(10, 10, 0.1, 1, 8, 1, 10),
(10, 10, 0.5, 1, 8, 1, 10),
(10, 10, 0.5, 10, 8, 1, 10),
(10, 10, 0.5, 10, 8, 0.1, 10),
(10, 10, 0.5, 10, 16, 0.1, 10),
(5, 10, 0.5, 10, 16, 1, 5),
(10, 4, 0.1, 1, 8, 1, 20),
(10, 4, 0.1, 1, 8, 1, 20),
(10, 10, 0.1, 1, 8, 1, 20),
(10, 10, 0.5, 1, 8, 1, 20),
(10, 10, 0.5, 10, 8, 1, 20),
(10, 10, 0.5, 10, 8, 0.1, 20),
(10, 10, 0.5, 10, 16, 0.1, 20)
])
def test_linear_regress(n_samples, n_features, lr, K, b, mu, T):
x = np.random.rand(n_samples, n_features)
y = np.random.uniform(0,1, size=(n_samples,))
w = np.random.uniform(0,0.1, (1, n_features))
np_value = baseline_halp(x, y, w, lr, b, mu, n=n_samples, T=T, K=K, calc_gradient=linear_grad)
pytorch_value = pytorch_halp(x, y, w, lr, b, mu, T=T, K=K, n_features=n_features)
np.testing.assert_allclose(np_value, pytorch_value, rtol=1e-4)
@pytest.mark.parametrize("n_samples,n_features,n_classes,lr,K,b,mu",
[
(1, 1, 3, 1, 1, 8, 1),
(1, 4, 3, 0.1, 1, 8, 1),
(1, 4, 3, 0.1, 2, 8, 1),
(2, 4, 3, 0.1, 1, 8, 1),
(2, 4, 3, 0.5, 1, 8, 1),
(2, 4, 3, 0.5, 2, 8, 1),
(2, 4, 4, 0.5, 2, 8, 1),
(2, 4, 4, 0.5, 2, 8, 0.1),
(2, 4, 4, 0.5, 2, 16,0.1),
(2, 4, 4, 0.5, 2, 16, 1)
])
def test_logistic_regress(n_samples, n_features, n_classes, lr, K, b, mu):
x = np.random.rand(n_samples, n_features)
y = np.random.randint(0, n_classes, size=(n_samples,))
w = np.random.uniform(0, 0.1, (n_classes, n_features))
np_value = baseline_halp(x, y, w, lr, b, mu, n=n_samples, T=n_samples, K=K, calc_gradient=logistic_grad)
pytorch_value = pytorch_halp(x, y, w, lr, b, mu, T=n_samples, K=K, n_features=n_features,
n_classes=n_classes)
np.testing.assert_allclose(np_value, pytorch_value, rtol=1e-4)
| torchhalp-master | test/test_halp.py |
import math
import torch
import pytest
import numpy as np
from utils import iter_indices
import torchhalp.quantize
def check_saturation(m1, scale_factor, bits):
min_val = -scale_factor*math.pow(2, bits-1)
max_val = scale_factor*(math.pow(2, bits-1) - 1)
m2 = m1.clone()
for i in iter_indices(m2):
m2[i] = max(min_val, min(max_val, m2[i]))
np.testing.assert_equal(m1.numpy(), m2.numpy())
def check_quantization(m1, scale_factor, bits):
# Test that the quantized value is in its range
check_saturation(m1, scale_factor, bits)
# Test that quantized value is representable
m2 = m1.clone()
for i in iter_indices(m2):
# Must be an integer in the fixed-point representation
m2[i] = round(m2[i] / scale_factor) * scale_factor
np.testing.assert_allclose(m1.numpy(), m2.numpy(), rtol=1e-6)
@pytest.mark.parametrize("scale_factor,bits",
[
(0.05, 8),
(5e-5, 16),
(5e-9, 32)
])
def test_quantization(scale_factor, bits):
# Create a matrix with 100 values randomly uniform within [-15, 15]
m1 = torch.rand(100).mul(30).add(-15)
m1.quantize_(scale_factor, bits)
check_quantization(m1, scale_factor, bits)
@pytest.mark.parametrize("scale_factor,bits",
[
(0.05, 8),
(5e-5, 16),
(5e-9, 32)
])
def test_saturation(scale_factor, bits):
m1 = torch.rand(100).mul(30).add(-15) # uniform [-15, 15]
m1.saturate_(scale_factor, bits)
    # Test that the saturated value is in its range
check_saturation(m1, scale_factor, bits)
| torchhalp-master | test/test_quantize.py |
import numpy as np
from itertools import product
def stablesoftmax(x):
"""Compute the softmax of vector x in a numerically stable way."""
shiftx = x - np.max(x, axis=1).reshape((-1,1))
exps = np.exp(shiftx)
return exps / np.sum(exps, axis=1).reshape(-1,1)
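# Subtracting the row-wise max leaves the softmax unchanged (it is invariant to adding
# a constant within each row) but keeps np.exp from overflowing for large inputs.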
def logistic_grad(x, y, w, avg=False):
"""Compute the gradient for multi-class logistic regression"""
xi_dot_w = np.dot(x, w.T)
pred = stablesoftmax(xi_dot_w)
for i in range(len(x)):
pred[i][y[i]] = pred[i][y[i]] - 1
grad = np.dot(pred.T, x)
if avg:
grad = grad / float(len(x))
return grad
def linear_grad(x, y, w, avg=False):
"""Compute the gradient for linear regression"""
xi_dot_w = np.dot(x, w.T)
grad = 2*np.dot(xi_dot_w.T - y.T, x)
if avg:
grad = grad / float(len(x))
return grad
# https://github.com/pytorch/pytorch/blob/master/test/common.py
def iter_indices(tensor):
if tensor.dim() == 0:
return range(0)
if tensor.dim() == 1:
return range(tensor.size(0))
return product(*(range(s) for s in tensor.size())) | torchhalp-master | test/utils.py |
torchhalp-master | examples/__init__.py |
|
from utils import build_model, SynthDataset
| torchhalp-master | examples/regression/__init__.py |
import torch
import torch.utils.data as data
class SynthDataset(data.Dataset):
def __init__(self, data, labels):
self.data = data
self.labels = labels
def __len__(self):
return len(self.labels)
def __getitem__(self, idx):
return self.data[idx], self.labels[idx]
def build_model(input_dim, output_dim=1, initial_value=None):
model = torch.nn.Sequential()
module = torch.nn.Linear(input_dim, output_dim, bias=False)
if initial_value is not None:
module.weight.data = torch.from_numpy(initial_value).type(torch.FloatTensor)
model.add_module("linear", module)
else:
model.add_module("linear", torch.nn.Linear(input_dim, output_dim, bias=False))
return model | torchhalp-master | examples/regression/utils.py |
import torch
import torch.utils.data as data
from torch.autograd import Variable
from torch import optim
import numpy as np
import argparse
from sklearn import linear_model, datasets
from utils import SynthDataset
from torchhalp.optim import SVRG, HALP
import matplotlib
matplotlib.use('pdf') # use a non-interactive backend (needed on headless machines such as raiders9)
import matplotlib.pyplot as plt
def parse_args():
parser = argparse.ArgumentParser(description='Linear regression')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default= 0.001, metavar='LR',
help='learning rate (default: 0.001)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--T', type=int, default=200, metavar='T',
help='how many iterations between taking full gradient')
parser.add_argument('--mu', default=4, type=float,
help='mu, only used for HALP')
parser.add_argument('--bits', '-b', default=8, type=int,
help='Number of bits to use, only used for HALP')
parser.add_argument('--n', type=int, default=100, metavar='NS',
help='number of samples')
parser.add_argument('--num-features', type=int, default=5, metavar='F',
help='number of features')
parser.add_argument('--sgd', action='store_true',
help='Runs stochastic gradient descent')
parser.add_argument('--svrg', action='store_true',
help='Runs SVRG')
parser.add_argument('--halp', action='store_true',
help='Runs HALP algorithm')
parser.add_argument('--all', action='store_true',
help='Runs all optimizer algorithms')
parser.add_argument('--save_graph', action='store_true',
help='Saves a graph of the results.')
return parser.parse_args()
def add_plot(iters, dist, label, log_y=True, T=None):
if log_y:
plt.plot = plt.semilogy
plt.figure(0)
plt.plot(range(iters), dist, label=label)
# https://discuss.pytorch.org/t/adaptive-learning-rate/320/23
def step_decay_lr_scheduler(optimizer, epoch, lr_decay=0.1, lr_decay_epoch=7):
"""Decay learning rate by a factor of lr_decay every lr_decay_epoch epochs"""
if epoch % lr_decay_epoch:
return
for param_group in optimizer.param_groups:
param_group['lr'] *= lr_decay
def main():
args = parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
n = args.n
n_features = args.num_features
num_epochs = args.epochs
T = args.T
# Make synthetic dataset with sklearn
X, Y = datasets.make_regression(n_samples=n,
n_features=n_features,
random_state=0xc0ffee)
# Solve for optimal solution
w_opt, _, _, _= np.linalg.lstsq(X, Y, rcond=None)
# Make dataloader
X = torch.from_numpy(X).float()
Y = torch.from_numpy(Y).float().view(-1,1)
synth_dataset = SynthDataset(X, Y)
train_loader = torch.utils.data.DataLoader(synth_dataset, shuffle=True)
loss = torch.nn.MSELoss()
def build_model():
# Create model
model = torch.nn.Sequential()
model.add_module("linear", torch.nn.Linear(n_features, 1, bias=False))
model.linear.weight.data.fill_(0.0)
if args.cuda:
model.cuda()
return model
def train(optimizer, lr_decay=False):
# Training
dist_to_optimum = []
iters = 0
for e in range(num_epochs):
for i, (data, target) in enumerate(train_loader):
# We need to add this function to models when we want to use SVRG or HALP
def closure(data=data, target=target):
data = Variable(data, requires_grad=False)
target = Variable(target, requires_grad=False)
if args.cuda:
data, target = data.cuda(), target.cuda()
output = model(data)
cost = loss(output, target)
cost.backward()
return cost
# We need to zero the optimizer for SGD
# (already done internally for SVRG and HALP)
optimizer.zero_grad()
# This is the key line to perform the optimizer step
# We don't need to call forward/backward explicitly (in addition to in the closure)
# since the optimizer will call the closure
optimizer.step(closure)
# Performance metric: distance to optimum
w = np.asarray([p.data.cpu().numpy() for p in list(model.parameters())])
dist = np.linalg.norm(w-w_opt)
dist_to_optimum.append(dist)
if iters % n == 0:
print("Iteration = %d, Dist_to_opt = %s" % (iters , dist))
iters += 1
if lr_decay:
step_decay_lr_scheduler(optimizer, e, lr_decay=0.1, lr_decay_epoch=200)
return dist_to_optimum
# Optimizer
if args.sgd or args.all:
model = build_model()
opt = optim.SGD(model.parameters(), lr=args.lr)
dist = train(opt, lr_decay=False)
add_plot(num_epochs*len(train_loader), dist, label='SGD')
if args.svrg or args.all:
model = build_model()
opt = SVRG(model.parameters(), T=T, data_loader=train_loader, lr=args.lr)
dist = train(opt)
add_plot(num_epochs*len(train_loader), dist, label='SVRG', T=T)
if args.halp or args.all:
model = build_model()
opt = HALP(model.parameters(), T=T, data_loader=train_loader, lr=args.lr, mu=args.mu, bits=args.bits)
dist = train(opt)
add_plot(num_epochs*len(train_loader), dist, label='HALP', T=T)
if args.save_graph:
plt.figure(0)
plt.ylabel('Distance to Optimum')
plt.xlabel('Iterations')
plt.legend(loc='best')
plt.savefig('results.svg')
if __name__ == "__main__":
main()
| torchhalp-master | examples/regression/main.py |
# MIT License
# Copyright (c) 2017 liukuang
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# https://github.com/kuangliu/pytorch-cifar
'''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512*block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def ResNet18():
return ResNet(BasicBlock, [2,2,2,2])
def ResNet34():
return ResNet(BasicBlock, [3,4,6,3])
def ResNet50():
return ResNet(Bottleneck, [3,4,6,3])
def ResNet101():
return ResNet(Bottleneck, [3,4,23,3])
def ResNet152():
return ResNet(Bottleneck, [3,8,36,3])
def test():
net = ResNet18()
y = net(Variable(torch.randn(1,3,32,32)))
print(y.size())
# test()
| torchhalp-master | examples/cifar10/resnet.py |
# MIT License
# Copyright (c) 2017 liukuang
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# https://github.com/kuangliu/pytorch-cifar
'''Some helper functions for PyTorch, including:
- get_mean_and_std: calculate the mean and std value of dataset.
- msr_init: net parameter initialization.
- progress_bar: progress bar mimic xlua.progress.
'''
import os
import sys
import time
import math
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.init as init
def get_mean_and_std(dataset):
'''Compute the mean and std value of dataset.'''
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
mean = torch.zeros(3)
std = torch.zeros(3)
print('==> Computing mean and std..')
for inputs, targets in dataloader:
for i in range(3):
mean[i] += inputs[:,i,:,:].mean()
std[i] += inputs[:,i,:,:].std()
mean.div_(len(dataset))
std.div_(len(dataset))
return mean, std
def init_params(net):
'''Init layer parameters.'''
for m in net.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal(m.weight, mode='fan_out')
if m.bias:
init.constant(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant(m.weight, 1)
init.constant(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal(m.weight, std=1e-3)
if m.bias:
init.constant(m.bias, 0)
TOTAL_BAR_LENGTH = 65.
last_time = time.time()
begin_time = last_time
def progress_bar(current, total, msg=None):
_, term_width = os.popen('stty size', 'r').read().split()
term_width = int(term_width)
global last_time, begin_time
if current == 0:
begin_time = time.time() # Reset for new bar.
cur_len = int(TOTAL_BAR_LENGTH*current/total)
rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
sys.stdout.write(' [')
for i in range(cur_len):
sys.stdout.write('=')
sys.stdout.write('>')
for i in range(rest_len):
sys.stdout.write('.')
sys.stdout.write(']')
cur_time = time.time()
step_time = cur_time - last_time
last_time = cur_time
tot_time = cur_time - begin_time
L = []
L.append(' Step: %s' % format_time(step_time))
L.append(' | Tot: %s' % format_time(tot_time))
if msg:
L.append(' | ' + msg)
msg = ''.join(L)
sys.stdout.write(msg)
for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
sys.stdout.write(' ')
# Go back to the center of the bar.
for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
sys.stdout.write('\b')
sys.stdout.write(' %d/%d ' % (current+1, total))
if current < total-1:
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
sys.stdout.flush()
def format_time(seconds):
days = int(seconds / 3600/24)
seconds = seconds - days*3600*24
hours = int(seconds / 3600)
seconds = seconds - hours*3600
minutes = int(seconds / 60)
seconds = seconds - minutes*60
secondsf = int(seconds)
seconds = seconds - secondsf
millis = int(seconds*1000)
f = ''
i = 1
if days > 0:
f += str(days) + 'D'
i += 1
if hours > 0 and i <= 2:
f += str(hours) + 'h'
i += 1
if minutes > 0 and i <= 2:
f += str(minutes) + 'm'
i += 1
if secondsf > 0 and i <= 2:
f += str(secondsf) + 's'
i += 1
if millis > 0 and i <= 2:
f += str(millis) + 'ms'
i += 1
if f == '':
f = '0ms'
return f
| torchhalp-master | examples/cifar10/utils.py |
# MIT License
# Copyright (c) 2017 liukuang
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# modified from https://github.com/kuangliu/pytorch-cifar to add support for SVRG and HALP
'''Train CIFAR10 with PyTorch.'''
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import os
import argparse
import csv
from resnet import *
from utils import progress_bar
from torch.autograd import Variable
from torchhalp.optim import SVRG, HALP
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='Learning rate')
parser.add_argument('--T', type=int, help='T, only used for SVRG and HALP')
parser.add_argument('--mu', default=1, type=float, help='mu, only used for HALP')
parser.add_argument('--bits', '-b', default=8, type=int, help='Number of bits to use, only used for HALP')
parser.add_argument('--num_epochs', default=1, type=int, help='Number of epochs')
parser.add_argument('--opt', default='SGD', type=str, help='Optimizer for training')
parser.add_argument('--resume', '-r', action='store_true', help='Resume from checkpoint')
parser.add_argument('--progress', action='store_true')
args = parser.parse_args()
use_cuda = torch.cuda.is_available()
best_acc = 0 # best test accuracy
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
# Data
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)
# Default to T being 2*size of the dataset if T is not set
if args.T is None and (args.opt == 'SVRG' or args.opt == 'HALP'):
args.T = 2*len(trainloader) # Number of batches
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# Model
# Build the checkpoint tag up front so that --resume can locate the right file;
# it is reused below when saving checkpoints.
if args.opt == 'SGD':
    ckpt_tag = '{}'.format(args.opt)
elif args.opt == 'SVRG':
    ckpt_tag = '{}_T_{}'.format(args.opt, args.T)
elif args.opt == 'HALP':
    ckpt_tag = '{}_T_{}_mu_{}_b_{}'.format(args.opt, args.T, args.mu, args.bits)
if args.resume:
    # Load checkpoint.
    print('==> Resuming from checkpoint..')
    assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
    checkpoint = torch.load('./checkpoint/{}'.format(ckpt_tag))
    net = checkpoint['net']
    best_acc = checkpoint['acc']
    start_epoch = checkpoint['epoch'] + 1
else:
print('==> Building model..')
net = ResNet18()
if use_cuda:
net.cuda()
net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss()
# ===================================================================
# THIS IS NEW --- need to call SVRG/HALP and pass data_loader and T
# and other optional parameters
# ===================================================================
if args.opt == 'SGD':
ckpt_tag = '{}'.format(args.opt)
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
elif args.opt == 'SVRG':
ckpt_tag = '{}_T_{}'.format(args.opt, args.T)
optimizer = SVRG(net.parameters(), lr=args.lr, weight_decay=5e-4, data_loader=trainloader, T=args.T)
elif args.opt == 'HALP':
ckpt_tag = '{}_T_{}_mu_{}_b_{}'.format(args.opt, args.T, args.mu, args.bits)
optimizer = HALP(net.parameters(), lr=args.lr, weight_decay=5e-4, data_loader=trainloader, T=args.T, mu=args.mu, bits=args.bits)
# Training
def train(epoch):
losses = []
print('\nEpoch: %d' % epoch)
net.train()
correct = 0
total = 0
for batch_idx, (data, target) in enumerate(trainloader):
def closure(data=data, target=target):
data = Variable(data, requires_grad=False)
target = Variable(target, requires_grad=False)
# Need to pass an argument to use cuda or not
if use_cuda:
data, target = data.cuda(), target.cuda()
output = net(data)
cost = criterion(output, target)
cost.backward()
return cost
optimizer.zero_grad()
loss = optimizer.step(closure)
losses.append(loss.data[0])
if args.progress:
progress_bar(batch_idx, len(trainloader), 'Loss: %.3f' % (loss.data[0]))
return losses
def test(epoch):
global best_acc
net.eval()
test_loss = 0
correct = 0
total = 0
accuracies = []
for batch_idx, (inputs, targets) in enumerate(testloader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets = Variable(inputs, volatile=True), Variable(targets)
outputs = net(inputs)
loss = criterion(outputs, targets)
test_loss += loss.data[0]
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
acc = 100.*correct/total
if args.progress:
progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss/(batch_idx+1), acc, correct, total))
# Save checkpoint.
if acc > best_acc:
print('Saving..')
state = {
'net': net.module if use_cuda else net,
'acc': acc,
'epoch': epoch,
}
if not os.path.isdir('checkpoint'):
os.mkdir('checkpoint')
torch.save(state, './checkpoint/{}'.format(ckpt_tag))
best_acc = acc
accuracies.append(acc)
return accuracies
# Create folders and files to save metrics
if not os.path.isdir('results'):
os.mkdir('results')
if args.opt == 'SGD':
if not os.path.isdir('results/sgd'):
os.mkdir('results/sgd')
tag = 'results/sgd/ResNet_lr_{}'.format(args.lr)
if args.opt == 'SVRG':
if not os.path.isdir('results/svrg'):
os.mkdir('results/svrg')
tag = 'results/svrg/ResNet_lr_{}_T_{}_l2_5e-4'.format(args.lr, args.T)
if args.opt == 'HALP':
if not os.path.isdir('results/halp'):
os.mkdir('results/halp')
tag = 'results/halp/ResNet_lr_{}_T{}_mu_{}_b_{}_l2_5e-4'.format(args.lr, args.T, args.mu, args.bits)
training_file = '{}_train.csv'.format(tag)
test_file = '{}_test.csv'.format(tag)
# Remove file since we append to it over training
if start_epoch == 0:
try:
os.remove(training_file)
os.remove(test_file)
except OSError:
pass
# Do training
for epoch in range(start_epoch, start_epoch+args.num_epochs):
training_losses = train(epoch)
test_accuracies = test(epoch)
# Save metrics
with open(training_file, 'a+') as csvfile:
csvwriter = csv.writer(csvfile)
for row in training_losses:
csvwriter.writerow([row])
with open(test_file, 'a+') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(test_accuracies)
| torchhalp-master | examples/cifar10/main.py |
import math
import torch
# Modified from
# https://github.com/aaron-xichen/pytorch-playground/blob/master/utee/quant.py
def quantize_(input, scale_factor, bits, biased=False):
assert bits >= 1, bits
bound = math.pow(2.0, bits-1)
min_val = - bound
max_val = bound - 1
if biased:
adj_val = 0.5
else:
# Generate tensor of random values from [0,1]
adj_val = torch.Tensor(input.size()).type(input.type()).uniform_()
rounded = input.div_(scale_factor).add_(adj_val).floor_()
clipped_value = rounded.clamp_(min_val, max_val)
clipped_value *= scale_factor
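# Worked example of the fixed-point grid implemented above (illustration only):
# with scale_factor=0.5 and bits=4, the representable values are
# {-4.0, -3.5, ..., 3.0, 3.5} (i.e. k * 0.5 for integers k in [-8, 7]); with
# biased=False an input of 1.3 (= 2.6 * 0.5) is stochastically rounded to 1.0 with
# probability 0.4 and to 1.5 with probability 0.6, so the rounding is unbiased in
# expectation.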
def saturate_(input, scale_factor, bits):
bound = math.pow(2.0, bits-1)
min_val = - bound * scale_factor
max_val = (bound-1) * scale_factor
input.clamp_(min_val, max_val)
# Monkey patch torch.Tensor
torch.Tensor.quantize_ = quantize_
torch.Tensor.saturate_ = saturate_
torch.cuda.FloatTensor.quantize_ = quantize_
torch.cuda.FloatTensor.saturate_ = saturate_ | torchhalp-master | torchhalp/quantize.py |
torchhalp-master | torchhalp/__init__.py |
|
from torch.optim.optimizer import Optimizer, required
import torch
from torch.autograd import Variable
import copy, logging
class SVRG(torch.optim.SGD):
"""Implements stochastic variance reduction gradient descent.
Args:
params (iterable): iterable of parameters to optimize
lr (float): learning rate
T (int): number of iterations between the step to take the full grad/save w
data_loader (DataLoader): dataloader to use to load training data
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
momentum (float, optional): momentum (default: 0)
opt (torch.optim): optimizer to baseclass (default: SGD)
"""
def __init__(self, params, lr=required, T=required, data_loader=required, weight_decay=0.0,
momentum=0.0, opt=torch.optim.SGD):
defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum)
# Choose the baseclass dynamically.
self.__class__ = type(self.__class__.__name__,
(opt,object),
dict(self.__class__.__dict__))
logging.info("Using base optimizer {} in SVRG".format(opt))
super(self.__class__, self).__init__(params, **defaults)
if len(self.param_groups) != 1:
raise ValueError("SVRG doesn't support per-parameter options "
"(parameter groups)")
params = self.param_groups[0]['params']
self._params = params
self._curr_w = [p.data for p in params]
self._prev_w = [p.data.clone() for p in params]
# Gradients are lazily allocated and don't exist yet. However, gradients are
# the same shape as the weights so we can still allocate buffers here
self._curr_grad = [p.data.clone() for p in params]
self._prev_grad = [p.data.clone() for p in params]
self._full_grad = None
self.data_loader = data_loader
self.state['t_iters'] = T
self.T = T # Needed to trigger full gradient
logging.info("Data Loader has {} with batch {}".format(len(self.data_loader),
self.data_loader.batch_size))
def __setstate__(self, state):
super(self.__class__, self).__setstate__(state)
def _zero_grad(self):
for p in self._params:
if p.grad is not None:
                p.grad.detach_()  # detach in place before zeroing
p.grad.zero_()
    def _set_weights_grad(self,ws,gs):
        """ Set the pointers in params to ws and gs for p.data and p.grad.data
        respectively. This allows us to avoid copying data in and out of parameters.
        """
        for idx, p in enumerate(self._params):
            if ws is not None: p.data = ws[idx]
            if gs is not None and p.grad is not None:
                p.grad.data = gs[idx]
                assert (p.grad.data.data_ptr() == gs[idx].data_ptr())
def step(self, closure):
"""Performs a single optimization step.
Arguments:
closure (callable): A closure that reevaluates the model
and returns the loss.
"""
assert len(self.param_groups) == 1
# Calculate full gradient
if self.state['t_iters'] == self.T:
# Setup the full grad
# Reset gradients before accumulating them
self._set_weights_grad(None, self._full_grad)
self._zero_grad()
# Accumulate gradients
for i, (data, target) in enumerate(self.data_loader):
closure(data, target)
# Adjust summed gradients by num_iterations accumulated over
# assert(n_iterations == len(self.data_loader))
for p in self._params:
if p.grad is not None:
p.grad.data /= len(self.data_loader)
if self._full_grad is None:
self._full_grad = [p.grad.data.clone() for p in self._params]
# Copy w to prev_w
for p, p0 in zip(self._curr_w, self._prev_w):
p0.copy_(p)
# Reset t
self.state['t_iters'] = 0
# Setup the previous grad
self._set_weights_grad(self._prev_w, self._prev_grad)
self._zero_grad()
closure()
# Calculate the current grad.
self._set_weights_grad(self._curr_w, self._curr_grad)
self._zero_grad()
loss = closure()
# Adjust the current gradient using the previous gradient and the full gradient.
# We have normalized so that these are all comparable.
for p, d_p0, fg in zip(self._params, self._prev_grad, self._full_grad):
# Adjust gradient in place
if p.grad is not None:
p.grad.data -= (d_p0 - fg)
# Call optimizer update step
super(self.__class__, self).step()
self.state['t_iters'] += 1
return loss | torchhalp-master | torchhalp/optim/svrg.py |
from torch.optim.optimizer import Optimizer, required
import torch
from torch.autograd import Variable
import copy, logging
import math
import torchhalp.quantize
class HALP(torch.optim.SGD):
"""Implements high-accuracy low-precision algorithm.
Args:
params (iterable): iterable of parameters to optimize
lr (float): learning rate
T (int): number of iterations between the step to take the full grad/save w
data_loader (DataLoader): dataloader to use to load training data
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
momentum (float, optional): momentum (default: 0)
opt (torch.optim): optimizer to baseclass (default: SGD)
mu (float, optional): mu hyperparameter for HALP algorithm (default: 0.1)
bits (int, optional): number of bits to use for offset (default: 8)
biased (bool, optional): type of rounding to use for quantization (default: unbiased)
"""
def __init__(self, params, lr=required, T=required, data_loader=required,
weight_decay=0.0, momentum=0.0, opt=torch.optim.SGD, mu=1e-1, bits=8, biased=False):
defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum)
# Choose the baseclass dynamically
self.__class__ = type(self.__class__.__name__,
(opt,object),
dict(self.__class__.__dict__))
logging.info("Using base optimizer {} in HALP".format(opt))
super(self.__class__, self).__init__(params, **defaults)
if len(self.param_groups) != 1:
raise ValueError("HALP doesn't support per-parameter options "
"(parameter groups)")
if bits <= 1:
raise ValueError("HALP requires > 1 bit.")
params = self.param_groups[0]['params']
self._params = params
self._curr_w = [p.data for p in params]
self._z = [p.data.clone() for p in params]
self._prev_w = [p.data.clone() for p in params]
# Gradients are lazily allocated and don't exist yet. However, gradients are
# the same shape as the weights so we can still allocate buffers here
self._curr_grad = [p.data.clone() for p in params]
self._prev_grad = [p.data.clone() for p in params]
self._full_grad = None
self.data_loader = data_loader
self.state['t_iters'] = T
self.T = T # Needed to trigger full gradient
logging.info("Data Loader has {} with batch {}".format(len(self.data_loader),
self.data_loader.batch_size))
# Separate scale factor for each layer
self._scale_factors = [1 for p in params]
self._bits = bits
self._mu = mu
self._biased = biased
def __setstate__(self, state):
super(self.__class__, self).__setstate__(state)
def _zero_grad(self):
for p in self._params:
if p.grad is not None:
                p.grad.detach_()  # detach in place before zeroing
p.grad.zero_()
def _set_weights_grad(self,ws,gs):
""" Set the pointers in params to ws and gs for p.data and p.grad.data
respectively. This allows us to avoid copying data in and out of parameters.
"""
for idx, p in enumerate(self._params):
if ws is not None: p.data = ws[idx]
if gs is not None and p.grad is not None:
p.grad.data = gs[idx]
assert (p.grad.data.data_ptr() == gs[idx].data_ptr())
def _rescale(self):
"""Update scale factors for z."""
div_factor = math.pow(2.0, self._bits-1) - 1
for i, fg in enumerate(self._full_grad):
self._scale_factors[i] = fg.norm() / (self._mu * div_factor)
def _reset_z(self):
"""Set z to zero."""
for p in self._z:
p.fill_(0)
def _recenter(self, ws):
"""Add the values in self._z to ws."""
for w, z in zip(ws, self._z):
w.add_(z)
def _compute_full_grad(self, closure):
""" Call the closure function to compute the gradient
over the entire dataset, and accumulate the gradient into
self._full_grad.
"""
# Set up pointers for the full gradient
# Reset gradients before accumulating them
self._set_weights_grad(self._prev_w, self._full_grad)
self._zero_grad()
# Accumulate gradients
for i, (data, target) in enumerate(self.data_loader):
closure(data, target)
# Adjust summed gradients by num_iterations accumulated over
# Assumes loss size average argument is true
for p in self._params:
if p.grad is not None:
p.grad.data /= len(self.data_loader)
# Since p.grad is dynamically allocated, the pointers to the gradients won't
# be set before backward is called the first time
if self._full_grad is None:
self._full_grad = [p.grad.data.clone() for p in self._params]
def step(self, closure):
"""Performs a single optimization step.
Arguments:
closure (callable): A closure that reevaluates the model
and returns the loss.
"""
assert len(self.param_groups) == 1
# Calculate full gradient
if self.state['t_iters'] == self.T:
self._compute_full_grad(closure)
self._rescale()
self._reset_z()
# Reset t
self.state['t_iters'] = 0
# Calculate gradient of prev_w
self._set_weights_grad(self._prev_w, self._prev_grad)
self._zero_grad()
closure()
# Calculate the current curr_w (which equals prev_w + z)
self._set_weights_grad(self._curr_w, self._curr_grad)
self._zero_grad()
loss = closure()
# Adjust the current gradient using the previous gradient and the full gradient.
for i, p in enumerate(self._params):
# Adjust gradient in-place
if p.grad is not None:
# gradient_update = curr_grad - prev_grad + full_grad
p.grad.data -= (self._prev_grad[i] - self._full_grad[i])
# Set the param pointers to z to update z with step
self._set_weights_grad(self._z, None)
# Call optimizer update step
super(self.__class__, self).step()
# Quantize z in place
for p, sf in zip(self._z, self._scale_factors):
p.quantize_(sf, self._bits, biased=self._biased)
# Increment "inner loop" counter
self.state['t_iters'] += 1
# Set curr_w to prev_w + z
for p, p0 in zip(self._curr_w, self._prev_w):
p.copy_(p0)
self._recenter(self._curr_w)
# Update param pointers to curr_w for user access
self._set_weights_grad(self._curr_w, self._curr_grad)
# Update prev_w to prev_w + z after the "inner loop" has finished
if self.state['t_iters'] == self.T:
self._recenter(self._prev_w)
return loss
| torchhalp-master | torchhalp/optim/halp.py |
from svrg import SVRG
from halp import HALP
| torchhalp-master | torchhalp/optim/__init__.py |
import torch
import numpy as np
import os, sys
from sklearn.manifold import Isomap
import utils.distortions as dis
import utils.load_graph as load_graph
module_path = os.path.abspath(os.path.join('./pytorch'))
if module_path not in sys.path:
sys.path.append(module_path)
import graph_helpers as gh
from hyperbolic_models import ProductEmbedding
from hyperbolic_parameter import RParameter
def unwrap(x):
if isinstance(x, list) : return [unwrap(u) for u in x]
if isinstance(x, tuple): return tuple([unwrap(u) for u in list(x)])
return x.detach().cpu().numpy()
def dist_e(u, v):
return np.linalg.norm(u-v)
def dist_row(x, i):
m = x.shape[0]
dx = np.zeros([m])
for j in range(m):
dx[j] = dist_e(x[i,:], x[j,:])
return dx
def dist_matrix(x):
m = x.shape[0]
rets = np.zeros([m,m])
for i in range(m):
rets[i,:] = dist_row(x, i)
#print(rets)
return rets
# load an embedding and a graph and do isomap
def run_isomap(emb_name, dataset, r):
#emb_name = 'isomap_test/smalltree.E10-1.lr10.emb.final'
#dataset = 'data/edges/smalltree.edges'
dataset = 'data/edges/' + dataset + '.edges'
m = torch.load(emb_name)
emb_orig = unwrap(m.E[0].w)
# perform the isomap dim reduction
embedding = Isomap(n_components=r)
emb_transformed = embedding.fit_transform(emb_orig)
#print(emb_transformed.shape)
num_workers = 1
scale = 1
# compute d_avg
G = load_graph.load_graph(dataset)
n = G.order()
H = gh.build_distance(G, scale, num_workers=int(num_workers) if num_workers is not None else 16)
#Hrec = unwrap(m.dist_matrix())
Hrec = dist_matrix(emb_transformed)
mc, me, avg_dist, nan_elements = dis.distortion(H, Hrec, n, num_workers)
wc_dist = me*mc
print("d_avg = ", avg_dist)
return avg_dist | hyperbolics-master | iso_comp.py |
import glob, os, sys
import pandas as pd
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
import iso_comp
if __name__ == '__main__':
run_name = sys.argv[1]
rows = []
for f in sorted(glob.glob(run_name + '/*.emb.final')):
line = os.path.splitext(os.path.splitext(os.path.basename(f))[0])[0] + ' '
dataset = os.path.splitext(os.path.splitext(line)[0])[0]
iso_comp.run_isomap(f, dataset, 2) | hyperbolics-master | run_isomaps.py |
import matplotlib as mpl
import matplotlib.pyplot as plt
import requests
import numpy as np
import json
from scipy.sparse import csr_matrix
import networkx as nx
from collections import defaultdict
import os
def make_edge_set(): return ([],([],[]))
def add_edge(e, i,j):
(v,(row,col)) = e
row.append(i)
col.append(j)
v.append(1)
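# The (v, (row, col)) triple built by make_edge_set/add_edge is the COO-style input
# accepted by scipy.sparse.csr_matrix((data, (row_ind, col_ind)), shape=(n, n)) below.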
# Build dicts based on properties in Wikidata.
Rel_toPIDs ={'airline_hub':'P113', 'lyrics_by':'P676', 'place_of_publication':'P291'}
numb_success = 0
failed_requests = []
json_errors = []
empty_results = []
dense_rels = []
for key, val in Rel_toPIDs.items():
curr_query = '''PREFIX wikibase: <http://wikiba.se/ontology#>
PREFIX wd: <http://www.wikidata.org/entity/>
PREFIX wdt: <http://www.wikidata.org/prop/direct/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT ?item ?instance_of WHERE {
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
OPTIONAL { ?item wdt:%s ?instance_of. }
}
'''%(val)
url = 'https://query.wikidata.org/bigdata/namespace/wdq/sparql'
curr_data = requests.get(url, params={'query': curr_query, 'format': 'json'})
print(curr_data.status_code)
if curr_data.status_code != 200:
print("Failed rel from HTTPS:"+str(key))
failed_requests.append(key)
else:
try:
curr_data = curr_data.json(strict=False)
# Write the edgelists and dicts for the rel.
rel = key
QtoIDs = dict()
IDtoQs = dict()
e = make_edge_set()
counter = 0
triple_count = 0
if 'instance_of' in (curr_data['results']['bindings'][0]):
for triple in curr_data['results']['bindings']:
instance_of = triple['instance_of']['value'].split("/")[-1]
item = triple['item']['value'].split("/")[-1]
if item not in QtoIDs.keys():
QtoIDs[item] = counter
IDtoQs[counter] = item
counter+=1
if instance_of not in QtoIDs.keys():
QtoIDs[instance_of] = counter
IDtoQs[counter] = instance_of
add_edge(e, QtoIDs[item], QtoIDs[instance_of])
add_edge(e, QtoIDs[instance_of], QtoIDs[item])
triple_count+=1
# Take the largest connected component for the relationship.
n = len(QtoIDs)
X = csr_matrix(e, shape=(n, n))
G = nx.from_scipy_sparse_matrix(X)
Gc = max(nx.connected_component_subgraphs(G), key=len)
print(rel)
print("Total number of unique entities: "+str(G.number_of_nodes()))
print("Total number of nodes in lcc: "+str(Gc.number_of_nodes()))
Gc_final = nx.convert_node_labels_to_integers(Gc, ordering="decreasing degree", label_attribute="old_label")
if (Gc.number_of_edges()>100*Gc.number_of_nodes()):
dense_rels.append(key)
#Create the dict for old-id <-> new-id matching for QIDs.
RefDict = Gc_final.node
IDtoQs_f = dict()
QtoIDs_f = dict()
for new_idx in RefDict.keys():
old_idx = RefDict[new_idx]['old_label']
curr_Q = IDtoQs[old_idx]
IDtoQs_f[new_idx] = curr_Q
QtoIDs_f[curr_Q] = new_idx
#Write the final edgelist and dump IDstoQs_f dict.
nx.write_edgelist(Gc_final, "data/wikidata_edges/"+str(rel)+"_lcc.edges",data=False)
json.dump(IDtoQs_f, open("data/wikidata_edges/"+str(rel)+"_IDstoQs.txt","w"))
else:
empty_results.append(key)
except json.decoder.JSONDecodeError:
json_errors.append(key)
print("Failed HTTP requests:")
print(failed_requests)
print("JSONDecodeErrors")
print(json_errors)
print("Empty rels")
print(empty_results)
print("Dense rels")
print(dense_rels)
| hyperbolics-master | products/wikidata_relextract.py |
import logging, argh
import os, sys
import networkx as nx
import numpy as np
# root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# sys.path.insert(0, root_dir)
import utils.load_graph as load_graph
import utils.vis as vis
import utils.distortions as dis
import pytorch.graph_helpers as gh
def Ka(D, m, b, c, a):
if a == m: return 0.0
k = D[a][m]**2 + D[b][c]**2/4.0 - (D[a][b]**2 + D[a][c]**2)/2.0
k /= 2*D[a][m]
# print(f'{m}; {b} {c}; {a}: {k}')
return k
def K(D, n, m, b, c):
ks = [Ka(D, m, b, c, a) for a in range(n)]
return np.mean(ks)
def estimate_curvature(G, D, n):
for m in range(n):
ks = []
edges = list(G.edges(m))
for i in range(len(edges)):
            for j in range(i+1, len(edges)):
                b = edges[i][1]
                c = edges[j][1]
                ks.append(K(D, n, m, b, c))
# TODO turn ks into a cdf
return None
# TODO: what is the correct normalization wrt n? e.g. make sure it works for d-ary trees
def sample_G(G, D, n, n_samples=100):
samples = []
_cnt = 0;
while _cnt < n_samples:
m = np.random.randint(0, n)
edges = list(G.edges(m))
# print(f"edges of {m}: {edges}")
i = np.random.randint(0, len(edges))
j = np.random.randint(0, len(edges))
b = edges[i][1]
c = edges[j][1]
# TODO special case for b=c?
if b==c: continue
a = np.random.randint(0, n)
k = Ka(D, m, b, c, a)
samples.append(k)
# print(k)
_cnt += 1
return np.array(samples)
def sample_components(n1=5, n2=5):
""" Sample dot products of tangent vectors """
a1 = np.random.chisquare(n1-1) # ||x_1||^2
b1 = np.random.chisquare(n1-1) # ||y_1||^2
c1 = 2*np.random.beta((n1-1)/2, (n1-1)/2) - 1 # <x_1,y_1> normalized
c1 = a1*b1*c1**2 # <x_1,y_1>^2
a2 = np.random.chisquare(n2-1) # ||x_1||^2
b2 = np.random.chisquare(n2-1) # ||y_1||^2
c2 = 2*np.random.beta((n2-1)/2, (n2-1)/2) - 1
c2 = a2*b2*c2**2
alpha1 = a1*b1 - c1
alpha2 = a2*b2 - c2
beta = a1*b2 + a2*b1
denom = alpha1+alpha2+beta
return alpha1/denom, alpha2/denom
def sample_K(m1, m2, n1=5, n2=5, n_samples=100):
w1s = []
w2s = []
for _ in range(n_samples):
w2, w1 = sample_components(n1, n2)
w1s.append(w1)
w2s.append(w2)
# match moments of K1 * w1 + K2 * w2
w1s = np.array(w1s)
w2s = np.array(w2s)
coefK1 = np.mean(w1s)
coefK2 = np.mean(w2s)
coefK1K1 = np.mean(w1s**2) # coefficient of K1^2
coefK2K2 = np.mean(w2s**2)
coefK1K2 = np.mean(2*w1s*w2s)
print("coefs", coefK1, coefK2, coefK1K1, coefK1K2, coefK2K2)
# turn into quadratic a K1^2 + b K1 + c = 0
# a = coefK1K1 - coefK1K2*coefK1/coefK2 + coefK2K2*coefK1**2/coefK2**2
# b = coefK1K2*m1/coefK2 - 2*coefK2K2*m1*coefK1/coefK2
# c = coefK2K2*m1**2/coefK2**2 - m2
a = coefK2**2*coefK1K1 - coefK2*coefK1K2*coefK1 + coefK2K2*coefK1**2
b = coefK2 * coefK1K2*m1 - coefK2 * 2*coefK2K2*m1*coefK1
c = coefK2K2*m1**2 - coefK2**2 * m2
print("quadratic", a, b, c)
K1_soln1 = (-b + np.sqrt(b**2-4*a*c))/(2*a)
K1_soln2 = (-b - np.sqrt(b**2-4*a*c))/(2*a)
K2_soln1 = (m1 - coefK1*K1_soln1)/coefK2
K2_soln2 = (m1 - coefK1*K1_soln2)/coefK2
return ((K1_soln1, K2_soln1), (K1_soln2, K2_soln2))
# def match_moments(coefK1, coefK2, coefK12, coefK22, coefK1K2, m1, m2):
# @argh.arg('--dataset')
def estimate(dataset='data/edges/smalltree.edges', n_samples=100000):
G = load_graph.load_graph(dataset)
n = G.order()
GM = nx.to_scipy_sparse_matrix(G, nodelist=list(range(G.order())))
num_workers = 16
D = gh.build_distance(G, 1.0, num_workers) # load the whole matrix
# n_samples = 100000
n1 = 5
n2 = 5
samples = sample_G(G, D, n, n_samples)
# coefs = sample_K(n1, n2, n_samples)
print("stats", np.mean(samples), np.std(samples)**2, np.mean(samples**2))
m1 = np.mean(samples)
m2 = np.mean(samples**2)
solns = sample_K(m1, m2, n1, n2, n_samples)
print(solns)
if __name__ == '__main__':
parser = argh.ArghParser()
parser.set_default_command(estimate)
parser.dispatch()
| hyperbolics-master | products/curv.py |
import numpy as np | hyperbolics-master | analysis/load_emb.py |
# Baselines using ancestor encoding:
import networkx as nx
import os, sys
import subprocess
edges_dir = '../data/edges/'
all_files = os.listdir(edges_dir)
out = open('./spanning_forest_avgs.txt', 'w')
for file in all_files:
if os.path.isdir(edges_dir+file):
continue
print("Working on ", edges_dir+file)
G = nx.read_edgelist(edges_dir+file, data=False)
# get the forest:
G_comps = nx.connected_component_subgraphs(G)
n_comps = 0
avg_dists = []
for comp in G_comps:
n_comps += 1
comp = nx.convert_node_labels_to_integers(comp)
comp_bfs = nx.bfs_tree(comp, 0)
dists = nx.shortest_path_length(comp_bfs, 0)
tot_dists = sum(dists.values())
avg_dist = tot_dists/comp_bfs.order()
avg_dists.append(avg_dist)
# that's it for this graph:
out.write(file + " ")
out.write(str(sum(avg_dists)/n_comps) + "\n")
out.write(str(avg_dists) + "\n")
out.close() | hyperbolics-master | utils/baselines.py |
# library of useful hyperbolic functions
import numpy as np
# Reflection (circle inversion of x through orthogonal circle centered at a)
def isometric_transform(a, x):
r2 = np.linalg.norm(a)**2 - (1.0)
return r2/np.linalg.norm(x - a)**2 * (x-a) + a
# Inversion taking mu to origin
def reflect_at_zero(mu,x):
a = mu/np.linalg.norm(mu)**2
return isometric_transform(a,x)
# Why isn't this in numpy?
def acosh(x):
return np.log(x + np.sqrt(x**2-1))
# Hyperbolic distance
def dist(u,v):
z = 2 * np.linalg.norm(u-v)**2
uu = 1. + z/((1-np.linalg.norm(u)**2)*(1-np.linalg.norm(v)**2))
return acosh(uu)
# Hyperbolic distance from 0
def hyp_dist_origin(x):
return np.log((1+np.linalg.norm(x))/(1-np.linalg.norm(x)))
# Scalar multiplication w*x
def hyp_scale(w, x):
if w == 1:
return x
else:
x_dist = (1+np.linalg.norm(x))/(1-np.linalg.norm(x))
alpha = 1-2/(1+x_dist**w)
alpha *= 1/np.linalg.norm(x)
return alpha*x
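# Illustrative check (not part of the library): hyp_scale(w, x) multiplies the
# hyperbolic distance from the origin by w, e.g. for x = np.array([0.3, 0.0]),
# hyp_dist_origin(hyp_scale(2, x)) is (numerically) 2 * hyp_dist_origin(x).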
# Convex combination (1-w)*x+w*y
def hyp_conv_comb(w, x, y):
# circle inversion sending x to 0
(xinv, yinv) = (reflect_at_zero(x, x), reflect_at_zero(x, y))
# scale by w
pinv = hyp_scale(w, yinv)
# reflect back
return reflect_at_zero(x, pinv)
# Weighted sum w1*x + w2*y
def hyp_weighted_sum(w1, w2, x, y):
p = hyp_conv_comb(w2 / (w1 + w2), x, y)
return hyp_scale(w1 + w2, p) | hyperbolics-master | utils/hyp_functions.py |
# This implements an algorithm for finding a good (Steiner) tree embedding from a graph metric.
import networkx as nx
import scipy.sparse.csgraph as csg
import numpy as np
import time, argh
import data_prep as dp
import distortions as dis
import load_dist as ld
import pickle
from joblib import Parallel, delayed
import multiprocessing
# get the biggest Gromov product in num rows
def biggest_row(metric,start,num, r, n):
p,q,curr = 0,0,-1
for a in range(start, min(start+num, n)-1):
if metric[a] >= 0:
for b in range(n):
if metric[b] >= 0 and a != b:
gpr = gp(dists,a,b,r)
if gpr >= curr:
p,q,curr = (a,b,gpr)
return (p,q,curr)
# get a node from G
def first_node(G):
for node in G.nodes():
return node
# helper to run Dijkstra
def compute_row(i, adj_mat, uw):
return csg.dijkstra(adj_mat, indices=[i], unweighted=uw, directed=False)
# the Gromov product distance
def gp(dists,x,y,z):
dxy = dists[x,y]
dxz = dists[x,z]
dyz = dists[y,z]
return 1/2*(float(dxz+dyz-dxy))
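# i.e. gp(dists, x, y, z) is the Gromov product (x|y)_z = (d(x,z) + d(y,z) - d(x,y)) / 2;
# in a tree it equals the distance from z to the point where the paths z->x and z->y diverge.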
# iterative construction. Requires that nodes are 0...n-1
def construct_tree_i(metric, r, next_label, n):
# build list of nodes to remove and re-attach, until we get down to 1 node...
    removal, partner = np.full(n-2, -1.0), np.full(n-2, -1.0)
metric_shared = np.array(metric)
metric_shared[r] = -1
with Parallel(n_jobs=-1) as parallel:
idx = 0
while sum(metric_shared>=0)>1:
t = time.time()
num=100
res = parallel(delayed(biggest_row)(metric_shared, idx*num, num, r, n) for idx in range(int(np.ceil(n/num))))
biggest_rows = np.vstack(res)
biggest = np.argmax(biggest_rows[:,2])
p = int(biggest_rows[biggest,0])
q = int(biggest_rows[biggest,1])
if dists[p,r] > dists[q,r]:
p,q = q,p
removal[idx] = q
partner[idx] = p
metric_shared[q] = -1
idx += 1
#print("Elapsed = ", time.time()-t)
# put in the first node:
v = np.argmax(metric_shared)
T = nx.Graph()
T.add_edge(int(v),int(r),weight=dists[v,r])
idx -= 1
# place the remaining nodes one by one:
while idx >= 0:
q,p = int(removal[idx]), int(partner[idx])
qr_p = gp(dists,q,r,p)
pr_q = gp(dists,p,r,q)
pq_r = gp(dists,p,q,r)
# get the new weight for the Steiner node and add it in
for node in T.neighbors(p):
new_weight = max(0,T[p][node]["weight"]-qr_p)
T.add_edge(next_label, node, weight=new_weight)
# reattach p and q as leaves
T.remove_node(p)
T.add_edge(next_label, p, weight=qr_p)
T.add_edge(q, next_label, weight=pr_q)
next_label += 1
idx -= 1
return T
@argh.arg("--ds", help="Dataset")
def steiner_tree(ds="1"):
ds = int(ds)
G = dp.load_graph(ds)
n = G.order()
print("# of vertices is ", n)
global dists
dists = ld.load_dist_mat("dists/dist_mat"+str(ds)+".p")
nv = np.zeros(n)
for i in range(n):
nv[i] = 1
metric = list(G.nodes())
# root:
r = first_node(G)
print("Building trees")
t = time.time()
G_tree = construct_tree_i(metric, r, n, n)
print("Done. Elapsed time = ", time.time()-t)
n_Steiner = G_tree.order()
adj_mat_tree = nx.to_scipy_sparse_matrix(G_tree.to_undirected(), range(n_Steiner))
dist_mat_S_tree = Parallel(n_jobs=20)(delayed(compute_row)(i,adj_mat_tree, False) for i in range(n_Steiner))
dist_mat_S_tree = np.vstack(dist_mat_S_tree)
dist_mat_S_tree_n = dist_mat_S_tree[0:n, 0:n]
print("Measuring Distortion")
t = time.time()
t_d_max, t_d_avg, bad = dis.distortion(dists, dist_mat_S_tree_n, n, 2)
print("Steiner tree distortion = ", t_d_max, t_d_avg)
MAP_S = dis.map_score(dists, dist_mat_S_tree_n, n, 2)
print("Steiner tree MAP = ", MAP_S)
file = "./trees/tree" + str(ds) + ".p"
pickle.dump(G_tree, open(file,"wb"))
print("Elapsed time = ", time.time()-t)
return G_tree
if __name__ == '__main__':
_parser = argh.ArghParser()
_parser.add_commands([steiner_tree])
_parser.dispatch()
| hyperbolics-master | utils/steiner.py |
hyperbolics-master | utils/__init__.py |
|
# visualization functions
import numpy as np
import networkx as nx
import os, sys
from itertools import product, combinations
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import utils.hyp_functions as hf
import torch
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.patches import Circle, Wedge, Polygon
from matplotlib.collections import PatchCollection
from matplotlib import patches
from mpl_toolkits.mplot3d import Axes3D
#matplotlib.verbose.set_level("helpful")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# indexing because subplot isn't smart:
def get_ax(num_hypers, num_spheres, ax, emb, is_sphere=0):
idx = 1 if is_sphere and num_hypers > 0 else 0
if num_hypers > 0 and num_spheres > 0 and num_hypers + num_spheres > 2:
if len(ax) > np.maximum(num_hypers, num_spheres):
# this means we're in 3d
ax_this = ax[idx * np.maximum(num_hypers, num_spheres) + emb]
else:
ax_this = ax[idx, emb]
elif num_hypers == 1 and num_spheres == 1:
ax_this = ax[idx]
elif num_hypers > 1 or num_spheres > 1:
ax_this = ax[emb]
else:
ax_this = ax
return ax_this
# convert hyperboloid points (3 dimensions) to Poincare points (2 dimension):
def hyperboloid_to_poincare(a):
x = np.zeros([2])
for i in range(1, 3):
x[i-1] = a[i] / (1.0 + a[0])
return x
# collinearity check. if collinear, draw a line and don't attempt curve
def collinear(a,b,c):
if np.abs(c[0] - b[0]) < .1**4 and np.abs(c[0]-a[0]) < .1**4:
return True
elif np.abs(c[0] - b[0]) < .1**4 or np.abs(c[0]-a[0]) < .1**4:
return False
slope1 = np.abs((c[1]-b[1])/(c[0]-b[0]))
slope2 = np.abs((c[1]-a[1])/(c[0]-a[0]))
if np.abs(slope1 - slope2) < .1**4:
return True
return False
# todo: speed this code up
def get_circle_center(a,b,c):
m = np.zeros([2,2])
m[0,0] = 2*(c[0]-a[0])
m[0,1] = 2*(c[1]-a[1])
m[1,0] = 2*(c[0]-b[0])
m[1,1] = 2*(c[1]-b[1])
v = np.zeros([2,1])
v[0] = c[0]**2 + c[1]**2 - a[0]**2 - a[1]**2
v[1] = c[0]**2 + c[1]**2 - b[0]**2 - b[1]**2
return (np.linalg.inv(m)@v).flatten()
# distance for Euclidean coordinates
def euclid_dist(a,b):
return np.linalg.norm(a-b)
# angles for arc
def get_angles(center, a):
if abs(a[0] - center[0]) < 0.1**3:
if a[1] > center[1] : theta = 90
else: theta = 270
else:
theta = np.rad2deg(np.arctan((a[1]-center[1])/(a[0]-center[0])))
# quadrant 3:
if (a[0]-center[0]) < 0 and (a[1]-center[1]) < 0:
theta += 180
# quadrant 2
if (a[0]-center[0]) < 0 and (a[1]-center[1]) >= 0:
theta -= 180
# always use non-negative angles
if theta < 0: theta += 360
return theta
# draw hyperbolic line:
def draw_geodesic(a, b, c, ax, node1=None, node2=None, verbose=False):
if verbose:
print("Geodesic points are ", a, "\n", b, "\n", c, "\n")
is_collinear = False
if collinear(a,b,c):
is_collinear = True
else:
cent = get_circle_center(a,b,c)
radius = euclid_dist(a, cent)
t1 = get_angles(cent, b)
t2 = get_angles(cent, a)
if verbose:
print("\ncenter at ", cent)
print("radius is ", radius)
print("angles are ", t1, " ", t2)
print("dist(a,center) = ", euclid_dist(cent,a))
print("dist(b,center) = ", euclid_dist(cent,b))
print("dist(c,center) = ", euclid_dist(cent,c))
# if the angle is really tiny, a line is a good approximation
if is_collinear or (np.abs(t1-t2) < 2):
coordsA = "data"
coordsB = "data"
e = patches.ConnectionPatch(a, b, coordsA, coordsB, linewidth=2)
else:
if verbose:
print("angles are theta_1 = ", t1, " theta_2 = ", t2)
if (t2>t1 and t2-t1<180) or (t1>t2 and t1-t2>=180):
e = patches.Arc((cent[0], cent[1]), 2*radius, 2*radius,
theta1=t1, theta2=t2, linewidth=2, fill=False, zorder=2)
else:
e = patches.Arc((cent[0], cent[1]), 2*radius, 2*radius,
theta1=t2, theta2=t1, linewidth=2, fill=False, zorder=2)
ax.add_patch(e)
# to draw geodesic between a,b, we need
# a third point. easy with inversion
def get_third_point(a,b):
b0 = hf.reflect_at_zero(a,b)
c0 = b0/2.0
c = hf.reflect_at_zero(a,c0)
return c
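# (hf.reflect_at_zero(a, .) is an inversion sending a to the origin; through the origin
# the geodesic to b0 is a straight Euclidean segment, so its midpoint b0/2 lies on it,
# and reflecting back gives a third point c on the geodesic through a and b.)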
def draw_geodesic_on_circle(a, b, ax):
lp = 5 # number of points for the mesh
d = np.array(b) - np.array(a)
vals = np.zeros([3, lp])
for i in range(lp):
for j in range(3):
vals[j,i] = a[j] + d[j]*(i/(lp-1))
# let's project back to sphere:
nrm = vals[0,i]**2 + vals[1,i]**2 + vals[2,i]**2
for j in range(3):
vals[j,i] /= np.sqrt(nrm)
# draw the geodesic:
for i in range(lp-1):
ax.plot([vals[0,i], vals[0,i+1]], [vals[1,i], vals[1,i+1]], zs=[vals[2,i], vals[2,i+1]], color='r')
# for circle stuff let's just draw the points
def draw_points_on_circle(a, node, ax):
ax.plot(a[0], a[1], "o", markersize=16)
ax.text(a[0] * (1 + 0.05), a[1] * (1 + 0.05) , node, fontsize=12)
def draw_points_on_sphere(a, node, ax):
ax.scatter(a[0], a[1], a[2], c='b', marker='o', s=32)
ax.text(a[0] * (1 + 0.05), a[1] * (1 + 0.05) , a[2] * (1 + 0.05), node, fontsize=12)
def draw_points_hyperbolic(a, node, ax):
ax.plot(a[0], a[1], "o")
ax.text(a[0] * (1 + 0.05), a[1] * (1 + 0.05) , node, fontsize=12)
# draw the embedding for a graph
# G is the graph, m is the PyTorch hyperbolic model
def draw_graph(G, m, fig, ax):
num_spheres = np.minimum(len(m.S), 5)
num_hypers = np.minimum(len(m.H), 5)
sdim = 0 if len(m.S) == 0 else len((m.S[0]).w[0])
for emb in range(num_spheres):
ax_this = get_ax(num_hypers, num_spheres, ax, emb, is_sphere=1)
if sdim == 3:
spherical_setup_3d(fig, ax_this)
else:
spherical_setup(fig, ax_this)
for emb in range(num_hypers):
ax_this_hyp = get_ax(num_hypers, num_spheres, ax, emb, is_sphere=0)
hyperbolic_setup(fig, ax_this_hyp)
# todo: directly read edge list from csr format
Gr = nx.from_scipy_sparse_matrix(G)
for edge in Gr.edges():
idx = torch.LongTensor([edge[0], edge[1]]).to(device)
for emb in range(num_hypers):
a = hyperboloid_to_poincare(((torch.index_select(m.H[emb].w, 0, idx[0])).clone()).detach().cpu().numpy()[0])
b = hyperboloid_to_poincare(((torch.index_select(m.H[emb].w, 0, idx[1])).clone()).detach().cpu().numpy()[0])
ax_this_hyp = get_ax(num_hypers, num_spheres, ax, emb, is_sphere=0)
c = get_third_point(a,b)
draw_geodesic(a,b,c,ax_this_hyp, edge[0], edge[1])
# let's draw the edges on the sphere; these are geodesics
if sdim == 3:
for emb in range(num_spheres):
ax_this = get_ax(num_hypers, num_spheres, ax, emb, is_sphere=1)
a = ((torch.index_select(m.S[emb].w, 0, idx[0])).clone()).detach().cpu().numpy()[0]
b = ((torch.index_select(m.S[emb].w, 0, idx[1])).clone()).detach().cpu().numpy()[0]
draw_geodesic_on_circle(a, b, ax_this)
for node in Gr.nodes():
idx = torch.LongTensor([int(node)]).to(device)
for emb in range(num_spheres):
ax_this = get_ax(num_hypers, num_spheres, ax, emb, is_sphere=1)
v = ((torch.index_select(m.S[emb].w, 0, idx)).clone()).detach().cpu().numpy()[0]
if sdim == 3:
draw_points_on_sphere(v, node, ax_this)
else:
draw_points_on_circle(v, node, ax_this)
for emb in range(num_hypers):
ax_this_hyp = get_ax(num_hypers, num_spheres, ax, emb, is_sphere=0)
a_hyp = (torch.index_select(m.H[emb].w, 0, idx).clone()).detach().cpu().numpy()[0]
a = hyperboloid_to_poincare(a_hyp)
draw_points_hyperbolic(a, node, ax_this_hyp)
def setup_plot(m, name=None, draw_circle=False):
# create plot
num_spheres = np.minimum(len(m.S), 5)
num_hypers = np.minimum(len(m.H), 5)
tot_rows = 2 if num_spheres > 0 and num_hypers > 0 else 1
wid = np.maximum(num_spheres, num_hypers)
if num_spheres + num_hypers > 1:
fig, axes = plt.subplots(tot_rows, wid, sharey=True, figsize=(wid*10, tot_rows*10))
else:
fig, axes = plt.subplots(figsize = (10, 10))
ax = axes
matplotlib.rcParams['animation.ffmpeg_args'] = '-report'
writer = animation.FFMpegFileWriter(fps=10, metadata=dict(artist='HazyResearch'))#, bitrate=1800)
if name is None:
name = 'ProductVisualizations.mp4'
else:
name += '.mp4'
writer.setup(fig, name, dpi=108)
sdim = 0 if len(m.S) == 0 else len((m.S[0]).w[0])
# need these to all be 3D
if sdim == 3:
for emb in range(num_spheres):
ax_this = get_ax(num_hypers, num_spheres, ax, emb, is_sphere=1)
ax_this.remove()
if num_hypers > 0:
ax_new = fig.add_subplot(tot_rows, wid, wid+emb+1, projection='3d')
elif num_spheres > 1:
ax_new = fig.add_subplot(tot_rows, wid, 1+emb, projection='3d')
else:
ax_new = fig.add_subplot(111, projection='3d')
ax = fig.get_axes()
if num_hypers == 0 and num_spheres == 1: ax = ax[0]
if draw_circle:
for emb in range(num_spheres):
ax_this = get_ax(num_hypers, num_spheres, ax, emb, is_sphere=1)
if sdim == 3:
spherical_setup_3d(fig, ax_this)
else:
spherical_setup(fig, ax_this)
for emb in range(num_hypers):
ax_this = get_ax(num_hypers, num_spheres, ax, emb, is_sphere=0)
hyperbolic_setup(fig, ax_this)
return fig, ax, writer
def hyperbolic_setup(fig, ax):
# set axes
ax.set_ylim([-1.2, 1.2])
ax.set_xlim([-1.2, 1.2])
# draw Poincare disk boundary
e = patches.Arc((0,0), 2.0, 2.0,
linewidth=2, fill=False, zorder=2)
ax.add_patch(e)
def spherical_setup(fig, ax):
# set axes
ax.set_ylim([-1.2, 1.2])
ax.set_xlim([-1.2, 1.2])
# draw circle boundary
e = patches.Arc((0,0), 2.0, 2.0,
linewidth=1, fill=False, zorder=2)
ax.add_patch(e)
def spherical_setup_3d(fig, ax):
ax.set_ylim([-1.2, 1.2])
ax.set_xlim([-1.2, 1.2])
# draw sphere
u, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:10j]
x = np.cos(u)*np.sin(v)
y = np.sin(u)*np.sin(v)
z = np.cos(v)
ax.plot_wireframe(x, y, z, color="y")
def draw_plot():
plt.show()
def clear_plot():
plt.cla()
| hyperbolics-master | utils/vis.py |
# compute and save distance matrices for a few of the graphs (indices 6, 12, 13):
import data_prep as dp
import load_dist as ld
for i in (6,12,13):
G = dp.load_graph(i)
ld.save_dist_mat(G,"dists/dist_mat"+str(i)+".p")
| hyperbolics-master | utils/load_distances.py |
import nltk
from nltk.corpus import wordnet as wn
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
from scipy.sparse.csgraph import floyd_warshall, connected_components
from collections import defaultdict
import numpy as np
import networkx as nx
import json
import time
def make_edge_set(): return ([],([],[]))
def add_edge(e, i,j):
(v,(row,col)) = e
row.append(i)
col.append(j)
v.append(1)
def add_big_edge(e, i,j):
(v,(row,col)) = e
row.append(i)
col.append(j)
v.append(100)
def load_wordnet():
d = dict()
ID_dict = dict()
all_syns = list(wn.all_synsets())
for idx, x in enumerate(all_syns):
d[x] = idx
ID_dict[idx] = x.name().split('.')[0]
n = len(all_syns)
e = make_edge_set()
for idx, x in enumerate(all_syns):
for y in x.hypernyms():
y_idx = d[y]
add_edge(e, idx , y_idx)
add_edge(e, y_idx, idx)
return e, d, ID_dict, all_syns, csr_matrix(e,shape=(n, n))
def load_connected_components():
e, d, ID_dict, all_syns, X = load_wordnet()
C = connected_components(X)
mat_shape = len(C[1])
prev_comp_idx = 0
print("There are "+str(C[0])+ " connected components.")
for num in range(C[0]):
begin = time.time()
curr_comp = np.array(all_syns)[C[1] == num]
print(curr_comp)
print(len(curr_comp))
# mat_shape += len(curr_comp)
curr_comp_idx = d[curr_comp[0]]
if num!=0:
add_big_edge(e, prev_comp_idx , curr_comp_idx)
add_big_edge(e, curr_comp_idx, prev_comp_idx)
prev_comp_idx = curr_comp_idx
print(str(num)+"th cc took "+str(time.time()-begin))
wordID_dict = defaultdict(list)
for key in d.keys():
for word in key.lemma_names():
if "_" not in word:
idx = d[key]
wordID_dict[word].append(idx)
X2 = csr_matrix(e, shape=(mat_shape, mat_shape))
return (ID_dict, wordID_dict, d, mat_shape, X2)
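# --- Hedged explanatory note (not part of the original file) ---
# load_connected_components stitches the WordNet synset forest into one
# connected graph: a representative synset is taken from each connected
# component and consecutive representatives are linked by artificial edges of
# weight 100 (add_big_edge), so downstream embedding code sees a single graph.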
if __name__ == '__main__':
ID_dict, wordID_dict, d, n, G = load_connected_components()
edges = nx.from_scipy_sparse_matrix(G)
print("writing to the file")
nx.write_weighted_edgelist(edges, "embeddings/wordnet_all.edges")
json.dump(ID_dict,open("embeddings/IDstoWords.txt","w"))
json.dump(wordID_dict,open("embeddings/WordstoIDs.txt","w"))
with open('embeddings/WordstoIDs.txt', 'r') as inf2:
WordstoIDs = eval(inf2.read())
with open('embeddings/wordnet100.emb', 'r') as emb:
emb_lines = emb.readlines()
emb_lines = emb_lines[1:]
vector_dict = dict()
for idx, line in enumerate(emb_lines):
curr_line = line.split(',')[:-1]
vector_dict[int(curr_line[0])] = np.asarray(list(map(np.float64, curr_line[1:])))
#Create the dictionary for final embedding (does simple Euclidean averaging)
final_emb = dict()
for word in WordstoIDs.keys():
counter = 0
curr_sum = np.zeros(vector_dict[0].shape)
for idx in WordstoIDs[word]:
curr_sum += vector_dict[idx]
counter +=1
final_emb[word] = curr_sum/counter
lines = []
for key in final_emb.keys():
curr_line = str(key) + " " + " ".join(list(map(str,final_emb[key])))
lines.append(curr_line)
with open('embeddings/wordnet.100d.txt', 'w') as f:
f.write('\n'.join(lines))
| hyperbolics-master | utils/wordnet_forest_prep.py |
# This is to load all of our data
import networkx as nx
import scipy as sp
import numpy as np
# optional dependencies (Bio.Phylo for the phylogenetic tree, utils.word_net_prep
# for WordNet) are imported lazily inside the branches that need them
def load_graph(opt):
if opt == 1:
G = nx.read_edgelist("data/facebook_combined.txt")
elif opt == 2:
G = nx.read_edgelist("data/cithepph.txt")
elif opt == 3:
G = nx.read_edgelist("data/grqc.edgelist")
elif opt == 4:
G = nx.read_edgelist("data/wikilinks.tsv")
elif opt == 5:
G = nx.read_edgelist("data/california.edgelist")
    elif opt == 6:
        from Bio import Phylo  # optional dependency, only needed for this dataset
        tree = Phylo.read("data/T92308.nex", "nexus")
        G = Phylo.to_networkx(tree)
G = nx.convert_node_labels_to_integers(G)
G = G.to_undirected()
elif opt == 7:
G = nx.read_edgelist("data/bio-diseasome.mtx")
elif opt == 8:
G = nx.read_edgelist("data/bio-yeast.mtx")
elif opt == 9:
G = nx.read_edgelist("data/inf-power.mtx")
elif opt == 10:
G = nx.read_edgelist("data/web-edu.mtx")
elif opt == 11:
G = nx.read_edgelist("data/ca-CSphd.mtx")
elif opt == 12:
G = nx.balanced_tree(3,3)
elif opt == 13:
G = nx.balanced_tree(2,2)
    elif opt == 14:
        import utils.word_net_prep as wnp  # optional dependency, only needed for WordNet
        (n, C, _) = wnp.load_big_component()  # third value is the transitive-closure graph
        G = nx.Graph(C).to_undirected()
else:
assert(False)
# take the largest component
Gc = max(nx.connected_component_subgraphs(G), key=len)
G_comp_unsort = max(nx.connected_component_subgraphs(Gc), key=len)
# the connected_component function changes the edge orders, so fix:
G_comp_sorted = nx.Graph()
G_comp_sorted.add_edges_from(sorted(G_comp_unsort.edges()))
G_comp = nx.convert_node_labels_to_integers(G_comp_sorted)
return G_comp
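# --- Hedged usage note (not part of the original file) ---
# load_graph returns the largest connected component of the chosen dataset,
# with nodes relabeled to 0..n-1 (edges are sorted first so the relabeling is
# deterministic). For example:
#   G = load_graph(12)   # balanced tree with branching factor 3, height 3
#   print(G.order(), G.size())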
def save_edges(G, name, data=False):
if data:
nx.write_weighted_edgelist(G, "data/edges/" + name + ".edges")
else:
nx.write_edgelist(G, "data/edges/" + name + ".edges", data)
def make_wordnet_weights():
    import utils.word_net_prep as wnp  # optional dependency
    (n, C, _) = wnp.load_big_component()  # third value is the transitive-closure graph
G = nx.Graph(C).to_undirected()
Gc = max(nx.connected_component_subgraphs(G), key=len)
# 'entity' is 0:
G_BFS = nx.bfs_tree(Gc, 0)
G_W = nx.Graph()
# each edge must be appropriately weighted:
curr_nodes = [0]
next_nodes = []
depth = 0
while 1:
if len(curr_nodes) == 0:
if len(next_nodes) == 0:
break
depth += 1
curr_nodes = next_nodes.copy()
next_nodes.clear()
node = curr_nodes[0]
parent = list(G_BFS.predecessors(node))
if len(parent) > 0:
G_W.add_edge(node, parent[0], weight=2**(depth-1))
curr_nodes.remove(node)
next_nodes += list(G_BFS.successors(node))
save_edges(G_W, "weighted_wordnet", data=True)
| hyperbolics-master | utils/data_prep.py |
from nltk.corpus import wordnet as wn
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
from scipy.sparse.csgraph import floyd_warshall, connected_components
from collections import defaultdict
import numpy as np
import networkx as nx
# for adding edges in CSR format
def make_edge_set(): return ([],([],[]))
def add_edge(e, i,j):
(v,(row,col)) = e
row.append(i)
col.append(j)
v.append(1)
def load_wordnet():
d = dict()
all_syns = list(wn.all_synsets('n'))
for idx, x in enumerate(all_syns): d[x] = idx
n = len(all_syns)
e = make_edge_set()
for idx, x in enumerate(all_syns):
for y in x.hypernyms():
y_idx = d[y]
add_edge(e, idx , y_idx)
# add_edge(e, y_idx, idx)
return csr_matrix(e,shape=(n, n))
def load_big_component():
X = load_wordnet()
C = connected_components(X, directed=False)
sizes = [0] * C[0]
for i in C[1]:
sizes[i] += 1
# sizes = np.array(sizes)
big_comp_idx = np.argmax(sizes)
# print(f"{C[0]} connected components")
# print("connected components sizes: ", sizes[sizes > 1])
print(f"big_comp_idx ", big_comp_idx)
all_syns = list(wn.all_synsets('n'))
comp_0 = np.array(all_syns)[C[1] == big_comp_idx]
n_f = len(comp_0)
_d = dict()
for idx, x in enumerate(comp_0): _d[x] = idx
# e_f = make_edge_set()
# closure = make_edge_set()
e_f = []
closure = []
for idx, x in enumerate(comp_0):
for y in x.hypernyms():
y_idx = _d[y]
# add_edge(e_f, idx , y_idx)
# add_edge(e_f, y_idx, idx)
e_f.append((y_idx, idx))
for y in x.closure(lambda z: z.hypernyms()):
y_idx = _d[y]
# add_edge(closure, idx, y_idx)
closure.append((idx, y_idx))
# X2 = csr_matrix(e_f, shape=(n_f,n_f))
G = nx.DiGraph(e_f)
G_closure = nx.DiGraph(closure)
return (n_f, G, G_closure)
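# --- Hedged usage note (not part of the original file) ---
# load_big_component restricts WordNet nouns to the largest connected component
# of the hypernym relation. G is a directed graph with edges hypernym -> hyponym
# (parent -> child); G_closure additionally has an edge (x, y) for every
# ancestor y reachable from x via repeated hypernym steps.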
if __name__ == '__main__':
n, G, G_closure = load_big_component()
# edges = nx.from_scipy_sparse_matrix(G)
nx.write_edgelist(G, f"data/edges/wordnet2.edges", data=False)
nx.write_edgelist(G_closure, f"data/edges/wordnet_closure.edges", data=False)
| hyperbolics-master | utils/word_net_prep.py |
# distortions.py
# python code to compute distortion/MAP
import numpy as np
import scipy.sparse.csgraph as csg
from joblib import Parallel, delayed
import multiprocessing
import networkx as nx
def entry_is_good(h, h_rec): return (not np.isnan(h_rec)) and (not np.isinf(h_rec)) and h_rec != 0 and h != 0
def distortion_entry(h,h_rec,me,mc):
avg = abs(h_rec - h)/h
if h_rec/h > me: me = h_rec/h
if h/h_rec > mc: mc = h/h_rec
return (avg,me,mc)
def distortion_row(H1, H2, n, row):
mc, me, avg, good = 0,0,0,0
for i in range(n):
if i != row and entry_is_good(H1[i], H2[i]):
(_avg,me,mc) = distortion_entry(H1[i], H2[i],me,mc)
good += 1
avg += _avg
avg /= good if good > 0 else 1.0
return (mc, me, avg, n-1-good)
def distortion(H1, H2, n, jobs):
H1, H2 = np.array(H1), np.array(H2)
dists = Parallel(n_jobs=jobs)(delayed(distortion_row)(H1[i,:],H2[i,:],n,i) for i in range(n))
dists = np.vstack(dists)
mc = max(dists[:,0])
me = max(dists[:,1])
# wc = max(dists[:,0])*max(dists[:,1])
avg = sum(dists[:,2])/n
bad = sum(dists[:,3])
return (mc, me, avg, bad)
def map_via_edges(G, i, h_rec):
neighbors = set(map(int, G.getrow(i).indices))
sorted_dist = np.argsort(h_rec)
m = len(neighbors)
precs = np.zeros(m)
n_correct = 0
j = 0
n = h_rec.size
    n_idx = np.array(list(neighbors), dtype=int)
sds = sorted_dist[1:(m+1)]
# print(f"{n_idx} {type(n_idx)} {n_idx.dtype}")
# print(f"i={i} neighbors={neighbors} {sds} {h_rec[n_idx]} {h_rec[sds]}")
# skip yourself, you're always the nearest guy
for i in range(1,n):
if sorted_dist[i] in neighbors:
n_correct += 1
precs[j] = n_correct/float(i)
j += 1
if j == m:
break
return np.sum(precs)/min(n,m)
# return np.sum(precs)/j
def map_row(H1, H2, n, row, verbose=False):
edge_mask = (H1 == 1.0)
m = np.sum(edge_mask).astype(int)
assert m > 0
if verbose: print(f"\t There are {m} edges for {row} of {n}")
d = H2
sorted_dist = np.argsort(d)
if verbose:
print(f"\t {sorted_dist[0:5]} vs. {np.array(range(n))[edge_mask]}")
print(f"\t {d[sorted_dist[0:5]]} vs. {H1[edge_mask]}")
precs = np.zeros(m)
n_correct = 0
j = 0
# skip yourself, you're always the nearest guy
# TODO (A): j is redundant here
for i in range(1,n):
if edge_mask[sorted_dist[i]]:
n_correct += 1
precs[j] = n_correct/float(i)
j += 1
if j == m:
break
return np.sum(precs)/m
def map_score(H1, H2, n, jobs):
#maps = Parallel(n_jobs=jobs)(delayed(map_row)(H1[i,:],H2[i,:],n,i) for i in range(n))
maps = [map_row(H1[i,:],H2[i,:],n,i) for i in range(n)]
return np.sum(maps)/n
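# --- Hedged usage sketch (not part of the original module) ---
# distortion() and map_score() both take dense n x n distance matrices: the
# true graph distances and the distances recovered from an embedding. The toy
# example below uses a 4-node path graph and a "recovered" matrix that simply
# stretches every distance by 10%.
if __name__ == '__main__':
    G = nx.path_graph(4)
    H_true = np.asarray(nx.floyd_warshall_numpy(G))
    H_rec = 1.1 * H_true
    n = H_true.shape[0]
    print("distortion (mc, me, avg, bad):", distortion(H_true, H_rec, n, jobs=1))
    print("MAP:", map_score(H_true, H_rec, n, jobs=1))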
| hyperbolics-master | utils/distortions.py |
# load_dist.py
import networkx as nx
import numpy as np
import pickle
import scipy.sparse.csgraph as csg
from joblib import Parallel, delayed
import multiprocessing
#import data_prep as dp
import time
import torch
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
sys.path.insert(0, parentdir+"/pytorch")
import pytorch.hyperbolic_models
def compute_row(i, adj_mat):
return csg.dijkstra(adj_mat, indices=[i], unweighted=True, directed=False)
def save_dist_mat(G, file):
n = G.order()
print("Number of nodes is ", n)
adj_mat = nx.to_scipy_sparse_matrix(G, nodelist=list(range(G.order())))
t = time.time()
num_cores = multiprocessing.cpu_count()
    dist_mat = Parallel(n_jobs=num_cores)(delayed(compute_row)(i,adj_mat) for i in range(n))
dist_mat = np.vstack(dist_mat)
print("Time elapsed = ", time.time()-t)
pickle.dump(dist_mat, open(file,"wb"))
def load_dist_mat(file):
return pickle.load(open(file,"rb"))
def unwrap(x):
""" Extract the numbers from (sequences of) pytorch tensors """
if isinstance(x, list) : return [unwrap(u) for u in x]
if isinstance(x, tuple): return tuple([unwrap(u) for u in list(x)])
return x.detach().cpu().numpy()
def load_emb_dm(file):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
m = torch.load(file).to(device)
H = unwrap(m.dist_matrix())
return H
def get_dist_mat(G, parallelize=True):
n = G.order()
print("Number of nodes is ", n)
adj_mat = nx.to_scipy_sparse_matrix(G, nodelist=list(range(G.order())))
t = time.time()
num_cores = multiprocessing.cpu_count() if parallelize else 1
dist_mat = Parallel(n_jobs=num_cores)(delayed(compute_row)(i,adj_mat) for i in range(n))
dist_mat = np.vstack(dist_mat)
print("Time elapsed = ", time.time()-t)
return dist_mat
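# --- Hedged usage sketch (not part of the original file) ---
# e.g. compute and cache all-pairs shortest-path hop counts for a small tree:
#   G = nx.balanced_tree(2, 3)
#   D = get_dist_mat(G)                      # dense n x n numpy array
#   pickle.dump(D, open("dists/tree.p", "wb"))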
| hyperbolics-master | utils/load_dist.py |
# This is to load data
# the graph needs to be prepared; for example utils.data_prep preprocesses and saves prepared edge lists
import networkx as nx
# def load_graph(file_name, directed=False):
# container = nx.DiGraph() if directed else nx.Graph()
# G = nx.read_edgelist(file_name, data=(('weight',float),), create_using=container)
# G_comp = nx.convert_node_labels_to_integers(G)
# return G_comp
def load_graph(file_name, directed=False):
G = nx.DiGraph() if directed else nx.Graph()
with open(file_name, "r") as f:
for line in f:
tokens = line.split()
u = int(tokens[0])
v = int(tokens[1])
if len(tokens) > 2:
w = float(tokens[2])
G.add_edge(u, v, weight=w)
else:
G.add_edge(u,v)
return G
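# --- Hedged usage note (not part of the original file) ---
# Each line of the edge file is "u v" or "u v w" with integer node ids and an
# optional float weight; for example a file containing
#   0 1
#   1 2 0.5
# produces an unweighted edge (0, 1) and an edge (1, 2) with weight 0.5.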
| hyperbolics-master | utils/load_graph.py |
from __future__ import unicode_literals, print_function, division
import os
import numpy as np
import scipy.sparse.csgraph as csg
from joblib import Parallel, delayed
import multiprocessing
import networkx as nx
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import math
from io import open
import unicodedata
import string
import re
import random
import json
import mapping_utils as util
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
euclidean_embeddings = {}
saved_tensors = os.listdir("tree_emb_saved/")
indices = []
for file in saved_tensors:
idx = int(file.split(".")[0])
indices.append(idx)
euclidean_embeddings[idx] = torch.load("tree_emb_saved/"+str(file), map_location=torch.device('cpu'))
#Riemannian SGD
from torch.optim.optimizer import Optimizer, required
spten_t = torch.sparse.FloatTensor
def poincare_grad(p, d_p):
"""
Calculates Riemannian grad from Euclidean grad.
Args:
p (Tensor): Current point in the ball
d_p (Tensor): Euclidean gradient at p
"""
if d_p.is_sparse:
p_sqnorm = torch.sum(
p.data[d_p._indices()[0].squeeze()] ** 2, dim=1,
keepdim=True
).expand_as(d_p._values())
n_vals = d_p._values() * ((1 - p_sqnorm) ** 2) / 4
d_p = spten_t(d_p._indices(), n_vals, d_p.size())
else:
p_sqnorm = torch.sum(p.data ** 2, dim=-1, keepdim=True)
d_p = d_p * ((1 - p_sqnorm) ** 2 / 4).expand_as(d_p)
return d_p
def euclidean_grad(p, d_p):
return d_p
def retraction(p, d_p, lr):
# Gradient clipping.
if torch.all(d_p < 1000) and torch.all(d_p>-1000):
p.data.add_(-lr, d_p)
class RiemannianSGD(Optimizer):
r"""Riemannian stochastic gradient descent.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
rgrad (Function): Function to compute the Riemannian gradient from
an Euclidean gradient
retraction (Function): Function to update the parameters via a
retraction of the Riemannian gradient
lr (float): learning rate
"""
def __init__(self, params, lr=required, rgrad=required, retraction=required):
defaults = dict(lr=lr, rgrad=rgrad, retraction=retraction)
super(RiemannianSGD, self).__init__(params, defaults)
def step(self, lr=None):
"""Performs a single optimization step.
Arguments:
lr (float, optional): learning rate for the current update.
"""
loss = None
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
d_p = p.grad.data
if lr is None:
lr = group['lr']
d_p = group['rgrad'](p, d_p)
group['retraction'](p, d_p, lr)
return loss
# Does Euclidean to hyperbolic mapping using series of FC layers.
# We use ground truth distance matrix for the pair since the distortion for hyperbolic embs are really low.
def trainFCHyp(input_matrix, ground_truth, n, mapping, mapping_optimizer):
mapping_optimizer.zero_grad()
loss = 0
output = mapping(input_matrix.float())
dist_recovered = util.distance_matrix_hyperbolic(output)
loss += util.distortion(ground_truth, dist_recovered, n)
loss.backward()
mapping_optimizer.step()
return loss.item()
def trainFCIters(mapping, n_epochs=5, n_iters=500, print_every=50, plot_every=100, learning_rate=0.01):
start = time.time()
plot_losses = []
print_loss_total = 0
plot_loss_total = 0
mapping_optimizer = RiemannianSGD(mapping.parameters(), lr=learning_rate, rgrad=poincare_grad, retraction=retraction)
training_pairs = [util.pairfromidx(idx) for idx in range(n_iters)]
for i in range(n_epochs):
print("Starting epoch "+str(i))
iter=1
for idx in indices:
input_matrix = euclidean_embeddings[idx]
target_matrix = training_pairs[idx][1]
n = training_pairs[idx][2]
loss = trainFCHyp(input_matrix, target_matrix, n, mapping, mapping_optimizer)
print_loss_total += loss
plot_loss_total += loss
if iter % print_every == 0:
print_loss_avg = print_loss_total / print_every
print_loss_total = 0
print('%s (%d %d%%) %.4f' % (util.timeSince(start, iter / n_iters),
iter, iter / n_iters * 100, print_loss_avg))
if iter % plot_every == 0:
plot_loss_avg = plot_loss_total / plot_every
plot_losses.append(plot_loss_avg)
plot_loss_total = 0
iter+=1
input_size = 10
output_size = 10
mapping = nn.Sequential(
nn.Linear(input_size, 50).to(device),
nn.ReLU().to(device),
nn.Linear(50, output_size).to(device),
nn.ReLU().to(device))
trainFCIters(mapping)
| hyperbolics-master | scratch/tree_mapping.py |
"""This file contains core hyperbolic operations for learning modules."""
import numpy as np
import random
import os
import logging
from numpy import linalg as la
from numpy import random
import torch
import torch.nn.functional as F
import torch.nn as nn
EPS = 1e-15
PROJ_EPS = 1e-5
MAX_TANH_ARG = 15.0
def torch_norm(x):
return torch.norm(x, dim=1, keepdim=True)
def torch_project_hyp_vec(v, c=1):
"""Projects the hyperbolic vectors to the inside of the ball."""
# clip_norm = torch.tensor(1-PROJ_EPS)
# clipped_v = F.normalize(v, p=2, dim=1)*clip_norm
# return clipped_v
return v
def t_arctanh(v):
return 0.5*torch.log((1+v)/(1-v))
def torch_dot(x, y):
return torch.sum(x * y, dim=1, keepdim=True)
def torch_atanh(x):
return t_arctanh(torch.min(x, torch.tensor(1. - EPS)))
def torch_tanh(x):
return torch.tanh(torch.min(torch.max(x, torch.tensor(-MAX_TANH_ARG)), torch.tensor(MAX_TANH_ARG)))
def torch_lambda_x(x, c=1):
return 2./(1-torch_dot(x,x))
def hyp_add_mob(u, v, c=1):
num = (1.0 + 2.0 * c * np.dot(u, v) + c * la.norm(v)**2) * \
u + (1.0 - c * la.norm(u)**2) * v
denom = 1.0 + 2.0 * c * np.dot(u, v) + c**2 * la.norm(v)**2 * la.norm(u)**2
return num/denom
def torch_hyp_add(u, v, c=1):
"""Accepts torch tensors u, v and returns their sum in hyperbolic
space in tensor format. Radius of the open ball is 1/sqrt(c). """
v = v+torch.tensor(EPS)
torch_dot_u_v = 2 * torch_dot(u, v)
torch_norm_u_sq = torch_dot(u,u)
torch_norm_v_sq = torch_dot(v,v)
denominator = 1. + torch_dot_u_v + torch_norm_v_sq * torch_norm_u_sq
result = (1. + torch_dot_u_v + torch_norm_v_sq) / denominator * u + (1. - torch_norm_u_sq) / denominator * v
return torch_project_hyp_vec(result)
# Scalar multiplication in the Poincare ball (ambient-space formula); intended
# to be compatible with the exp/log-map version hyp_scale_exp below.
def hyp_scale_amb(r, x):
"""Scales x in hyperbolic space with r using the ambient space approach."""
if r == 1:
return x
else:
x_dist = (1+np.linalg.norm(x))/(1-np.linalg.norm(x))
alpha = 1-2/(1+x_dist**r)
alpha *= 1/np.linalg.norm(x)
product = alpha*x
return product
def hyp_scale_exp(r, x):
"""Scalar mult using exp map approach."""
return exp_map(0, r*log_map(0, x))
def hyp_add(u, v, c=1):
num = (1.0 + 2.0 * c * np.dot(u, v) + c * la.norm(v)**2) * \
u + (1.0 - c * la.norm(u)**2) * v
denom = 1.0 + 2.0 * c * np.dot(u, v) + c**2 * la.norm(v)**2 * la.norm(u)**2
return num/denom
def exp_map(x, v, c=1):
term = np.tanh(np.sqrt(c) * 2. / (1 - c * la.norm(x)**2) *
la.norm(v) / 2) / (np.sqrt(c) * la.norm(v)) * v
return hyp_add_mob(x, term, c)
def torch_scale_exp(r, x):
"""Scalar mult using exp map approach in torch."""
zero = torch.zeros(x.shape)
return torch_exp_map(zero, r*torch_log_map(zero, x))
def log_map(x, y, c=1):
diff = hyp_add_mob(-x, y, c)
lam = 2. / (1 - c * la.norm(x)**2)
return 2. / (np.sqrt(c) * lam) * np.arctanh(np.sqrt(c) * la.norm(diff)) / (la.norm(diff)) * diff
def torch_exp_map(x, v, c=1):
"""Exp map for the vector v lying on the tangent space T_xM to
the point x in the manifold M."""
v = v + torch.tensor(EPS)
norm_v = torch_norm(v)
term = (torch_tanh(torch_lambda_x(x, c) * norm_v / 2) / (norm_v)) * v
return torch_hyp_add(x, term, c)
def torch_log_map_x(x, y, c=1):
diff = torch_hyp_add(-x, y, c)+torch.tensor(EPS)
norm_diff = torch_norm(diff)
lam = torch_lambda_x(x, c)
return ( (2 / lam) * torch_atanh(norm_diff) / norm_diff) * diff
def torch_exp_map_zero(v, c=1):
# v = v + EPS # Perturbe v to avoid dealing with v = 0
v=v+torch.tensor(EPS)
norm_v = torch_norm(v)
result = torch_tanh(norm_v) / (norm_v) * v
return torch_project_hyp_vec(result, c)
def torch_log_map_zero(y, c=1):
# diff = y + EPS
diff = y+torch.tensor(EPS)
norm_diff = torch_norm(diff)
return torch_atanh(norm_diff) / norm_diff * diff
def mv_mul_hyp(M, x, c=1):
Mx_norm = la.norm(M.dot(x))
x_norm = la.norm(x)
return 1. / np.sqrt(c) * np.tanh(Mx_norm / x_norm * np.arctanh(np.sqrt(c) * x_norm)) / Mx_norm * (M.dot(x))
def torch_mv_mul_hyp(M, x, c=1):
x = x + torch.tensor(EPS)
Mx = torch.matmul(x, M)+torch.tensor(EPS)
MX_norm = torch_norm(Mx)
x_norm = torch_norm(x)
result = torch_tanh(MX_norm / x_norm * torch_atanh(x_norm)) / MX_norm * Mx
return torch_project_hyp_vec(result, c)
# x is hyperbolic, u is Euclidean. Computes diag(u) \otimes x.
def torch_pointwise_prod(x, u, c=1):
x = x+torch.tensor(EPS)
Mx = x * u + torch.tensor(EPS)
MX_norm = torch_norm(Mx)
x_norm = torch_norm(x)
result = torch_tanh(MX_norm / x_norm * torch_atanh(x_norm)) / MX_norm * Mx
return torch_project_hyp_vec(result, c)
def hyp_non_lin(v, activation):
logmap = log_map(0, v, 1)
return exp_map(v, activation(logmap), 1)
def euclidean_softmax(x):
"""Euclidean softmax."""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
def hyp_softmax_k(a_k, p_k, x, c=1):
#This needs to be done for every class k in number of classes.
"""Hyperbolic softmax.
a_k is a Euclidean and p_k is a hyperbolic parameter."""
minus_p_plus_x = torch_hyp_add(-p_k, x, c)
norm_a = torch.norm(a_k)
lambda_px = torch_lambda_x(minus_p_plus_x, c)
px_dot_a = torch.dot(minus_p_plus_x, a_k/norm_a)
return 2 * norm_a * torch.asinh(px_dot_a * lambda_px)
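# --- Hedged sanity-check sketch (not part of the original module) ---
# The torch_* maps operate on batches of row vectors (shape [batch, dim]).
# torch_exp_map_zero and torch_log_map_zero should be approximate inverses on
# the Poincare ball, and torch_hyp_add is the Mobius addition used above.
if __name__ == '__main__':
    v = torch.tensor([[0.1, 0.2], [0.3, -0.1]])
    x = torch_exp_map_zero(v)        # tangent vectors at 0 -> points in the ball
    v_back = torch_log_map_zero(x)   # map the points back to the tangent space
    print("round-trip error:", torch.max(torch.abs(v - v_back)).item())
    print("Mobius sum:", torch_hyp_add(x[0:1], x[1:2]))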
| hyperbolics-master | scratch/learning_util.py |
from __future__ import unicode_literals, print_function, division
import os
import numpy as np
import scipy.sparse.csgraph as csg
from joblib import Parallel, delayed
import multiprocessing
import networkx as nx
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import math
from io import open
import unicodedata
import string
import re
import random
import json
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Distortion calculations
def acosh(x):
return torch.log(x + torch.sqrt(x**2-1))
def dist_h(u,v):
z = 2*torch.norm(u-v,2)**2
uu = 1. + torch.div(z,((1-torch.norm(u,2)**2)*(1-torch.norm(v,2)**2)))
return acosh(uu)
def distance_matrix_euclidean(input):
row_n = input.shape[0]
mp1 = torch.stack([input]*row_n)
mp2 = torch.stack([input]*row_n).transpose(0,1)
dist_mat = torch.sum((mp1-mp2)**2,2).squeeze()
return dist_mat
def distance_matrix_hyperbolic(input):
row_n = input.shape[0]
dist_mat = torch.zeros(row_n, row_n, device=device)
for row in range(row_n):
for i in range(row_n):
if i != row:
dist_mat[row, i] = dist_h(input[row,:], input[i,:])
return dist_mat
def entry_is_good(h, h_rec): return (not torch.isnan(h_rec)) and (not torch.isinf(h_rec)) and h_rec != 0 and h != 0
def distortion_entry(h,h_rec):
avg = abs(h_rec - h)/h
return avg
def distortion_row(H1, H2, n, row):
avg, good = 0, 0
for i in range(n):
if i != row and entry_is_good(H1[i], H2[i]):
_avg = distortion_entry(H1[i], H2[i])
good += 1
avg += _avg
if good > 0:
avg /= good
else:
avg, good = torch.tensor(0., device=device, requires_grad=True), torch.tensor(0., device=device, requires_grad=True)
return (avg, good)
def distortion(H1, H2, n, jobs=16):
# dists = Parallel(n_jobs=jobs)(delayed(distortion_row)(H1[i,:],H2[i,:],n,i) for i in range(n))
dists = (distortion_row(H1[i,:],H2[i,:],n,i) for i in range(n))
to_stack = [tup[0] for tup in dists]
avg = torch.stack(to_stack).sum()/n
return avg
#Loading the graph and getting the distance matrix.
def load_graph(file_name, directed=False):
G = nx.DiGraph() if directed else nx.Graph()
with open(file_name, "r") as f:
for line in f:
tokens = line.split()
u = int(tokens[0])
v = int(tokens[1])
if len(tokens) > 2:
w = float(tokens[2])
G.add_edge(u, v, weight=w)
else:
G.add_edge(u,v)
return G
def compute_row(i, adj_mat):
return csg.dijkstra(adj_mat, indices=[i], unweighted=True, directed=False)
def get_dist_mat(G):
n = G.order()
adj_mat = nx.to_scipy_sparse_matrix(G, nodelist=list(range(G.order())))
t = time.time()
num_cores = multiprocessing.cpu_count()
dist_mat = Parallel(n_jobs=num_cores)(delayed(compute_row)(i,adj_mat) for i in range(n))
dist_mat = np.vstack(dist_mat)
return dist_mat
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (asMinutes(s), asMinutes(rs))
def showPlot(points):
plt.figure()
fig, ax = plt.subplots()
loc = ticker.MultipleLocator(base=0.2)
ax.yaxis.set_major_locator(loc)
plt.plot(points)
def pairfromidx(idx):
G = load_graph("random_trees_edges/"+str(idx)+".edges")
target_matrix = get_dist_mat(G)
target_tensor = torch.from_numpy(target_matrix).float().to(device)
target_tensor.requires_grad = False
n = G.order()
    return ([], target_tensor, n, [])
| hyperbolics-master | scratch/mapping_utils.py
import argh
import os
import subprocess
import itertools
# ranks = [2,5,10,50,100,200]
ranks = [200]
def run_comb2(run_name, datasets):
os.makedirs(f"{run_name}/comb_dim2", exist_ok=True)
params = []
rank = 2
epss = [1.0, 0.1]
precision = 8192
for dataset, eps in itertools.product(datasets, epss):
# julia comb.jl -d ../data/edges/phylo_tree.edges -e 1.0 -p 256 -s -r 200 -c -m phylo_tree.save -a
param = [
'-d', f"data/edges/{dataset}.edges",
# '-m', f"data/comb/{dataset}.r{rank}.p{precision}.e{eps}.emb",
'-m', f"{run_name}/comb_embeddings/{dataset}.r{rank}.p{precision}.e{eps}.emb",
'-p', str(precision),
'-e', str(eps),
'-r', str(rank)
]
params.append(" ".join(param))
cmd = 'julia combinatorial/comb.jl'
with open(f"{run_name}/comb.r2.cmds", "w") as cmd_log:
cmd_log.writelines('\n'.join(params))
with open(f"{run_name}/comb.r2.log", "w") as log:
subprocess.run(" ".join(['parallel', ':::', *[f'"{cmd} {p}"' for p in params]]),
shell=True, stdout=log)
def run_comb(run_name, datasets, precision=256):
os.makedirs(f"{run_name}/comb_embeddings", exist_ok=True)
params = []
for dataset, rank in itertools.product(datasets, ranks):
# julia comb.jl -d ../data/edges/phylo_tree.edges -e 1.0 -p 256 -s -r 200 -c -m phylo_tree.save -a
param = [
'-d', f"data/edges/{dataset}.edges",
# '-m', f"data/comb/{dataset}.r{rank}.p{precision}.emb",
'-m', f"{run_name}/comb_embeddings/{dataset}.r{rank}.p{precision}.emb",
'-p', str(precision),
'-e', '1.0',
'-r', str(rank),
'-a']
if rank > 10:
param.append('-c')
params.append(" ".join(param))
cmd = 'julia combinatorial/comb.jl'
with open(f"{run_name}/comb.p{precision}.cmds", "w") as cmd_log:
cmd_log.writelines('\n'.join(params))
with open(f"{run_name}/comb.p{precision}.log", "w") as log:
full_cmd = " ".join(['parallel', ':::', *[f'"{cmd} {p}"' for p in params]])
print(full_cmd)
subprocess.run(" ".join(['parallel', ':::', *[f'"{cmd} {p}"' for p in params]]),
shell=True, stdout=log)
def run_pytorch(run_name, datasets, epochs, batch_size, warm_start=False, comb=False):
precision = None
if warm_start:
# run combinatorial code first in double precision
precision = 53
if comb:
run_comb(run_name, datasets, precision=precision)
learning_rate = 5
params = []
# with open(f"{run_name}/pytorch.params", "w") as param_file:
# param_file.writelines("\n".join(params))
for dataset, rank in itertools.product(datasets, ranks):
log_w = ".w" if warm_start else ""
log_name = f"{run_name}/{dataset}{log_w}.r{rank}.log"
param = [
f"data/edges/{dataset}.edges",
'--log-name', log_name,
'--batch-size', str(batch_size),
'--epochs', str(epochs),
'-r', str(rank),
'--checkpoint-freq', '100',
'--use-svrg',
'-T 0',
# '--subsample 2000',
'--learning-rate', str(learning_rate)]
if warm_start:
param += ['--warm-start', f"{run_name}/comb_embeddings/{dataset}.r{rank}.p{precision}.emb"]
params.append(" ".join(param))
cmd = " ".join([ 'CUDA_VISIBLE_DEVICES=0', 'python', 'pytorch/pytorch_hyperbolic.py', 'learn' ])
# print(*[f'"{cmd} {p}"' for p in params])
# subprocess.run(['parallel',
# ':::',
# *[f'"{cmd} {p}"' for p in params]
# ], shell=True)
parallel_cmd = " ".join(['parallel',
':::',
*[f'"{cmd} {p}"' for p in params]
])
print(parallel_cmd)
subprocess.run(parallel_cmd, shell=True)
@argh.arg("run_name", help="Directory to store the run; will be created if necessary")
@argh.arg('-d', "--datasets", nargs='+', type=str, help = "Datasets")
@argh.arg("--epochs", help="Number of epochs to run Pytorch optimizer")
@argh.arg("--batch-size", help="Batch size")
def run(run_name, datasets=[], epochs=5000, batch_size=1024):
os.makedirs(run_name, exist_ok=True)
# combinatorial high dim
# run_comb(run_name, datasets)
# 2d combinatorial
# run_comb2(run_name, datasets)
# pytorch by itself
run_pytorch(run_name, datasets, epochs=epochs, batch_size=batch_size, warm_start=False)
# pytorch with warmstart
run_pytorch(run_name, datasets, epochs=epochs, batch_size=batch_size, warm_start=True, comb=True)
if __name__ == '__main__':
_parser = argh.ArghParser()
# _parser.add_commands([build])
# _parser.dispatch()
_parser.set_default_command(run)
_parser.dispatch()
| hyperbolics-master | scripts/run_exps.py |
import nltk
from nltk.corpus import wordnet as wn
import numpy as np
import networkx as nx
from scipy.sparse import csr_matrix
import json
from collections import defaultdict
import matplotlib.pyplot as plt
def make_edge_set(): return ([],([],[]))
def add_edge(e,i,j):
(v,(row,col)) = e
row.append(i)
col.append(j)
v.append(1)
def get_hyponym_tree(syn,d,e,counter):
if syn in d.keys():
syn_idx = d[syn]
elif syn not in d.keys():
counter +=1
syn_idx = counter
d[syn] = syn_idx
curr_list = syn.hyponyms()
if len(curr_list) != 0:
for hyp in curr_list:
if hyp in d.keys():
hyp_idx = d[hyp]
elif hyp not in d.keys():
counter +=1
hyp_idx = counter
d[hyp] = hyp_idx
add_edge(e, syn_idx, hyp_idx)
add_edge(e, hyp_idx, syn_idx)
e, d, counter = get_hyponym_tree(hyp,d,e,counter)
return e, d, counter
d = dict()
IDtoWord = dict()
e = make_edge_set()
word = "attribute"
immediate_synsets = wn.synsets(word)
print(immediate_synsets)
counter = 0
num_syns = 0
for syn in immediate_synsets:
num_syns += len(syn.hyponyms())
d[syn] = 0
e, d, counter = get_hyponym_tree(syn, d, e, counter)
#Get some stats.
mat_shape = max(e[1][1])
M = csr_matrix(e, shape=(mat_shape+1, mat_shape+1))
G = nx.from_scipy_sparse_matrix(M)
print("Number of edges:")
print(len(e[0]))
print("Number of nodes:")
print(max(e[1][1]))
print("Degree of main node:")
print(G.degree(0))
print("Degree should be:")
print(num_syns)
for key, val in d.items():
if val != 0:
name = key.name().split('.')[0]
IDtoWord[val] = name
IDtoWord[0] = word
#Save stuff.
nx.write_edgelist(G, "data/edges/wn_small.edges",data=False)
json.dump(IDtoWord, open("data/edges/wn_small_dict.txt","w"))
# nx.draw_networkx(G, with_labels=True)
# plt.show()
| hyperbolics-master | scripts/wn_small_gen.py |
import argh, os
from collections import defaultdict
#cat run_file.sh | parallel -P 4 "source path.src; bash -c {}"
def work_command(run_name, dataset, rank, gpu, batch_size, epochs, scale):
run_stem = f"{run_name}/dataset_{dataset}.r={rank}"
exec_str = f"CUDA_VISIBLE_DEVICES=\"{gpu}\" python pytorch/pytorch_hyperbolic.py learn {dataset} -s {scale} --model-save-file {run_stem}.model -r {rank} --batch-size {batch_size} --epochs {epochs} --log-name {run_stem}.log"
return exec_str
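# --- Hedged illustration (not part of the original file) ---
# For run_name="runs", dataset=7, rank=10, gpu=1, batch_size=16384, epochs=100
# and scale="1.0", work_command returns the single-line string (wrapped here
# for readability):
#   CUDA_VISIBLE_DEVICES="1" python pytorch/pytorch_hyperbolic.py learn 7 -s 1.0
#     --model-save-file runs/dataset_7.r=10.model -r 10 --batch-size 16384
#     --epochs 100 --log-name runs/dataset_7.r=10.log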
def get_scale_dict(col=1, scale_file="scripts/scale_eps_1.txt"):
with open(scale_file) as fh: ls = fh.readlines()
d = dict()
for l in ls:
l.strip()
k,v = l.strip().split("\t")
d[k] = v
return d
@argh.arg("run_name", help="Director to store the run")
@argh.arg("--epochs", help="Number of epochs to run")
@argh.arg("--batch-size", help="Batch Size")
@argh.arg("--gpus", help="Number of GPUS")
@argh.arg("--nParallel", help="Number of Concurrent jobs")
def build(run_name, epochs=100, batch_size=16384, gpus=2, nParallel=3):
os.mkdir(run_name)
scale_dict = get_scale_dict()
cmds = defaultdict(list)
for dataset in range(1,13):
gpu = dataset % gpus
for rank in [2,5,10,50,100,200]:
cmds[gpu].append(work_command(run_name, dataset, rank, gpu, batch_size, epochs, scale_dict[str(dataset)]))
cmd_files = []
for gpu in range(gpus):
fname = f"{run_name}/run.{gpu}.cmds"
with open(fname,"w") as fh:
fh.writelines("\n".join(cmds[gpu]))
cmd_files.append(fname)
exec_cmd = "\"source path.src; bash -c {}\""
with open(f"{run_name}/drive.sh", "w") as fh:
cmds = []
for cmd_f in cmd_files:
cmd = f"cat {cmd_f} | parallel --gnu -P {nParallel} {exec_cmd}"
cmds.append(cmd)
fh.writelines("\n".join(cmds))
with open(f"{run_name}/main.sh", "w") as fh:
fh.writelines(f"cat {run_name}/drive.sh | parallel --gnu -P {gpus} {exec_cmd}")
if __name__ == '__main__':
_parser = argh.ArghParser()
_parser.add_commands([build])
_parser.dispatch()
| hyperbolics-master | scripts/generate_pytorch.py |
import os
import subprocess
import itertools
import random
ranks = [10, 20]
for file in os.listdir(".\data\hmds-graphs"):
file_base = file.split('.')[0]
cmd_base = "julia hMDS\hmds-simple.jl"
cmd_edges = " -d data\edges\\" + file_base + ".edges"
cmd_emb = " -k data\emb\\" + file_base + ".emb"
cmd_rank = " -r "
cmd_scale = " -t "
for rank in ranks:
print("Rank = ", rank)
for i in range(10):
scale = 0.1*(i+1)
cmd = cmd_base + cmd_edges + cmd_emb + cmd_rank + str(rank) + cmd_scale + str(scale)
#print(cmd)
result = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
res_string = result.stdout.decode('utf-8')
res_lines = res_string.splitlines()
# grab our values
distortion = res_lines[16].split()[5].strip(",")
mapval = res_lines[17].split()[2]
print("Scale \t", scale, "\t distortion \t", distortion, "\t mAP \t", mapval) | hyperbolics-master | scripts/hmds-runs.py |
import sys, os, subprocess
import shutil
import numpy as np
import pandas
def comb(edge_file, distance_file, flags):
comb_cmd = ['julia', 'combinatorial/comb.jl',
'--dataset', edge_file,
'--save-distances', distance_file] + flags
print(comb_cmd)
print()
subprocess.run(comb_cmd)
def stats(edge_file, distance_file):
# Find distance matrix chunks
chunk_i = -1
n_ = 0
map_ = 0.0
d_avg_ = 0.0
wc_ = 0.0
files = []
while True:
chunk_i += 1
chunk_file = f"{distance_file}.{chunk_i}"
chunk_exists = os.path.isfile(chunk_file)
if not chunk_exists:
break
files.append(chunk_file)
parallel_stats_cmd = ['parallel', 'python', 'combinatorial/stats.py', edge_file, ':::'] + files
print(parallel_stats_cmd)
print()
print("before subprocess call")
subprocess.run(parallel_stats_cmd)
stats_file = f"{distance_file}.stats"
cat_cmd = ['cat'] + [f+'.stats' for f in files]
with open(stats_file, "w") as s:
subprocess.run(cat_cmd, stdout=s)
    _stats = pandas.read_csv(stats_file, header=None, index_col=False).values
n_ = np.sum(_stats[:,0])
map_ = np.sum(_stats[:,1])
d_avg_ = np.sum(_stats[:,2])
dc_ = np.max(_stats[:,3])
de_ = np.max(_stats[:,4])
print(f"Final MAP = {map_/n_}")
print(f"Final d_avg = {d_avg_/n_}, d_wc = {dc_*de_}, d_c = {dc_}, d_e = {de_}")
if __name__ == '__main__':
dataset = sys.argv[1]
stats_dataset = sys.argv[2]
flags = sys.argv[3:]
os.makedirs(f"distances/{dataset}", exist_ok=True)
edge_file = f"data/edges/{dataset}.edges"
stats_edge_file = f"data/edges/{stats_dataset}.edges"
distance_file = f"distances/{dataset}/{dataset}{''.join(flags)}.dist"
comb(edge_file, distance_file, flags)
stats(stats_edge_file, distance_file)
| hyperbolics-master | scripts/comb_stats.py |
import argh, os
from collections import defaultdict
#cat run_file.sh | parallel -P 4 "source path.src; bash -c {}"
def work_command(run_name, dataset, rank, gpu, batch_size, epochs, scale):
run_stem = f"{run_name}/dataset_{dataset}.r={rank}"
exec_str = f"CUDA_VISIBLE_DEVICES=\"{gpu}\" python pytorch/pytorch_hyperbolic.py learn {dataset} -s {scale} --model-save-file {run_stem}.model -r {rank} --batch-size {batch_size} --epochs {epochs} --log-name {run_stem}.log --print-freq 10"
return exec_str
def get_scale_dict(scale_file):
with open(scale_file) as fh: ls = fh.readlines()
d = dict()
for l in ls:
l.strip()
k,v = l.strip().split("\t")
d[k] = v
return d
@argh.arg("run_name", help="Director to store the run")
@argh.arg("--epochs", help="Number of epochs to run")
@argh.arg("--batch-size", help="Batch Size")
@argh.arg("--gpus", help="Number of GPUS")
@argh.arg("--nParallel", help="Number of Concurrent jobs")
@argh.arg("--scale-file", help="File with dictionary of scalings for datatsets")
def build(run_name, epochs=100, batch_size=16384, gpus=2, nParallel=3, scale_file="scripts/scale_eps_1.txt"):
os.mkdir(run_name)
scale_dict = get_scale_dict(scale_file)
cmds = defaultdict(list)
for dataset in [12,13,6,7,11]:
gpu = dataset % gpus
for rank in [2,5,10,50,100,200]:
cmds[gpu].append(work_command(run_name, dataset, rank, gpu, batch_size, epochs, scale_dict[str(dataset)]))
cmd_files = []
for gpu in range(gpus):
fname = f"{run_name}/run.{gpu}.cmds"
with open(fname,"w") as fh:
fh.writelines("\n".join(cmds[gpu]))
cmd_files.append(fname)
exec_cmd = "\"source path.src; bash -c {}\""
with open(f"{run_name}/drive.sh", "w") as fh:
cmds = []
for cmd_f in cmd_files:
cmd = f"cat {cmd_f} | parallel --gnu -P {nParallel} {exec_cmd}"
cmds.append(cmd)
fh.writelines("\n".join(cmds))
with open(f"{run_name}/main.sh", "w") as fh:
fh.writelines(f"cat {run_name}/drive.sh | parallel --gnu -P {gpus} {exec_cmd}")
if __name__ == '__main__':
_parser = argh.ArghParser()
_parser.add_commands([build])
_parser.dispatch()
| hyperbolics-master | scripts/generate_pytorch_hp.py |
import os
import argh
import subprocess
import itertools
import random
# ranks = [2,5,10,50,100,200]
datasets = [
# "synthetic/sierp-C50-2",
# "synthetic/sierp-C5-6",
# "synthetic/diamond7"
# "synthetic/sierp-K3-8"
# "synthetic/tree-20-3"
# "smalltree"
# "bio-yeast", # 1458
# "web-edu", # 3031
# "grqc", # 4158
# "ca-CSphd",
# "facebook_combined",
# "inf-power", # 4941
# "california", # 5925
"usca312",
"bookend"
]
datasets = datasets[:-1]
# 100 dimensions
models100 = [
#{'dim': 100, 'hyp': 1, 'edim': 0, 'euc': 0, 'sdim': 0, 'sph': 0},
{'dim': 0, 'hyp': 0, 'edim': 100, 'euc': 1, 'sdim': 0, 'sph': 0},
#{'dim': 0, 'hyp': 0, 'edim': 0, 'euc': 0, 'sdim': 100, 'sph': 1},
# {'dim': 10, 'hyp': 10, 'edim': 0, 'euc': 0, 'sdim': 0, 'sph': 0},
# {'dim': 0, 'hyp': 0, 'edim': 0, 'euc': 0, 'sdim': 10, 'sph': 10},
#{'dim': 5, 'hyp': 20, 'edim': 0, 'euc': 0, 'sdim': 0, 'sph': 0},
#{'dim': 0, 'hyp': 0, 'edim': 0, 'euc': 0, 'sdim': 5, 'sph': 20},
#{'dim': 2, 'hyp': 50, 'edim': 0, 'euc': 0, 'sdim': 0, 'sph': 0},
#{'dim': 0, 'hyp': 0, 'edim': 0, 'euc': 0, 'sdim': 2, 'sph': 50},
# {'dim': 50, 'hyp': 1, 'edim': 0, 'euc': 0, 'sdim': 50, 'sph': 1},
# {'dim': 5, 'hyp': 10, 'edim': 0, 'euc': 0, 'sdim': 5, 'sph': 10},
# {'dim': 2, 'hyp': 50, 'edim': 0, 'euc': 0, 'sdim': 2, 'sph': 50},
]
# 20 dimensions
models20 = [
{'dim': 20, 'hyp': 1, 'edim': 0, 'euc': 0, 'sdim': 0, 'sph': 0},
{'dim': 0, 'hyp': 0, 'edim': 20, 'euc': 1, 'sdim': 0, 'sph': 0},
{'dim': 0, 'hyp': 0, 'edim': 0, 'euc': 0, 'sdim': 20, 'sph': 1},
{'dim': 10, 'hyp': 2, 'edim': 0, 'euc': 0, 'sdim': 0, 'sph': 0},
{'dim': 0, 'hyp': 0, 'edim': 0, 'euc': 0, 'sdim': 10, 'sph': 2},
{'dim': 2, 'hyp': 10, 'edim': 0, 'euc': 0, 'sdim': 0, 'sph': 0},
{'dim': 0, 'hyp': 0, 'edim': 0, 'euc': 0, 'sdim': 2, 'sph': 10},
{'dim': 0, 'hyp': 0, 'edim': 0, 'euc': 0, 'sdim': 1, 'sph': 20},
{'dim': 10, 'hyp': 1, 'edim': 0, 'euc': 0, 'sdim': 10, 'sph': 1},
{'dim': 2, 'hyp': 5, 'edim': 0, 'euc': 0, 'sdim': 2, 'sph': 5},
{'dim': 4, 'hyp': 2, 'edim': 4, 'euc': 1, 'sdim': 4, 'sph': 2},
{'dim': 2, 'hyp': 4, 'edim': 4, 'euc': 1, 'sdim': 2, 'sph': 4},
]
# 10 dimensions
models10 = [
{'dim': 10, 'hyp': 1, 'edim': 0, 'euc': 0, 'sdim': 0, 'sph': 0},
{'dim': 0, 'hyp': 0, 'edim': 10, 'euc': 1, 'sdim': 0, 'sph': 0},
{'dim': 0, 'hyp': 0, 'edim': 0, 'euc': 0, 'sdim': 10, 'sph': 1},
{'dim': 5, 'hyp': 2, 'edim': 0, 'euc': 0, 'sdim': 0, 'sph': 0},
{'dim': 0, 'hyp': 0, 'edim': 0, 'euc': 0, 'sdim': 5, 'sph': 2},
{'dim': 2, 'hyp': 5, 'edim': 0, 'euc': 0, 'sdim': 0, 'sph': 0},
{'dim': 0, 'hyp': 0, 'edim': 0, 'euc': 0, 'sdim': 2, 'sph': 5},
{'dim': 5, 'hyp': 1, 'edim': 0, 'euc': 0, 'sdim': 5, 'sph': 1},
{'dim': 2, 'hyp': 2, 'edim': 2, 'euc': 1, 'sdim': 2, 'sph': 2},
# {'dim': 2, 'hyp': 2, 'edim': 0, 'euc': 0, 'sdim': 2, 'sph': 3},
# {'dim': 2, 'hyp': 3, 'edim': 0, 'euc': 0, 'sdim': 2, 'sph': 2},
# {'dim': 2, 'hyp': 1, 'edim': 6, 'euc': 1, 'sdim': 2, 'sph': 1},
# {'dim': 8, 'hyp': 1, 'edim': 2, 'euc': 1, 'sdim': 0, 'sph': 0},
# {'dim': 2, 'hyp': 4, 'edim': 2, 'euc': 1, 'sdim': 0, 'sph': 0},
# {'dim': 0, 'hyp': 0, 'edim': 2, 'euc': 1, 'sdim': 8, 'sph': 1},
# {'dim': 0, 'hyp': 0, 'edim': 2, 'euc': 1, 'sdim': 2, 'sph': 4}
]
models = models10 #+ models100
# lrs = [30, 100, 300, 1000]
# lrs = [10, 20, 40]
# lrs = [5, 10, 20]
lrs = [.001, .003, .01]
burn_ins = [0]
# CUDA_VISIBLE_DEVICES=1 python pytorch/pytorch_hyperbolic.py learn data/edges/synthetic/sierp-C50-2.edges --batch-size 65536 -d 50 --hyp 0 --euc 0 --edim 50 --sph 1 --sdim 51 -l 100.0 --epochs 1000 --checkpoint-freq 100 --resample-freq 500 -g --subsample 1024 --riemann --log-name C50-2.S50.log
def run_pytorch(run_name, gpus, gpc, epochs, batch_size):
params = []
# with open(f"{run_name}/pytorch.params", "w") as param_file:
# param_file.writelines("\n".join(params))
stuff = itertools.product(datasets, models, lrs, burn_ins)
hparams = list(stuff)
random.shuffle(hparams)
for dataset, model, lr, burn_in in hparams:
# log_w = ".w" if warm_start else ""
# log_name = f"{run_name}/{dataset}{log_w}.r{rank}.log"
H_name = "" if model['hyp' ]== 0 else f"H{model['dim']}-{model['hyp']}."
E_name = "" if model['euc' ]== 0 else f"E{model['edim']}-{model['euc']}."
S_name = "" if model['sph' ]== 0 else f"S{model['sdim']}-{model['sph']}."
log_name = f"{run_name}/{os.path.basename(dataset)}.{H_name}{E_name}{S_name}lr{lr}"
savefile = f"{run_name}/{os.path.basename(dataset)}.{H_name}{E_name}{S_name}lr{lr}"
if burn_in > 0:
log_name += f".burnin{burn_in}"
log_name += ".log"
savefile += ".emb"
param = [
f"data/edges/{dataset}.edges",
'--dim', str(model['dim']),
'--hyp', str(model['hyp']),
'--edim', str(model['edim']),
'--euc', str(model['euc']),
'--sdim', str(model['sdim']),
'--sph', str(model['sph']),
'--model-save-file', savefile,
# '--log',
'--log-name', log_name,
'--batch-size', str(batch_size),
'--epochs', str(epochs),
'--checkpoint-freq', '100',
'--resample-freq', '5000',
# '--use-svrg',
# '-T 0',
'-g', '--subsample 1024',
'--riemann',
'--learn-scale',
# '--logloss',
# '--distloss',
# '--squareloss',
# '--symloss',
'--burn-in', str(burn_in),
# '--momentum', '0.9',
'--learning-rate', str(lr)]
params.append(" ".join(param))
cmds = []
for i in range(gpus):
header = " ".join([ 'CUDA_VISIBLE_DEVICES='+str(i%gpc), 'python', 'pytorch/pytorch_hyperbolic.py', 'learn' ])
cmds = [f'{header} {p}' for p in params[i::gpus]]
with open(f"{run_name}/cmds{i}.sh", "w") as cmd_log:
cmd_log.writelines('\n'.join(cmds))
# all_cmds = [f'"{cmd0} {p}"' for p in params[0::2]] \
# + [f'"{cmd1} {p}"' for p in params[1::2]]
# parallel_cmd = " ".join(['parallel',
# ':::',
# *all_cmds
# ])
# print(parallel_cmd)
# with open(f"{run_name}/cmds.sh", "w") as cmd_log:
# cmd_log.writelines('\n'.join(all_cmds))
# subprocess.run(parallel_cmd, shell=True)
@argh.arg("run_name", help="Directory to store the run; will be created if necessary")
@argh.arg("--gpus", help="Total number of GPUs to use")
@argh.arg("--gpc", help="GPUs per machine")
# @argh.arg('-d', "--datasets", nargs='+', type=str, help = "Datasets")
@argh.arg("--epochs", help="Number of epochs to run Pytorch optimizer")
@argh.arg("--batch-size", help="Batch size")
def run(run_name, gpus=1, gpc=1, epochs=1000, batch_size=65536):
os.makedirs(run_name, exist_ok=True)
run_pytorch(run_name, gpus=gpus, gpc=gpc, epochs=epochs, batch_size=batch_size)
if __name__ == '__main__':
_parser = argh.ArghParser()
_parser.set_default_command(run)
_parser.dispatch()
| hyperbolics-master | scripts/products.py |
import glob, os, sys
import pandas as pd
if __name__ == '__main__':
run_name = sys.argv[1]
rows = []
for f in sorted(glob.glob(run_name + '/*.stat')):
# line = os.path.splitext(os.path.splitext(os.path.basename(f))[0])[0] + ' '
# with open(f, "r") as g:
# line += g.readline()
# rows.append(line)
name = os.path.splitext(os.path.splitext(os.path.basename(f))[0])[0]
row = pd.read_csv(f, delim_whitespace=True)
row.index = [name]
rows.append(row)
# print(row)
# os.remove(f)
table = pd.concat(rows)
# print(table)
print(table.to_string())
# .to_csv(f"{run_name}/{run_name}.stats", )
with open(f"{run_name}/{run_name}.stats", "w") as f:
# f.write('\n'.join(lines))
f.write(table.to_string())
| hyperbolics-master | scripts/collect_stats.py |
import argh, os
def work_command(run_name, dataset, rank, scale, prec, tol):
run_stem = f"{run_name}/dataset_{dataset}.r={rank}"
exec_str = f" julia mds-scale.jl {dataset} {rank} {scale} {prec} {tol} > {run_stem}.log"
return exec_str
def get_scale_dict(scale_file="scripts/scale_eps_1.txt"):
with open(scale_file) as fh: ls = fh.readlines()
d = dict()
for l in ls:
l.strip()
k,v = l.strip().split("\t")
d[k] = v
return d
@argh.arg("run_name", help="Director to store the run")
@argh.arg("--prec", help="Precision")
@argh.arg("--max-k", help="Max-k")
@argh.arg("--nParallel", help="Parallel")
@argh.arg("--scale-file", help="Scale File")
def tri(run_name, prec="2048", max_k=200, nParallel=6, scale_file="scripts/scale_eps_1.txt"):
os.mkdir(run_name)
scale_dict = get_scale_dict(scale_file)
cmds = list()
for dataset in range(1,13):
scale = scale_dict[str(dataset)]
cmds.append(f"julia serialize_helper.jl --prec {prec} --max_k {max_k} --scale {scale} {dataset} {run_name}/tri.{dataset}.jld --stats-file {run_name}/tri.{dataset}.stats")
fname = f"{run_name}/tri.run.cmds"
with open(fname,"w") as fh:
fh.writelines("\n".join(cmds))
exec_cmd = "\"source path.src; bash -c {}\""
with open(f"{run_name}/main.sh", "w") as fh:
fh.writelines(f"cat {run_name}/tri.run.cmds | parallel --gnu -P {nParallel} {exec_cmd}")
@argh.arg("run_name", help="Director to store the run")
@argh.arg("--prec", help="Precision")
@argh.arg("--tol", help="Tolerance")
def build(run_name, prec="2048", tol="100"):
os.mkdir(run_name)
scale_dict = get_scale_dict()
cmds = list()
for dataset in range(12,13):
for rank in [2,5,10,50,100,200]:
cmds.append(work_command(run_name, dataset, rank, scale_dict[str(dataset)], prec, tol))
cmd_files = []
fname = f"{run_name}/run.cmds"
with open(fname,"w") as fh:
fh.writelines(cmds)
cmd_files.append(fname)
exec_cmd = "\"source path.src; bash -c {}\""
with open(f"{run_name}/drive.sh", "w") as fh:
cmds = []
for cmd_f in cmd_files:
cmd = f"cat {cmd_f} | {exec_cmd}"
cmds.append(cmd)
fh.writelines("\n".join(cmds))
with open(f"{run_name}/main.sh", "w") as fh:
fh.writelines(f"cat {run_name}/drive.sh | {exec_cmd}")
if __name__ == '__main__':
_parser = argh.ArghParser()
_parser.add_commands([build, tri])
_parser.dispatch()
| hyperbolics-master | scripts/generate_mds.py |
# Copy and modified from
#
# https://github.com/mleszczy/pytorch_optimizers
#
from torch.optim.optimizer import Optimizer, required
import torch
import copy, logging
from torch.autograd import Variable
from hyperbolic_parameter import Hyperbolic_Parameter
#TODO(mleszczy): Be able to inherit from different optimizers
# NB: Note we choose the baseclass dynamically below.
class SVRG(torch.optim.SGD):
r"""Implements stochastic variance reduction gradient descent.
Args:
params (iterable): iterable of parameters to optimize
lr (float): learning rate
T (int): number of iterations between the step to take the full grad/save w
data_loader (DataLoader): dataloader to use to load training data
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
Example:
.. note::
"""
def __init__(self, params, lr=required, T=required, data_loader=required, weight_decay=0.0,opt=torch.optim.SGD):
defaults = dict(lr=lr, weight_decay=weight_decay)
self.__class__ = type(self.__class__.__name__,
(opt,object),
dict(self.__class__.__dict__))
logging.info(f"Using base optimizer {opt} in SVRG")
super(self.__class__, self).__init__(params, **defaults)
if len(self.param_groups) != 1:
raise ValueError("SVRG doesn't support per-parameter options "
"(parameter groups)")
# TODO(mleszczy): Add these to parameter group or state?
params = self.param_groups[0]['params']
self._params = params
self._curr_w = [p.data for p in params]
self._prev_w = [p.data.clone() for p in params]
# Gradients are lazily allocated and don't exist yet. However, gradients are
# the same shape as the weights so we can still allocate buffers here
self._curr_grad = [p.data.clone() for p in params]
self._prev_grad = [p.data.clone() for p in params]
self._full_grad = None
self.data_loader = data_loader
if T == 0:
T = len(self.data_loader)*3
logging.info(f"SVRG epoch: {T} batches")
self.state['t_iters'] = T
self._first_call = True
self.T = T # Needed to trigger full gradient
logging.info(f"Data Loader has {len(self.data_loader)} with batch {self.data_loader.batch_size}")
def __setstate__(self, state):
super(self.__class__, self).__setstate__(state)
def _zero_grad(self):
for p in self._params:
if p.grad is not None:
                p.grad.detach_()
p.grad.zero_()
def _set_weights_grad(self,ws,gs):
for idx, p in enumerate(self._params):
if ws is not None: p.data = ws[idx]
if gs is not None and p.grad is not None: p.grad.data = gs[idx]
def step(self, closure):
"""Performs a single optimization step.
Arguments:
closure (callable): A closure that reevaluates the model
and returns the loss.
"""
assert len(self.param_groups) == 1
# Calculate full gradient
if self.state['t_iters'] == self.T or self._first_call:
# Reset gradients before accumulating them
self._set_weights_grad(None, self._full_grad)
self._zero_grad()
# Accumulate gradients
for i, (data, target) in enumerate(self.data_loader):
closure(data, target)
# Adjust summed gradients by num_iterations accumulated over
# assert(n_iterations == len(self.data_loader))
for p in self._params:
if p.grad is not None:
p.grad.data /= len(self.data_loader)
# As the gradient is lazily allocated, on the first call,
# we make a copy.
if self._first_call:
assert(self._full_grad is None)
self._full_grad = [p.grad.data.clone() if p.grad is not None else None for p in self._params]
self._first_call = False
# Copy w to prev_w
for p, p0 in zip(self._curr_w, self._prev_w):
p0.copy_(p)
# Reset t
self.state['t_iters'] = 0
# Setup the previous grad
self._set_weights_grad(self._prev_w, self._prev_grad)
self._zero_grad()
closure()
# Calculate the current grad.
self._set_weights_grad(self._curr_w, self._curr_grad)
self._zero_grad()
loss = closure()
# Adjust the current gradient using the previous gradient and the full gradient.
# We have normalized so that these are all comparable.
for p, d_p0, fg in zip(self._params, self._prev_grad, self._full_grad):
# Adjust gradient in place
if p.grad is not None:
# NB: This should be _this_ batch.
p.grad.data -= (d_p0 - fg)
# Call optimizer update step
# TODO: Abstract this away.
Hyperbolic_Parameter.correct_metric(self._params)
super(self.__class__, self).step()
self.state['t_iters'] += 1
return loss
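# --- Hedged usage sketch (comments only; not part of the original module) ---
# The optimizer is driven with a closure that recomputes the loss and calls
# backward(); the closure must also accept an explicit (data, target) pair
# because step() replays it over the whole data_loader when refreshing the
# full gradient every T iterations. Illustrative names (model, loss_fn, loader):
#
#   opt = SVRG(model.parameters(), lr=0.1, T=0, data_loader=loader)
#   for data, target in loader:
#       def closure(d=data, t=target):
#           loss = loss_fn(model(d), t)
#           loss.backward()
#           return loss
#       opt.step(closure)
#
# Gradients are zeroed inside step(), so the closure itself should not call
# zero_grad().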
| hyperbolics-master | pytorch/svrg.py |
import math
import numpy as np
import torch
import copy
import logging
import os
import pickle as cp
# eps for numerical stability
eps = 1e-6
class YFOptimizer(object):
def __init__(self, var_list, lr=0.0001, mu=0.0, clip_thresh=None, weight_decay=0.0,
beta=0.999, curv_win_width=20, zero_debias=True, sparsity_debias=False, delta_mu=0.0,
auto_clip_fac=None, force_non_inc_step=False, h_max_log_smooth=True, h_min_log_smooth=True,
checkpoint_interval=1000, verbose=False, adapt_clip=True, stat_protect_fac=100.0, catastrophic_move_thresh=100.0,
use_disk_checkpoint=False, checkpoint_dir='./YF_workspace'):
'''
clip thresh is the threshold value on ||lr * gradient||
delta_mu can be place holder/variable/python scalar. They are used for additional
momentum in situations such as asynchronous-parallel training. The default is 0.0
for basic usage of the optimizer.
Args:
lr: python scalar. The initial value of learning rate, we use 1.0 in our paper.
mu: python scalar. The initial value of momentum, we use 0.0 in our paper.
clip_thresh: python scalar. The manaully-set clipping threshold for tf.clip_by_global_norm.
if None, the automatic clipping can be carried out. The automatic clipping
feature is parameterized by argument auto_clip_fac. The auto clip feature
can be switched off with auto_clip_fac = None
beta: python scalar. The smoothing parameter for estimations.
sparsity_debias: gradient norm and curvature are biased to larger values when
calculated with sparse gradient. This is useful when the model is very sparse,
e.g. LSTM with word embedding. For non-sparse CNN, turning it off could slightly
accelerate the speed.
delta_mu: for extensions. Not necessary in the basic use.
force_non_inc_step: in some very rare cases, it is necessary to force ||lr * gradient||
to be not increasing dramatically for stableness after some iterations.
In practice, if turned on, we enforce lr * sqrt(smoothed ||grad||^2)
to be less than 2x of the minimal value of historical value on smoothed || lr * grad ||.
This feature is turned off by default.
checkpoint_interval: interval to do checkpointing. For potential recovery from crashing.
stat_protect_fac: a loose hard adaptive threshold over ||grad||^2. It is to protect stat
from being destropied by exploding gradient.
Other features:
If you want to manually control the learning rates, self.lr_factor is
an interface to the outside, it is an multiplier for the internal learning rate
in YellowFin. It is helpful when you want to do additional hand tuning
or some decaying scheme to the tuned learning rate in YellowFin.
Example on using lr_factor can be found here:
https://github.com/JianGoForIt/YellowFin_Pytorch/blob/master/pytorch-cifar/main.py#L109
'''
self._lr = lr
self._mu = mu
self._lr_t = lr
self._mu_t = mu
# we convert var_list from generator to list so that
# it can be used for multiple times
self._var_list = list(var_list)
self._clip_thresh = clip_thresh
self._auto_clip_fac = auto_clip_fac
self._beta = beta
self._curv_win_width = curv_win_width
self._zero_debias = zero_debias
self._sparsity_debias = sparsity_debias
self._force_non_inc_step = force_non_inc_step
self._optimizer = torch.optim.SGD(self._var_list, lr=self._lr,
momentum=self._mu, weight_decay=weight_decay)
self._iter = 0
# global states are the statistics
self._global_state = {}
# for decaying learning rate and etc.
self._lr_factor = 1.0
# smoothing options
self._h_max_log_smooth = h_max_log_smooth
self._h_min_log_smooth = h_min_log_smooth
# checkpoint interval
self._checkpoint_interval = checkpoint_interval
self._verbose = verbose
if self._verbose:
logging.debug('Verbose mode with debugging info logged.')
# clip exploding gradient
self._adapt_clip = adapt_clip
self._exploding_grad_clip_thresh = 1e3
self._exploding_grad_clip_target_value = 1e3
self._stat_protect_fac = stat_protect_fac
self._catastrophic_move_thresh = catastrophic_move_thresh
self._exploding_grad_detected = False
# workspace creation
self._use_disk_checkpoint = use_disk_checkpoint
self._checkpoint_dir = checkpoint_dir
if use_disk_checkpoint:
if not os.path.exists(self._checkpoint_dir):
os.makedirs(self._checkpoint_dir)
self._checkpoint_file = "checkpoint_pid_" + str(os.getpid())
def state_dict(self):
# for checkpoint saving
sgd_state_dict = self._optimizer.state_dict()
# for recover model internally in case of numerical issue
model_state_list = [p.data \
for group in self._optimizer.param_groups for p in group['params'] ]
global_state = self._global_state
lr_factor = self._lr_factor
iter = self._iter
lr = self._lr
mu = self._mu
clip_thresh = self._clip_thresh
beta = self._beta
curv_win_width = self._curv_win_width
zero_debias = self._zero_debias
h_min = self._h_min
h_max = self._h_max
return {
"sgd_state_dict": sgd_state_dict,
"model_state_list": model_state_list,
"global_state": global_state,
"lr_factor": lr_factor,
"iter": iter,
"lr": lr,
"mu": mu,
"clip_thresh": clip_thresh,
"beta": beta,
"curv_win_width": curv_win_width,
"zero_debias": zero_debias,
"h_min": h_min,
"h_max": h_max
}
def load_state_dict(self, state_dict):
# for checkpoint saving
self._optimizer.load_state_dict(state_dict['sgd_state_dict'])
# for recover model internally if any numerical issue happens
param_id = 0
for group in self._optimizer.param_groups:
for p in group["params"]:
p.data.copy_(state_dict["model_state_list"][param_id] )
param_id += 1
self._global_state = state_dict['global_state']
self._lr_factor = state_dict['lr_factor']
self._iter = state_dict['iter']
self._lr = state_dict['lr']
self._mu = state_dict['mu']
self._clip_thresh = state_dict['clip_thresh']
self._beta = state_dict['beta']
self._curv_win_width = state_dict['curv_win_width']
self._zero_debias = state_dict['zero_debias']
self._h_min = state_dict["h_min"]
self._h_max = state_dict["h_max"]
return
def load_state_dict_perturb(self, state_dict):
# for checkpoint saving
self._optimizer.load_state_dict(state_dict['sgd_state_dict'])
# for recover model internally if any numerical issue happens
param_id = 0
for group in self._optimizer.param_groups:
for p in group["params"]:
p.data.copy_(state_dict["model_state_list"][param_id] )
p.data += 1e-8
param_id += 1
self._global_state = state_dict['global_state']
self._lr_factor = state_dict['lr_factor']
self._iter = state_dict['iter']
self._lr = state_dict['lr']
self._mu = state_dict['mu']
self._clip_thresh = state_dict['clip_thresh']
self._beta = state_dict['beta']
self._curv_win_width = state_dict['curv_win_width']
self._zero_debias = state_dict['zero_debias']
self._h_min = state_dict["h_min"]
self._h_max = state_dict["h_max"]
return
def set_lr_factor(self, factor):
self._lr_factor = factor
return
def get_lr_factor(self):
return self._lr_factor
def zero_grad(self):
self._optimizer.zero_grad()
return
def zero_debias_factor(self):
return 1.0 - self._beta ** (self._iter + 1)
def zero_debias_factor_delay(self, delay):
# for exponentially averaged stat which starts at non-zero iter
return 1.0 - self._beta ** (self._iter - delay + 1)
def curvature_range(self):
global_state = self._global_state
if self._iter == 0:
global_state["curv_win"] = torch.FloatTensor(self._curv_win_width, 1).zero_()
curv_win = global_state["curv_win"]
grad_norm_squared = self._global_state["grad_norm_squared"]
# curv_win[self._iter % self._curv_win_width] = np.log(grad_norm_squared + eps)
curv_win[self._iter % self._curv_win_width] = grad_norm_squared
valid_end = min(self._curv_win_width, self._iter + 1)
# we use a running average over log scale, accelerating
# h_max / h_min in the beginning to follow the varying trend of curvature.
beta = self._beta
if self._iter == 0:
global_state["h_min_avg"] = 0.0
global_state["h_max_avg"] = 0.0
self._h_min = 0.0
self._h_max = 0.0
if self._h_min_log_smooth:
global_state["h_min_avg"] = \
global_state["h_min_avg"] * beta + (1 - beta) * torch.min(np.log(curv_win[:valid_end] + eps) )
else:
global_state["h_min_avg"] = \
global_state["h_min_avg"] * beta + (1 - beta) * torch.min(curv_win[:valid_end] )
if self._h_max_log_smooth:
global_state["h_max_avg"] = \
global_state["h_max_avg"] * beta + (1 - beta) * torch.max(np.log(curv_win[:valid_end] + eps) )
else:
global_state["h_max_avg"] = \
global_state["h_max_avg"] * beta + (1 - beta) * torch.max(curv_win[:valid_end] )
if self._zero_debias:
debias_factor = self.zero_debias_factor()
if self._h_min_log_smooth:
self._h_min = np.exp(global_state["h_min_avg"] / debias_factor)
else:
self._h_min = global_state["h_min_avg"] / debias_factor
if self._h_max_log_smooth:
self._h_max = np.exp(global_state["h_max_avg"] / debias_factor)
else:
self._h_max = global_state["h_max_avg"] / debias_factor
else:
if self._h_min_log_smooth:
self._h_min = np.exp(global_state["h_min_avg"] )
else:
self._h_min = global_state["h_min_avg"]
if self._h_max_log_smooth:
self._h_max = np.exp(global_state["h_max_avg"] )
else:
self._h_max = global_state["h_max_avg"]
if self._sparsity_debias:
self._h_min *= self._sparsity_avg
self._h_max *= self._sparsity_avg
return
def grad_variance(self):
global_state = self._global_state
beta = self._beta
self._grad_var = np.array(0.0, dtype=np.float32)
for group_id, group in enumerate(self._optimizer.param_groups):
for p_id, p in enumerate(group['params'] ):
if p.grad is None:
continue
grad = p.grad.data
state = self._optimizer.state[p]
if self._iter == 0:
state["grad_avg"] = grad.new().resize_as_(grad).zero_()
state["grad_avg_squared"] = 0.0
state["grad_avg"].mul_(beta).add_(1 - beta, grad)
self._grad_var += torch.sum(state["grad_avg"] * state["grad_avg"] )
if self._zero_debias:
debias_factor = self.zero_debias_factor()
else:
debias_factor = 1.0
self._grad_var /= -(debias_factor**2)
self._grad_var += global_state['grad_norm_squared_avg'] / debias_factor
# in case of negative variance: the two terms use different debias factors
self._grad_var = max(self._grad_var, eps)
if self._sparsity_debias:
self._grad_var *= self._sparsity_avg
return
def dist_to_opt(self):
global_state = self._global_state
beta = self._beta
if self._iter == 0:
global_state["grad_norm_avg"] = 0.0
global_state["dist_to_opt_avg"] = 0.0
global_state["grad_norm_avg"] = \
global_state["grad_norm_avg"] * beta + (1 - beta) * math.sqrt(global_state["grad_norm_squared"] )
global_state["dist_to_opt_avg"] = \
global_state["dist_to_opt_avg"] * beta \
+ (1 - beta) * global_state["grad_norm_avg"] / (global_state['grad_norm_squared_avg'] + eps)
if self._zero_debias:
debias_factor = self.zero_debias_factor()
self._dist_to_opt = global_state["dist_to_opt_avg"] / debias_factor
else:
self._dist_to_opt = global_state["dist_to_opt_avg"]
if self._sparsity_debias:
self._dist_to_opt /= (np.sqrt(self._sparsity_avg) + eps)
return
def grad_sparsity(self):
global_state = self._global_state
if self._iter == 0:
global_state["sparsity_avg"] = 0.0
non_zero_cnt = 0.0
all_entry_cnt = 0.0
for group in self._optimizer.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
grad_non_zero = grad.nonzero()
if grad_non_zero.dim() > 0:
non_zero_cnt += grad_non_zero.size()[0]
all_entry_cnt += torch.numel(grad)
beta = self._beta
global_state["sparsity_avg"] = beta * global_state["sparsity_avg"] \
+ (1 - beta) * non_zero_cnt / float(all_entry_cnt)
self._sparsity_avg = \
global_state["sparsity_avg"] / self.zero_debias_factor()
if self._verbose:
logging.debug("sparsity %f, sparsity avg %f", non_zero_cnt / float(all_entry_cnt), self._sparsity_avg)
return
def lr_grad_norm_avg(self):
# this enforces that lr * grad_norm does not
# increase dramatically in case of instability.
# Not necessary for basic use.
global_state = self._global_state
beta = self._beta
if "lr_grad_norm_avg" not in global_state:
global_state['grad_norm_squared_avg_log'] = 0.0
global_state['grad_norm_squared_avg_log'] = \
global_state['grad_norm_squared_avg_log'] * beta \
+ (1 - beta) * np.log(global_state['grad_norm_squared'] + eps)
if "lr_grad_norm_avg" not in global_state:
global_state["lr_grad_norm_avg"] = \
0.0 * beta + (1 - beta) * np.log(self._lr * np.sqrt(global_state['grad_norm_squared'] ) + eps)
# we monitor the minimal smoothed ||lr * grad||
global_state["lr_grad_norm_avg_min"] = \
np.exp(global_state["lr_grad_norm_avg"] / self.zero_debias_factor() )
else:
global_state["lr_grad_norm_avg"] = global_state["lr_grad_norm_avg"] * beta \
+ (1 - beta) * np.log(self._lr * np.sqrt(global_state['grad_norm_squared'] ) + eps)
global_state["lr_grad_norm_avg_min"] = \
min(global_state["lr_grad_norm_avg_min"],
np.exp(global_state["lr_grad_norm_avg"] / self.zero_debias_factor() ) )
def before_apply(self):
# compute running average of gradient and norm of gradient
beta = self._beta
global_state = self._global_state
if self._iter == 0:
global_state["grad_norm_squared_avg"] = 0.0
global_state["grad_norm_squared"] = 0.0
for group_id, group in enumerate(self._optimizer.param_groups):
for p_id, p in enumerate(group['params'] ):
if p.grad is None:
continue
grad = p.grad.data
param_grad_norm_squared = torch.sum(grad * grad)
global_state['grad_norm_squared'] += param_grad_norm_squared
if self._verbose:
logging.debug("Iteration %f", self._iter)
logging.debug("param grad squared gid %d, pid %d, %f, log scale: %f", group_id, p_id, param_grad_norm_squared,
np.log(param_grad_norm_squared + 1e-10) / np.log(10) )
if self._iter >= 1:
self._exploding_grad_clip_thresh = self._h_max
self._exploding_grad_clip_target_value = np.sqrt(self._h_max)
if global_state['grad_norm_squared'] >= self._exploding_grad_clip_thresh:
self._exploding_grad_detected = True
else:
self._exploding_grad_detected = False
global_state['grad_norm_squared_avg'] = \
global_state['grad_norm_squared_avg'] * beta + (1 - beta) * global_state['grad_norm_squared']
if self._verbose:
logging.debug("overall grad norm squared %f, log scale: %f",
global_state['grad_norm_squared'], np.log(global_state['grad_norm_squared'] + 1e-10) / np.log(10))
if self._sparsity_debias:
self.grad_sparsity()
self.curvature_range()
self.grad_variance()
self.dist_to_opt()
if self._verbose:
logging.debug("h_max %f ", self._h_max)
logging.debug("h_min %f ", self._h_min)
logging.debug("dist %f ", self._dist_to_opt)
logging.debug("var %f ", self._grad_var)
if self._iter > 0:
self.get_mu()
self.get_lr()
self._lr = beta * self._lr + (1 - beta) * self._lr_t
self._mu = beta * self._mu + (1 - beta) * self._mu_t
if self._verbose:
logging.debug("lr_t %f", self._lr_t)
logging.debug("mu_t %f", self._mu_t)
logging.debug("lr %f", self._lr)
logging.debug("mu %f", self._mu)
return
def get_lr(self):
self._lr_t = (1.0 - math.sqrt(self._mu_t) )**2 / (self._h_min + eps)
# slow start of lr to prevent a huge lr when only a few iterations have finished
self._lr_t = min(self._lr_t, self._lr_t * (self._iter + 1) / float(10.0 * self._curv_win_width) )
return
def get_cubic_root(self):
# We have the equation x^2 D^2 + (1-x)^4 * C / h_min^2
# where x = sqrt(mu).
# We substitute x, which is sqrt(mu), with x = y + 1.
# It gives y^3 + py = q
# where p = (D^2 h_min^2)/(2*C) and q = -p.
# We use Vieta's substitution to compute the root.
# There is only one real solution y (which is in [0, 1] ).
# http://mathworld.wolfram.com/VietasSubstitution.html
# eps in the numerator is to prevent momentum = 1 in case of zero gradient
if np.isnan(self._dist_to_opt) or np.isnan(self._h_min) or np.isnan(self._grad_var) \
or np.isinf(self._dist_to_opt) or np.isinf(self._h_min) or np.isinf(self._grad_var):
logging.warning("Input to cubic solver has invalid nan/inf value!")
raise Exception("Input to cubic solver has invalid nan/inf value!")
p = (self._dist_to_opt + eps)**2 * (self._h_min + eps)**2 / 2 / (self._grad_var + eps)
w3 = (-math.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
w = math.copysign(1.0, w3) * math.pow(math.fabs(w3), 1.0/3.0)
y = w - p / 3.0 / (w + eps)
x = y + 1
if self._verbose:
logging.debug("p %f, denominator %f", p, self._grad_var + eps)
logging.debug("w3 %f ", w3)
logging.debug("y %f, denominator %f", y, w + eps)
if np.isnan(x) or np.isinf(x):
logging.warning("Output from cubic is invalid nan/inf value!")
raise Exception("Output from cubic is invalid nan/inf value!")
return x
def get_mu(self):
root = self.get_cubic_root()
dr = max( (self._h_max + eps) / (self._h_min + eps), 1.0 + eps)
self._mu_t = max(root**2, ( (np.sqrt(dr) - 1) / (np.sqrt(dr) + 1) )**2 )
return
def update_hyper_param(self):
for group in self._optimizer.param_groups:
group['momentum'] = self._mu_t
#group['momentum'] = max(self._mu, self._mu_t)
if self._force_non_inc_step == False:
group['lr'] = self._lr_t * self._lr_factor
# a loose clamp to prevent a catastrophically large move. If the move would be
# too large, we shrink lr so that ||lr * gradient|| stays at the threshold
if self._adapt_clip and (group['lr'] * np.sqrt(self._global_state['grad_norm_squared']) >= self._catastrophic_move_thresh):
group['lr'] = self._catastrophic_move_thresh / np.sqrt(self._global_state['grad_norm_squared'] + eps)
if self._verbose:
logging.warning("clip catastrophic move!")
elif self._iter > self._curv_win_width:
# force to guarantee lr * grad_norm not increasing dramatically.
# Not necessary for basic use. Please refer to the comments
# in YFOptimizer.__init__ for more details
self.lr_grad_norm_avg()
debias_factor = self.zero_debias_factor()
group['lr'] = min(self._lr * self._lr_factor,
2.0 * self._global_state["lr_grad_norm_avg_min"] \
/ (np.sqrt(np.exp(self._global_state['grad_norm_squared_avg_log'] / debias_factor) ) + eps) )
return
def auto_clip_thresh(self):
# Heuristic to automatically prevent sudden exploding gradient
# Not necessary for basic use.
return math.sqrt(self._h_max) * self._auto_clip_fac
def step(self):
# add weight decay
for group in self._optimizer.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if group['weight_decay'] != 0:
grad = grad.add(group['weight_decay'], p.data)
if self._clip_thresh is not None:
torch.nn.utils.clip_grad_norm(self._var_list, self._clip_thresh)
elif (self._iter != 0 and self._auto_clip_fac is not None):
# do not clip the first iteration
torch.nn.utils.clip_grad_norm(self._var_list, self.auto_clip_thresh() )
# loose threshold for preventing exploding gradients from destroying statistics
if self._adapt_clip and (self._iter > 1):
torch.nn.utils.clip_grad_norm(self._var_list, np.sqrt(self._stat_protect_fac * self._h_max) + eps)
try:
# before apply
self.before_apply()
# update learning rate and momentum
self.update_hyper_param()
# periodically save model and states
if self._iter % self._checkpoint_interval == 0:
if self._use_disk_checkpoint and os.path.exists(self._checkpoint_dir):
checkpoint_path = self._checkpoint_dir + "/" + self._checkpoint_file
with open(checkpoint_path, "wb") as f:
cp.dump(self.state_dict(), f, protocol=2)
else:
self._state_checkpoint = copy.deepcopy(self.state_dict() )
# protection from exploding gradient
if self._exploding_grad_detected and self._verbose:
logging.warning("exploding gradient detected: grad norm detection thresh %f, grad norm %f, grad norm after clip %f",
np.sqrt(self._exploding_grad_clip_thresh),
np.sqrt(self._global_state['grad_norm_squared'] ),
self._exploding_grad_clip_target_value)
if self._adapt_clip and self._exploding_grad_detected:
# print("exploding gradient detected: grad norm detection thresh ", np.sqrt(self._exploding_grad_clip_thresh),
# "grad norm", np.sqrt(self._global_state['grad_norm_squared'] ),
# "grad norm after clip ", self._exploding_grad_clip_target_value)
torch.nn.utils.clip_grad_norm(self._var_list, self._exploding_grad_clip_target_value + eps)
self._optimizer.step()
self._iter += 1
except:
# load the last checkpoint
logging.warning("Numerical issue triggered restore with backup. Resuming from last checkpoint.")
if self._use_disk_checkpoint and os.path.exists(self._checkpoint_dir):
checkpoint_path = self._checkpoint_dir + "/" + self._checkpoint_file
with open(checkpoint_path, "rb") as f:
self.load_state_dict_perturb(cp.load(f))
else:
self.load_state_dict_perturb(copy.deepcopy(self._state_checkpoint) )
return
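# Illustrative usage sketch (not part of the original API): YFOptimizer drops into
# a standard PyTorch training loop; the learning rate and momentum are auto-tuned
# inside step(). `model`, `loader`, and `criterion` are hypothetical stand-ins.
#
#   opt = YFOptimizer(model.parameters(), lr=0.1, mu=0.0)
#   for inputs, targets in loader:
#       opt.zero_grad()
#       loss = criterion(model(inputs), targets)
#       loss.backward()
#       opt.step()
#   # optional external schedule on top of the tuned learning rate:
#   opt.set_lr_factor(0.5)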
| hyperbolics-master | pytorch/yellowfin.py |
import logging, argh
import os, sys
import networkx as nx
import random
import torch
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader, TensorDataset
import matplotlib as mpl
if torch.cuda.is_available(): mpl.use('Agg')
import matplotlib.pyplot as plt
if torch.cuda.is_available(): plt.ioff()
import scipy
import scipy.sparse.csgraph as csg
import pandas
import numpy as np, math
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, root_dir)
import utils.load_graph as load_graph
import utils.vis as vis
import utils.distortions as dis
import graph_helpers as gh
import mds_warmstart
from hyperbolic_models import ProductEmbedding
from hyperbolic_parameter import RParameter
# This describes a hyperbolic optimizer in PyTorch. It requires two modifications:
#
# * When declaring a parameter, one uses a class called "Hyperbolic Parameter".
#   It assumes that the _last_ dimension is in the disk. E.g., a tensor of size
#   n x m x d means that you have n x m elements of H_D, with d >= 2.
#
# * It inherits from Parameter, so you can do anything you want with it
#   (set its value, pass it around).
#
# * So that you can use any optimizer and get the correction: after the `backward`
#   call but before the `step`, you need to call a function called `hyperbolic_fix`.
#   It will walk the tree, look for the hyperbolic parameters and correct them.
#
# * The step function below can be used pretty generically.
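# An illustrative sketch of that pattern (it mirrors the training loop further
# below; `m`, `opt`, and `batch` are hypothetical stand-ins for a model, an
# optimizer, and a minibatch). In this file the correction is
# RParameter.correct_metric and the projection is m.normalize():
#
#   opt.zero_grad()
#   loss = m.loss(batch)
#   loss.backward()
#   RParameter.correct_metric(m.parameters())  # Euclidean grad -> Riemannian grad
#   opt.step()
#   m.normalize()                              # project back onto the manifold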
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
seed = 0
np.random.seed(seed)
torch.manual_seed(seed)
def cu_var(x):
if isinstance(x, list) : return [cu_var(u) for u in x]
if isinstance(x, tuple): return tuple([cu_var(u) for u in list(x)])
return torch.tensor(x, device=device)
def unwrap(x):
""" Extract the numbers from (sequences of) pytorch tensors """
if isinstance(x, list) : return [unwrap(u) for u in x]
if isinstance(x, tuple): return tuple([unwrap(u) for u in list(x)])
return x.detach().cpu().numpy()
#
# Dataset extractors
#
class GraphRowSubSampler(torch.utils.data.Dataset):
def __init__(self, G, scale, subsample, weight_fn, Z=None):
super(GraphRowSubSampler, self).__init__()
self.graph = nx.to_scipy_sparse_matrix(G, nodelist=list(range(G.order())))
self.n = G.order()
self.scale = scale
self.subsample = subsample if subsample > 0 else self.n-1
self.val_cache = torch.zeros((self.n,self.subsample), dtype=torch.double)
self.idx_cache = torch.LongTensor(self.n,self.subsample,2).zero_()
self.w_cache = torch.zeros((self.n, self.subsample), dtype=torch.double)
self.cache = set()
self.verbose = False
self.n_cached = 0
self.Z = Z
self.nbr_frac = 1.0 # fill up this proportion of samples with neighbors
self.weight_fn = weight_fn
logging.info(self)
## initialize up front
for i in range(self.n):
self.__getitem__(i)
## store total weight
self.total_w = torch.sum(self.w_cache)
self.max_dist = torch.max(self.val_cache)
def __getitem__(self, index):
if index not in self.cache:
if self.verbose: logging.info(f"Cache miss for {index}")
h = gh.djikstra_wrapper( (self.graph, [index]) )[0,:] if self.Z is None else self.Z[index,:]
# add in all the edges
cur = 0
self.idx_cache[index,:,0] = index
neighbors = scipy.sparse.find(self.graph[index,:])[1]
for e in neighbors:
self.idx_cache[index,cur,1] = int(e)
self.val_cache[index,cur] = self.scale*h[e]
self.w_cache[index,cur] = self.weight_fn(1.0)
cur += 1
if cur >= self.nbr_frac * self.subsample: break
scratch = np.array(range(self.n))
np.random.shuffle(scratch)
i = 0
while cur < self.subsample and i < self.n:
v = scratch[i]
if v != index and v not in neighbors:
self.idx_cache[index,cur,1] = int(v)
self.val_cache[index,cur] = self.scale*h[v]
# self.val_cache[index,cur] = 0
self.w_cache[index,cur] = self.weight_fn(h[v])
cur += 1
i += 1
if self.verbose: logging.info(f"\t neighbors={neighbors} {self.idx_cache[index,:,1].numpy().T}")
self.cache.add(index)
self.n_cached += 1
# if self.n_cached % (max(self.n//20,1)) == 0: logging.info(f"\t Cached {self.n_cached} of {self.n}")
# print("GraphRowSubSampler: idx shape ", self.idx_cache[index,:].size())
return (self.idx_cache[index,:], self.val_cache[index,:], self.w_cache[index,:])
def __len__(self): return self.n
def __repr__(self):
return f"Subsample: {self.n} points with scale {self.scale} subsample={self.subsample}"
class GraphRowSampler(torch.utils.data.Dataset):
def __init__(self, G, scale, use_cache=True):
self.graph = nx.to_scipy_sparse_matrix(G, nodelist=list(range(G.order())))
self.n = G.order()
self.scale = scale
self.cache = dict() if use_cache else None
def __getitem__(self, index):
h = None
if self.cache is None or index not in self.cache:
h = gh.djikstra_wrapper( (self.graph, [index]) )
if self.cache is not None:
self.cache[index] = h
#logging.info(f"info {index}")
else:
h = self.cache[index]
#logging.info(f"hit {index}")
idx = torch.tensor([ (index, j) for j in range(self.n) if j != index], dtype=torch.long)
v = torch.DoubleTensor(h).view(-1)[idx[:,1]]
return (idx, v)
def __len__(self): return self.n
def __repr__(self):
return f"DATA: {self.n} points with scale {self.scale}"
def collate(ls):
x, y = zip(*ls)
return torch.cat(x), torch.cat(y)
def collate3(ls):
x, y, z = zip(*ls)
return torch.cat(x), torch.cat(y), torch.cat(z)
def build_dataset(G, lazy_generation, sample, subsample, scale, batch_size, weight_fn, num_workers):
n = G.order()
Z = None
logging.info("Building dataset")
if subsample is not None and (subsample <= 0 or subsample >= n):
subsample = n-1
if lazy_generation:
if subsample is not None:
z = DataLoader(GraphRowSubSampler(G, scale, subsample, weight_fn), batch_size//subsample, shuffle=True, collate_fn=collate3)
else:
z = DataLoader(GraphRowSampler(G, scale), batch_size//(n-1), shuffle=True, collate_fn=collate)
logging.info("Built Data Sampler")
else:
Z = gh.build_distance(G, scale, num_workers=int(num_workers) if num_workers is not None else 16) # load the whole matrix
logging.info(f"Built distance matrix with {scale} factor")
if subsample is not None:
z = DataLoader(GraphRowSubSampler(G, scale, subsample, weight_fn, Z=Z), batch_size//subsample, shuffle=True, collate_fn=collate3)
else:
idx = torch.LongTensor([(i,j) for i in range(n) for j in range(i+1,n)])
Z_sampled = gh.dist_sample_rebuild_pos_neg(Z, sample) if sample < 1 else Z
vals = torch.DoubleTensor([Z_sampled[i,j] for i in range(n) for j in range(i+1, n)])
z = DataLoader(TensorDataset(idx,vals), batch_size=batch_size, shuffle=True, pin_memory=torch.cuda.is_available())
# TODO does this not shuffle immediately?
logging.info("Built data loader")
return Z, z
#
# DATA Diagnostics
#
def major_stats(G, n, m, lazy_generation, Z,z, fig, ax, writer, visualize, subsample, n_rows_sampled=256, num_workers=16):
m.train(False)
if lazy_generation:
logging.info(f"\t Computing Major Stats lazily... ")
avg, me, mc = 0.0, 0.0, 0.0
good,bad = 0,0
_count = 0
for u in z:
index,vs,_ = u
v_rec = unwrap(m.dist_idx(index.to(device)))
v = vs.cpu().numpy()
for i in range(len(v)):
if dis.entry_is_good(v[i], v_rec[i]):
(_avg,me,mc) = dis.distortion_entry(v[i], v_rec[i], me, mc)
avg += _avg
good += 1
else:
bad += 1
_count += len(v)
# if n_rows_sampled*(n-1) <= _count:
ss = subsample if subsample is not None else n-1
if _count >= n_rows_sampled*ss:
break
logging.info(f"\t\t Completed edges={_count} good={good} bad={bad}")
avg_dist = avg/good if good > 0 else 0
wc_dist = me*mc
nan_elements = bad
map_avg = 0.0
# sample for rows
shuffled = list(range(n))
np.random.shuffle(shuffled)
mm = 0
for i in shuffled[0:n_rows_sampled]:
h_rec = unwrap(m.dist_row(i))
map_avg += dis.map_via_edges(G,i, h_rec)
mm += 1
mapscore = map_avg/mm
else:
H = Z
Hrec = unwrap(m.dist_matrix())
logging.info("Compare matrices built")
mc, me, avg_dist, nan_elements = dis.distortion(H, Hrec, n, num_workers)
wc_dist = me*mc
mapscore = dis.map_score(scipy.sparse.csr_matrix.todense(G).A, Hrec, n, num_workers)
if visualize:
num_spheres = np.minimum(len(m.S), 5)
num_hypers = np.minimum(len(m.H), 5)
for emb in range(num_spheres):
ax_this = vis.get_ax(num_hypers, num_spheres, ax, emb, 1)
ax_this.cla()
for emb in range(num_hypers):
ax_this = vis.get_ax(num_hypers, num_spheres, ax, emb, 0)
ax_this.cla()
text_3d_only = False
vis.draw_graph(G,m,fig, ax)
if num_hypers > 0:
axlabel = vis.get_ax(num_hypers, num_spheres, ax, 0, 0)
else:
axlabel = vis.get_ax(num_hypers, num_spheres, ax, 0, 1)
sdim = 0 if len(m.S) == 0 else len((m.S[0]).w[0])
if sdim == 3: text_3d_only = True
if text_3d_only:
axlabel.text(-1.00, 1.0, 1.1, "Epoch "+str(m.epoch), fontsize=20)
axlabel.text(-1.00, 1.0, 0.8, "MAP "+str(mapscore)[0:5], fontsize=20)
else:
axlabel.text(0.70, 1.1, "Epoch "+str(m.epoch), fontsize=20)
axlabel.text(0.70, 1.0, "MAP "+str(mapscore)[0:5], fontsize=20)
writer.grab_frame()
logging.info(f"Distortion avg={avg_dist} wc={wc_dist} me={me} mc={mc} nan_elements={nan_elements}")
logging.info(f"MAP = {mapscore}")
logging.info(f"scale={unwrap(m.scale())}")
return avg_dist, wc_dist, me, mc, mapscore
@argh.arg("dataset", help="dataset number")
# model params
@argh.arg("-d", "--dim", help="Dimension to use")
@argh.arg("--hyp", help="Number of copies of hyperbolic space")
@argh.arg("--edim", help="Euclidean dimension to use")
@argh.arg("--euc", help="Number of copies of Euclidean space")
@argh.arg("--sdim", help="Spherical dimension to use")
@argh.arg("--sph", help="Number of copies of spherical space")
@argh.arg("--riemann", help="Use Riemannian metric for product space. Otherwise, use L1 sum")
@argh.arg("-s", "--scale", help="Scale factor")
@argh.arg("-t", "--tol", help="Tolerances for projection")
# optimizers and params
@argh.arg("-y", "--use-yellowfin", help="Turn off yellowfin")
@argh.arg("--use-adagrad", help="Use adagrad")
@argh.arg("--use-svrg", help="Use SVRG")
@argh.arg("-T", help="SVRG T parameter")
@argh.arg("--use-hmds", help="Use MDS warmstart")
@argh.arg("-l", "--learning-rate", help="Learning rate")
@argh.arg("--decay-length", help="Number of epochs per lr decay")
@argh.arg("--decay-step", help="Size of lr decay")
@argh.arg("--momentum", help="Momentum")
@argh.arg("--epochs", help="number of steps in optimization")
@argh.arg("--burn-in", help="number of epochs to initially train at lower LR")
@argh.arg("-x", "--extra-steps", type=int, help="Steps per batch")
# data
@argh.arg("--num-workers", help="Number of workers for loading. Default is to use all cores")
@argh.arg("--batch-size", help="Batch size (number of edges)")
@argh.arg("--sample", help="Sample the distance matrix")
@argh.arg("-g", "--lazy-generation", help="Use a lazy data generation technique")
@argh.arg("--subsample", type=int, help="Number of edges per row to subsample")
@argh.arg("--resample-freq", type=int, help="Resample data frequency (expensive)")
# logging and saving
@argh.arg("--print-freq", help="Print loss this every this number of steps")
@argh.arg("--checkpoint-freq", help="Checkpoint Frequency (Expensive)")
@argh.arg("--model-save-file", help="Save model file")
@argh.arg("--model-load-file", help="Load model file")
@argh.arg("-w", "--warm-start", help="Warm start the model with MDS")
@argh.arg("--log-name", help="Log to a file")
@argh.arg("--log", help="Log to a file (automatic name)")
# misc
@argh.arg("--learn-scale", help="Learn scale")
@argh.arg("--logloss")
@argh.arg("--distloss")
@argh.arg("--squareloss")
@argh.arg("--symloss")
@argh.arg("-e", "--exponential-rescale", type=float, help="Exponential Rescale")
@argh.arg("--visualize", help="Produce an animation (dimension 2 only)")
def learn(dataset, dim=2, hyp=1, edim=1, euc=0, sdim=1, sph=0, scale=1., riemann=False, learning_rate=1e-1, decay_length=1000, decay_step=1.0, momentum=0.0, tol=1e-8, epochs=100, burn_in=0,
use_yellowfin=False, use_adagrad=False, resample_freq=1000, print_freq=1, model_save_file=None, model_load_file=None, batch_size=16,
num_workers=None, lazy_generation=False, log_name=None, log=False, warm_start=None, learn_scale=False, checkpoint_freq=100, sample=1., subsample=None,
logloss=False, distloss=False, squareloss=False, symloss=False, exponential_rescale=None, extra_steps=1, use_svrg=False, T=10, use_hmds=False, visualize=False):
# Log configuration
formatter = logging.Formatter('%(asctime)s %(message)s')
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(message)s',
datefmt='%FT%T',)
if log_name is None and log:
log_name = f"{os.path.splitext(dataset)[0]}.H{dim}-{hyp}.E{edim}-{euc}.S{sdim}-{sph}.lr{learning_rate}.log"
if log_name is not None:
logging.info(f"Logging to {log_name}")
log = logging.getLogger()
fh = logging.FileHandler(log_name)
fh.setFormatter( formatter )
log.addHandler(fh)
logging.info(f"Commandline {sys.argv}")
if model_save_file is None: logging.warning("No Model Save selected!")
G = load_graph.load_graph(dataset)
GM = nx.to_scipy_sparse_matrix(G, nodelist=list(range(G.order())))
# grab scale if warm starting:
if warm_start:
scale = pandas.read_csv(warm_start, index_col=0).values[0, -1]
n = G.order()
logging.info(f"Loaded Graph {dataset} with {n} nodes scale={scale}")
if exponential_rescale is not None:
# torch.exp(exponential_rescale * -d)
def weight_fn(d):
if d <= 2.0: return 5.0
elif d > 4.0: return 0.01
else: return 1.0
else:
def weight_fn(d):
return 1.0
Z, z = build_dataset(G, lazy_generation, sample, subsample, scale, batch_size, weight_fn, num_workers)
if model_load_file is not None:
logging.info(f"Loading {model_load_file}...")
m = torch.load(model_load_file).to(device)
logging.info(f"Loaded scale {unwrap(m.scale())} {torch.sum(m.embedding().data)} {m.epoch}")
else:
logging.info(f"Creating a fresh model warm_start?={warm_start}")
m_init = None
if warm_start:
# load from DataFrame; assume that the Julia combinatorial embedding has been saved
ws_data = pandas.read_csv(warm_start, index_col=0).values
scale = ws_data[0, ws_data.shape[1]-1]
m_init = torch.DoubleTensor(ws_data[:,range(ws_data.shape[1]-1)])
elif use_hmds:
# m_init = torch.DoubleTensor(mds_warmstart.get_normalized_hyperbolic(mds_warmstart.get_model(dataset,dim,scale)[1]))
m_init = torch.DoubleTensor(mds_warmstart.get_model(dataset,dim,scale)[1])
logging.info(f"\t Warmstarting? {warm_start} {m_init.size() if warm_start else None} {G.order()}")
initial_scale = z.dataset.max_dist / 3.0
print("MAX DISTANCE", z.dataset.max_dist)
print("AVG DISTANCE", torch.mean(z.dataset.val_cache))
initial_scale=0.0
m = ProductEmbedding(G.order(), dim, hyp, edim, euc, sdim, sph, initialize=m_init, learn_scale=learn_scale, initial_scale=initial_scale, logrel_loss=logloss, dist_loss=distloss, square_loss=squareloss, sym_loss=symloss, exponential_rescale=exponential_rescale, riemann=riemann).to(device)
m.normalize()
m.epoch = 0
logging.info(f"Constructed model with dim={dim} and epochs={m.epoch} isnan={np.any(np.isnan(m.embedding().cpu().data.numpy()))}")
if visualize:
name = 'animations/' + f"{os.path.split(os.path.splitext(dataset)[0])[1]}.H{dim}-{hyp}.E{edim}-{euc}.S{sdim}-{sph}.lr{learning_rate}.ep{epochs}.seed{seed}"
fig, ax, writer = vis.setup_plot(m=m, name=name, draw_circle=True)
else:
fig = None
ax = None
writer = None
#
# Build the Optimizer
#
# TODO: Redo this in a sensible way!!
# per-parameter learning rates
exp_params = [p for p in m.embed_params if p.use_exp]
learn_params = [p for p in m.embed_params if not p.use_exp]
hyp_params = [p for p in m.hyp_params if not p.use_exp]
euc_params = [p for p in m.euc_params if not p.use_exp]
sph_params = [p for p in m.sph_params if not p.use_exp]
scale_params = m.scale_params
# model_params = [{'params': m.embed_params}, {'params': m.scale_params, 'lr': 1e-4*learning_rate}]
# model_params = [{'params': learn_params}, {'params': m.scale_params, 'lr': 1e-4*learning_rate}]
model_params = [{'params': hyp_params}, {'params': euc_params}, {'params': sph_params, 'lr': 0.1*learning_rate}, {'params': m.scale_params, 'lr': 1e-4*learning_rate}]
# opt = None
if len(model_params) > 0:
opt = torch.optim.SGD(model_params, lr=learning_rate/10, momentum=momentum)
# opt = torch.optim.SGD(learn_params, lr=learning_rate/10, momentum=momentum)
# opt = torch.optim.SGD(model_params, lr=learning_rate/10, momentum=momentum)
# exp = None
# if len(exp_params) > 0:
# exp = torch.optim.SGD(exp_params, lr=1.0) # dummy for zeroing
if len(scale_params) > 0:
scale_opt = torch.optim.SGD(scale_params, lr=1e-3*learning_rate)
scale_decay = torch.optim.lr_scheduler.StepLR(scale_opt, step_size=1, gamma=.99)
else:
scale_opt = None
scale_decay = None
lr_burn_in = torch.optim.lr_scheduler.MultiStepLR(opt, milestones=[burn_in], gamma=10)
# lr_decay = torch.optim.lr_scheduler.StepLR(opt, decay_length, decay_step) #TODO reconcile multiple LR schedulers
if use_yellowfin:
from yellowfin import YFOptimizer
opt = YFOptimizer(model_params)
if use_adagrad:
opt = torch.optim.Adagrad(model_params)
if use_svrg:
from svrg import SVRG
base_opt = torch.optim.Adagrad if use_adagrad else torch.optim.SGD
opt = SVRG(m.parameters(), lr=learning_rate, T=T, data_loader=z, opt=base_opt)
# TODO add ability for SVRG to take parameter groups
logging.info(opt)
# Log stats from import: when warmstarting, check that it matches Julia's stats
logging.info(f"*** Initial Checkpoint. Computing Stats")
major_stats(GM,n,m, lazy_generation, Z, z, fig, ax, writer, visualize, subsample)
logging.info("*** End Initial Checkpoint\n")
# track best stats
best_loss = 1.0e10
best_dist = 1.0e10
best_wcdist = 1.0e10
best_map = 0.0
for i in range(m.epoch+1, m.epoch+epochs+1):
lr_burn_in.step()
# lr_decay.step()
# scale_decay.step()
# print(scale_opt.param_groups[0]['lr'])
# for param_group in opt.param_groups:
# print(param_group['lr'])
# print(type(opt.param_groups), opt.param_groups)
l, n_edges = 0.0, 0.0 # track average loss per edge
m.train(True)
if use_svrg:
for data in z:
def closure(data=data, target=None):
_data = data if target is None else (data,target)
c = m.loss(_data.to(device))
c.backward()
return c.item()
l += opt.step(closure)
# Projection
m.normalize()
else:
# scale_opt.zero_grad()
for the_step in range(extra_steps):
# Accumulate the gradient
for u in z:
# Zero out the gradients
# if opt is not None: opt.zero_grad() # This is handled by the SVRG.
# if exp is not None: exp.zero_grad()
opt.zero_grad()
for p in exp_params:
if p.grad is not None:
p.grad.detach_()
p.grad.zero_()
# Compute loss
_loss = m.loss(cu_var(u))
_loss.backward()
l += _loss.item() * u[0].size(0)
# print(weight)
n_edges += u[0].size(0)
# modify gradients if necessary
RParameter.correct_metric(m.parameters())
# step
opt.step()
for p in exp_params:
lr = opt.param_groups[0]['lr']
p.exp(lr)
# Projection
m.normalize()
# scale_opt.step()
l /= n_edges
# m.epoch refers to num of training epochs finished
m.epoch += 1
# Logging code
# if l < tol:
# logging.info("Found a {l} solution. Done at iteration {i}!")
# break
if i % print_freq == 0:
logging.info(f"{i} loss={l}")
if (i <= burn_in and i % (checkpoint_freq/5) == 0) or i % checkpoint_freq == 0:
logging.info(f"\n*** Major Checkpoint. Computing Stats and Saving")
avg_dist, wc_dist, me, mc, mapscore = major_stats(GM,n,m, True, Z, z, fig, ax, writer, visualize, subsample)
best_loss = min(best_loss, l)
best_dist = min(best_dist, avg_dist)
best_wcdist = min(best_wcdist, wc_dist)
best_map = max(best_map, mapscore)
if model_save_file is not None:
fname = f"{model_save_file}.{m.epoch}"
logging.info(f"Saving model into {fname} {torch.sum(m.embedding().data)} ")
torch.save(m, fname)
logging.info("*** End Major Checkpoint\n")
if i % resample_freq == 0:
if sample < 1. or subsample is not None:
Z, z = build_dataset(G, lazy_generation, sample, subsample, scale, batch_size, weight_fn, num_workers)
logging.info(f"final loss={l}")
logging.info(f"best loss={best_loss}, distortion={best_dist}, map={best_map}, wc_dist={best_wcdist}")
final_dist, final_wc, final_me, final_mc, final_map = major_stats(GM, n, m, lazy_generation, Z,z, fig, ax, writer, False, subsample)
if log_name is not None:
with open(log_name + '.stat', "w") as f:
f.write("Best-loss MAP dist wc Final-loss MAP dist wc me mc\n")
f.write(f"{best_loss:10.6f} {best_map:8.4f} {best_dist:8.4f} {best_wcdist:8.4f} {l:10.6f} {final_map:8.4f} {final_dist:8.4f} {final_wc:8.4f} {final_me:8.4f} {final_mc:8.4f}")
if visualize:
writer.finish()
if model_save_file is not None:
fname = f"{model_save_file}.final"
logging.info(f"Saving model into {fname}-final {torch.sum(m.embedding().data)} {unwrap(m.scale())}")
torch.save(m, fname)
if __name__ == '__main__':
_parser = argh.ArghParser()
_parser.add_commands([learn])
_parser.dispatch()
| hyperbolics-master | pytorch/pytorch_hyperbolic.py |
import utils.data_prep as dp
import pytorch.graph_helpers as gh
import numpy as np
import utils.distortions as dis
import utils.load_graph as load_graph
import torch, logging
from math import sqrt
def cudaify(x): return x.cuda() if torch.cuda.is_available() else x
def compute_d(u,l,n):
if np.min(u) < 0.:
print(np.min(u))
print(u)
assert(False)
b = max(1. + (sum(u)/sqrt(l)*np.linalg.norm(u))**2, 1.)
alpha = b - np.sqrt(b**2-1.)
v = u*(l*(1.-alpha))/sum(u)
d = (v+1.)/(1.+alpha)
d_min = np.min(d)
if d_min < 1:
print("\t\t\t Warning: Noisy d_min correction used.")
#d/=d_min
dv = d - 1
dinv = 1./d
t = dinv*np.divide(v-alpha, 1+alpha)
return (d,dv,t)
def data_rec(points, scale=1.0):
(n,d) = points.shape
Z = np.zeros( (n,n) )
for i in range(n):
di = 1-np.linalg.norm(points[i,:])**2
for j in range(n):
dj = 1-np.linalg.norm(points[j,:])**2
Z[i,j] = np.linalg.norm(points[i,:] - points[j,:])**2/(di*dj)
return (Z,np.arccosh(1+2.*Z)/scale)
def center_numpy_inplace(tZ,inv_d,v):
n = tZ.shape[0]
for i in range(n):
for j in range(n):
tZ[i,j] *= inv_d[i]
for i in range(n):
for j in range(n):
tZ[i,j] *= inv_d[j]
for i in range(n):
for j in range(n):
tZ[i,j] -= (v[i]+v[j])
#mu = np.mean(tZ,1)
#for i in range(n): tZ[:,i] -= mu
#mu = np.mean(tZ,0)
#for i in range(n): tZ[i,:] -= mu
def power_method(_A,r,T=5000,tol=1e-14):
(n,n) = _A.shape
A = cudaify( torch.DoubleTensor(_A) )
x = cudaify( torch.DoubleTensor( np.random.randn(n,r)/r ) )
_eig = cudaify( torch.DoubleTensor(r).zero_() )
for i in range(r):
for j in range(T):
y = x[:,0:i]@(x[:,0:i].transpose(0,1)@x[:,i]) if i > 0 else 0.
x[:,i] = A@x[:,i] - y
nx = torch.norm(x[:,i])
x[:,i] /= nx
if (abs(_eig[i]) > tol) and (abs(_eig[i] - nx)/_eig[i] < tol):
print(f"\teig {i} iteration {j} --> {_eig[i]}")
break
_eig[i] = nx
return (_eig.cpu().numpy(), x.cpu().numpy())
def get_eig(A,r, use_power=False):
if use_power:
return power_method(A,r)
else:
e, ev = np.linalg.eigh(A)
return e[-r:], ev[:,-r:]
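def _get_eig_sketch():
    # Illustrative only (not used by the pipeline): top eigenpair of a small
    # symmetric matrix via get_eig. For this matrix the leading eigenvalue is 3,
    # with eigenvector proportional to [1, 1].
    A = np.array([[2.0, 1.0], [1.0, 2.0]])
    vals, vecs = get_eig(A, 1)
    return vals, vecs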
def get_model(dataset, max_k, scale = 1.0):
#G = dp.load_graph(dataset)
G = load_graph.load_graph(dataset)
H = gh.build_distance(G,1.0)
(n,n) = H.shape
Z = (np.cosh(scale*H) -1)/2
# Find Perron vector
(d,U)=get_eig(Z,1)
idx = np.argmax(d)
l0 = d[idx]
u = U[:,idx]
u = u if u[0] > 0 else -u
(d1,dv,v) = compute_d(u,l0,n)
inv_d = 1./d1
#Q = (np.eye(n)-np.ones( (n,n)) /n)*np.diag(inv_d)
#G = -Q@[email protected]/2
G = Z # NB: this aliases Z (no copy is made); Z is modified in place by the centering below.
center_numpy_inplace(G, inv_d, v)
G /= -2.0
# Recover our points
(emb_d, points_d) = get_eig(G,max_k)
# good_idx = emb_d > 0
# our_points = np.real(points_d[:,good_idx]@np.diag(np.sqrt(emb_d[good_idx])))
bad_idx = emb_d <= 0
emb_d[bad_idx] = 0
our_points = [email protected](np.sqrt(emb_d))
# Just for evaluation
(Z,Hrec) = data_rec(our_points, scale)
# np.set_printoptions(threshold=np.nan)
# print(f"Distortion Score {dis.distortion(H, Hrec, n, 2)}")
# this will get done in the preliminary stats pass:
#print(f"Map Score {dis.map_score(H/scale, Hrec, n, 2)}")
return (H,our_points)
def get_normalized_hyperbolic(model):
x = torch.DoubleTensor(model)
ds = torch.norm(x,2,1)
ds2 = ds**2
# need to find y s.t. \|x\|^2 = \frac{\|y\|^2}{1-\|y\|^2} => \|y\|^2 = \frac{\|x\|^2}{1+\|x\|^2}
new_norm = torch.sqrt(ds2/((1+1e-10)*(1.0+ds2)))/ds
z = torch.diag(new_norm) @ x
logging.info(f"norm_z={torch.max(torch.norm(z,2,1))} min_norm_ds={torch.min(ds)} input={torch.max(torch.norm(x,2,1))} q={np.any(np.isnan(z.numpy()))}")
return z
| hyperbolics-master | pytorch/mds_warmstart.py |
import numpy as np
import torch
from torch import nn
from hyperbolic_parameter import PoincareParameter, EuclideanParameter, SphericalParameter, HyperboloidParameter
import logging
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#
# Our models
#
class Hyperbolic_Mean(nn.Module):
def __init__(self, d):
super(Hyperbolic_Mean, self).__init__()
self.w = PoincareParameter(sizes=d)
def loss(self, y_data):
return torch.sum(dist_p(self.w.repeat(y_data.size(0),1),y_data)**2)
def normalize(self):
self.w.proj()
class Hyperbolic_Lines(nn.Module):
def __init__(self, d):
super(Hyperbolic_Lines, self).__init__()
self.w = PoincareParameter(sizes=d)
# $$\min_{v} \sum_{j=1}^{n} \mathrm{acosh}\left(1 + d^2_E(L(v), w_j)\right)^2$$
# learn the lines in a zero centered way.
def loss(self, y_data):
return torch.sum(acosh(1 + line_dist_sq(self.w, y_data))**2)
def normalize(self): # we handle this in the line_dist_s
return
### Embedding Models
# We implement both in pytorch using a custom SGD optimizer. This is used to correct for the hyperbolic variables.
#
# Here are the basic distance and projection functions. The distance in Poincaré space is:
#
# $$ d(u,v) = \mathrm{arcosh}\left(1 + 2\frac{\|u-v\|^2}{(1-\|u\|^2)(1-\|v\|^2)}\right)$$
#
# We implement a simple projection on the disk as well.
# arcosh(x) = ln(x + sqrt(x^2 - 1))
def acosh(x):
return torch.log(x + torch.sqrt(x**2-1))
# TODO: probably makes sense to move distance function into the corresponding Parameter type
def dist_p(u,v):
z = 2*torch.norm(u-v,2,1)**2
uu = 1. + torch.div(z,((1-torch.norm(u,2,1)**2)*(1-torch.norm(v,2,1)**2)))
# machine_eps = np.finfo(uu.data.numpy().dtype).eps # problem with cuda tensor
# return acosh(torch.clamp(uu, min=1+machine_eps))
return acosh(uu)
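def _dist_p_sketch():
    # Illustrative only: Poincare-disk distance from the origin to (0.5, 0).
    # The closed form is 2*artanh(0.5) ~= 1.0986, which dist_p reproduces.
    u = torch.zeros(1, 2, dtype=torch.double)
    v = torch.tensor([[0.5, 0.0]], dtype=torch.double)
    return dist_p(u, v)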
# def h_proj(x, eps=1e-9):
# current_norms = torch.norm(x,2,x.dim() - 1)
# mask_idx = current_norms < 1.0
# modified = 1./((1+eps)*current_norms)
# modified[mask_idx] = 1.0
# new_size = [1]*current_norms.dim() + [x.size(x.dim()-1)]
# modified = modified.unsqueeze(modified.dim()).repeat(*new_size)
# return x * modified
def dot(x,y): return torch.sum(x * y, -1)
def dist_e(u, v):
""" Input shape (n, d) """
return torch.norm(u-v, 2, dim=1)
def dist_s(u, v, eps=1e-9):
uu = SphericalParameter._proj(u)
vv = SphericalParameter._proj(v)
return torch.acos(torch.clamp(dot(uu, vv), -1+eps, 1-eps))
# Squared Euclidean distance from each row of y to the line L(_x) spanned by _x,
# used in the objective $$\min_{v} \sum_{j=1}^{n} \mathrm{acosh}\left(1 + d^2_E(L(v), w_j)\right)^2$$
def line_dist_sq(_x,y):
norm_x = torch.norm(_x)**(-2)
x = _x.repeat(y.size(0),1)
return torch.norm(y - torch.diag(dot(x,y)*norm_x)@x,2,1)**2
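def _line_dist_sq_sketch():
    # Illustrative only: the squared Euclidean distance from the point (0, 2)
    # to the line spanned by (1, 0) is 4.
    v = torch.tensor([1.0, 0.0], dtype=torch.double)
    y = torch.tensor([[0.0, 2.0]], dtype=torch.double)
    return line_dist_sq(v, y)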
class ProductEmbedding(nn.Module):
def __init__(self, n, hyp_d, hyp_copies=1, euc_d=1, euc_copies=0, sph_d=1, sph_copies=0, project=True, initialize=None, learn_scale=False, initial_scale=0.0, absolute_loss=False, logrel_loss=False, dist_loss=False, square_loss=False, sym_loss=False, exponential_rescale=None, riemann=False):
super().__init__()
self.n = n
self.riemann = riemann
# self.H = nn.ModuleList([Embedding(dist_p, PoincareParameter, n, hyp_d, project, initialize, learn_scale, initial_scale) for _ in range(hyp_copies)])
self.H = nn.ModuleList([Embedding(HyperboloidParameter.dist_h, HyperboloidParameter, n, hyp_d, project, initialize, learn_scale, initial_scale) for _ in range(hyp_copies)])
self.E = nn.ModuleList([Embedding(dist_e, EuclideanParameter, n, euc_d, False, initialize, False, initial_scale) for _ in range(euc_copies)])
# self.E = nn.ModuleList([Embedding(dist_e, EuclideanParameter, n, euc_d, False, initialize, learn_scale, initial_scale) for _ in range(euc_copies)])
self.S = nn.ModuleList([Embedding(dist_s, SphericalParameter, n, sph_d, project, initialize, learn_scale, initial_scale) for _ in range(sph_copies)])
self.scale_params = [H.scale_log for H in self.H] \
+ [E.scale_log for E in self.E] \
+ [S.scale_log for S in self.S] \
if learn_scale else []
self.hyp_params = [H.w for H in self.H]
self.euc_params = [E.w for E in self.E]
self.sph_params = [S.w for S in self.S]
self.embed_params = [H.w for H in self.H] \
+ [E.w for E in self.E] \
+ [S.w for S in self.S]
self.absolute_loss = absolute_loss
self.logrel_loss = logrel_loss
self.dist_loss = dist_loss
self.square_loss = square_loss
self.sym_loss = sym_loss
abs_str = "absolute" if self.absolute_loss else "relative"
self.exponential_rescale = exponential_rescale
exp_str = f"Exponential {self.exponential_rescale}" if self.exponential_rescale is not None else "No Rescale"
logging.info(f"{abs_str} {exp_str}")
def step_rescale( self, values ):
y = torch.ones( values.size(), dtype=torch.double, device=device )/(10*self.n)
y[torch.lt( values.data, 5)] = 1.0
return y
#return values**(-2)
def all_attr(self, fn):
H_attr = [fn(H) for H in self.H]
E_attr = [fn(E) for E in self.E]
S_attr = [fn(S) for S in self.S]
return H_attr + E_attr + S_attr
def embedding(self):
""" Return list of all entries of the embedding(s) """
return torch.cat(self.all_attr(lambda emb: emb.w.view(-1)))
# return torch.stack([H.w for H in self.H], dim=0)
# return torch.stack(self.all_attr(lambda emb: emb.w), dim=0)
# return (torch.stack([H.w for H in self.H], dim=0), torch.stack([H.w for H in self.E], dim=0), torch.stack([H.w for H in self.S], dim=0))
def scale(self):
# return [H.scale() for H in self.H]
return self.all_attr(lambda emb: emb.scale())
def dist_idx(self, idx):
# return sum([H.dist_idx(idx) for H in self.H])
d = self.all_attr(lambda emb: emb.dist_idx(idx))
if self.riemann:
return torch.norm(torch.stack(d, 0), 2, dim=0)
else:
return sum(d)
def dist_row(self, i):
# return sum([H.dist_row(i) for H in self.H])
d = self.all_attr(lambda emb: emb.dist_row(i))
if self.riemann:
return torch.norm(torch.stack(d, 0), 2, dim=0)
else:
return sum(d)
def dist_matrix(self):
# return sum([H.dist_matrix() for H in self.H])
d = self.all_attr(lambda emb: emb.dist_matrix())
if self.riemann:
return torch.norm(torch.stack(d), 2, dim=0)
else:
return sum(d)
def loss(self, _x):
idx, values, w = _x
d = self.dist_idx(idx)
#term_rescale = torch.exp( 2*(1.-values) ) if self.exponential_rescale else self.step_rescale(values)
term_rescale = w
if self.absolute_loss:
loss = torch.sum( term_rescale*( d - values)**2)
elif self.logrel_loss:
loss = torch.sum( torch.log((d/values)**2)**2 )
elif self.dist_loss:
loss = torch.sum( torch.abs(term_rescale*((d/values) - 1)) )
elif self.square_loss:
loss = torch.sum( term_rescale*torch.abs((d/values)**2 - 1) )
else:
l1 = torch.sum( term_rescale*((d/values) - 1)**2 )
l2 = torch.sum( term_rescale*((values/d) - 1)**2 ) if self.sym_loss else 0
loss = l1 + l2
return loss / values.size(0)
def normalize(self):
for H in self.H:
H.normalize()
for S in self.S:
S.normalize()
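def _product_embedding_sketch():
    # Illustrative only (hypothetical sizes): a 10-point embedding into one copy
    # of H^2 and one copy of S^2, queried for the recovered distance between
    # points 0 and 1.
    m = ProductEmbedding(10, 2, hyp_copies=1, sph_d=2, sph_copies=1).to(device)
    idx = torch.tensor([[0, 1]], dtype=torch.long, device=device)
    return m.dist_idx(idx)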
class Embedding(nn.Module):
def __init__(self, dist_fn, param_cls, n, d, project=True, initialize=None, learn_scale=False, initial_scale=0.0):
super().__init__()
self.dist_fn = dist_fn
self.n, self.d = n, d
self.project = project
if initialize is not None: logging.info(f"Initializing {np.any(np.isnan(initialize.numpy()))} {initialize.size()} {(n,d)}")
# x = h_proj( 1e-3 * torch.rand(n, d).double() ) if initialize is None else torch.DoubleTensor(initialize[0:n,0:d])
# self.w = Hyperbolic_Parameter(x)
# self.w = param_cls(x)
self.w = param_cls(data=initialize, sizes=(n,d))
z = torch.tensor([0.0], dtype=torch.double)
# init_scale = 1.0
if learn_scale:
self.scale_log = nn.Parameter(torch.tensor([initial_scale], dtype=torch.double))
# self.scale_log.register_hook(lambda grad: torch.clamp(grad, -1.0, 1.0))
else:
self.scale_log = torch.tensor([initial_scale], dtype=torch.double, device=device)
self.learn_scale = learn_scale
# self.scale_clamp = 3.0
# logging.info(f"{self} {torch.norm(self.w.data - x)} {x.size()}")
logging.info(f"{self} {self.w.size()}")
def scale(self):
# print(self.scale_log.type(), self.lo_scale.type(), self.hi_scale.type())
# scale = torch.exp(torch.clamp(self.scale_log, -self.thres, self.thres))
# scale = torch.exp(self.scale_log.tanh()*self.scale_clamp)
# return torch.sqrt(self.scale_log)
scale = torch.exp(self.scale_log)
# scale = self.scale_log
# scale = scale if self.learn_scale else 1.0
return scale
def dist_idx(self, idx):
# print("idx shape: ", idx.size(), "values shape: ", values.size())
wi = torch.index_select(self.w, 0, idx[:,0])
wj = torch.index_select(self.w, 0, idx[:,1])
d = self.dist_fn(wi,wj)
return d * self.scale() # rescale to the size of the true distances matrix
def dist_row(self, i):
m = self.w.size(0)
return self.dist_fn(self.w[i,:].clone().unsqueeze(0).repeat(m,1), self.w) * self.scale()
def dist_matrix(self):
m = self.w.size(0)
rets = torch.zeros(m, m, dtype=torch.double, device=device)
for i in range(m):
rets[i,:] = self.dist_row(i)
return rets
def normalize(self):
self.w.proj()
# if self.project:
# self.w.proj()
# print("normalize: scale ", self.scale.data)
# print(type(self.scale_log), self.scale_log.type())
# self.scale_log = torch.clamp(self.scale_log, self.lo_scale, self.hi_scale)
| hyperbolics-master | pytorch/hyperbolic_models.py |
# Should be moved to utility
from multiprocessing import Pool
import networkx as nx
import scipy.sparse.csgraph as csg
import logging
import numpy as np
def djikstra_wrapper( _x ):
(mat, x) = _x
return csg.dijkstra(mat, indices=x, unweighted=False, directed=False)
def build_distance(G, scale, num_workers=None):
n = G.order()
p = Pool() if num_workers is None else Pool(num_workers)
#adj_mat_original = nx.to_scipy_sparse_matrix(G)
adj_mat_original = nx.to_scipy_sparse_matrix(G, nodelist=list(range(G.order())))
# Simple chunking
nChunks = 128 if num_workers is not None and num_workers > 1 else n
if n > nChunks:
chunk_size = n//nChunks
extra_chunk_size = (n - (n//nChunks)*nChunks)
chunks = [ list(range(k*chunk_size, (k+1)*chunk_size)) for k in range(nChunks)]
if extra_chunk_size >0: chunks.append(list(range(n-extra_chunk_size, n)))
Hs = p.map(djikstra_wrapper, [(adj_mat_original, chunk) for chunk in chunks])
H = np.concatenate(Hs,0)
logging.info(f"\tFinal Matrix {H.shape}")
else:
H = djikstra_wrapper( (adj_mat_original, list(range(n))) )
H *= scale
return H
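def _build_distance_sketch():
    # Illustrative only: the all-pairs shortest-path matrix of a 4-cycle at
    # scale 1.0; opposite nodes end up at distance 2.
    G = nx.cycle_graph(4)
    return build_distance(G, 1.0, num_workers=1)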
def build_distance_hyperbolic(G, scale):
return np.cosh(build_distance(G,scale)-1.)/2.
def dist_sample_rebuild(dm, alpha):
dist_mat = np.copy(dm)
n,_ = dist_mat.shape
keep_dists = np.random.binomial(1,alpha,(n,n))
# sample matrix:
for i in range(n):
for j in range(n):
dist_mat[i,j] = -1 if keep_dists[i,j] == 0 and i!=j else dist_mat[i,j]
# use symmetry first for recovery:
for i in range(n):
for j in range(i+1,n):
if dist_mat[i,j] == -1 and dist_mat[j,i] > 0:
dist_mat[i,j] = dist_mat[j,i]
if dist_mat[j,i] == -1 and dist_mat[i,j] > 0:
dist_mat[j,i] = dist_mat[i,j]
# now let's rebuild it with triangle inequality:
largest_dist = np.max(dist_mat)
for i in range(n):
for j in range(i+1,n):
# missing distance:
if dist_mat[i,j] == -1:
dist = largest_dist
for k in range(n):
if dist_mat[i,k] > 0 and dist_mat[j,k] > 0 and dist_mat[i,k]+dist_mat[j,k] < dist:
dist = dist_mat[i,k]+dist_mat[j,k]
dist_mat[i,j] = dist
dist_mat[j,i] = dist
return dist_mat
def dist_sample_rebuild_pos_neg(dm, alpha):
n,_ = dm.shape
dist_mat = -1*np.ones((n,n))
pos_edges = np.argwhere(dm == 1)
neg_edges = np.argwhere(dm > 1)
num_pos,_ = np.shape(pos_edges)
num_neg,_ = np.shape(neg_edges)
# balance the sampling rates, if possible
sr_pos = min(1,alpha*n*n/(2*num_pos))
sr_neg = min(1,alpha*n*n/(2*num_neg))
keep_pos_edges = np.random.binomial(1, sr_pos, num_pos)
keep_neg_edges = np.random.binomial(1, sr_neg, num_neg)
logging.info(f"\tPositive edges {num_pos} , negative edges {num_neg}")
logging.info(f"\tSampled {sum((keep_pos_edges == 1).astype(int))} positive edges")
logging.info(f"\tSampled {sum((keep_neg_edges == 1).astype(int))} negative edges")
# sample matrix:
for i in range(num_pos):
if keep_pos_edges[i] == 1:
dist_mat[pos_edges[i][0],pos_edges[i][1]] = 1
for i in range(num_neg):
if keep_neg_edges[i] == 1:
dist_mat[neg_edges[i][0],neg_edges[i][1]] = dm[neg_edges[i][0], neg_edges[i][1]]
# use symmetry first for recovery:
for i in range(n):
dist_mat[i,i] = 0
for j in range(i+1,n):
if dist_mat[i,j] == -1 and dist_mat[j,i] > 0:
dist_mat[i,j] = dist_mat[j,i]
if dist_mat[j,i] == -1 and dist_mat[i,j] > 0:
dist_mat[j,i] = dist_mat[i,j]
# now let's rebuild it with triangle inequality:
largest_dist = np.max(dist_mat)
for i in range(n):
for j in range(i+1,n):
# missing distance:
if dist_mat[i,j] == -1:
dist = largest_dist
for k in range(n):
if dist_mat[i,k] > 0 and dist_mat[j,k] > 0 and dist_mat[i,k]+dist_mat[j,k] < dist:
dist = dist_mat[i,k]+dist_mat[j,k]
dist_mat[i,j] = dist
dist_mat[j,i] = dist
return dist_mat
| hyperbolics-master | pytorch/graph_helpers.py |
import torch
from torch import nn
from torch.autograd import Variable
import logging
import numpy as np, math
import random
def dot(x,y): return torch.sum(x * y, -1)
def acosh(x):
return torch.log(x + torch.sqrt(x**2-1))
class RParameter(nn.Parameter):
def __new__(cls, data=None, requires_grad=True, sizes=None, exp=False):
if data is None:
assert sizes is not None
data = (1e-3 * torch.randn(sizes, dtype=torch.double)).clamp_(min=-3e-3,max=3e-3)
#TODO get partial data if too big i.e. data[0:n,0:d]
ret = super().__new__(cls, data, requires_grad=requires_grad)
# ret.data = data
ret.initial_proj()
ret.use_exp = exp
return ret
@staticmethod
def _proj(x):
raise NotImplementedError
def proj(self):
self.data = self.__class__._proj(self.data.detach())
# print(torch.norm(self.data, dim=-1))
def initial_proj(self):
""" Project the initialization of the embedding onto the manifold """
self.proj()
def modify_grad_inplace(self):
pass
@staticmethod
def correct_metric(ps):
for p in ps:
if isinstance(p,RParameter):
p.modify_grad_inplace()
# TODO can use kwargs instead of pasting defaults
class HyperboloidParameter(RParameter):
def __new__(cls, data=None, requires_grad=True, sizes=None, exp=True):
if sizes is not None:
sizes = list(sizes)
sizes[-1] += 1
return super().__new__(cls, data, requires_grad, sizes, exp)
@staticmethod
def dot_h(x,y):
return torch.sum(x * y, -1) - 2*x[...,0]*y[...,0]
@staticmethod
def norm_h(x):
assert torch.all(HyperboloidParameter.dot_h(x,x) >= 0), torch.min(HyperboloidParameter.dot_h(x,x))
return torch.sqrt(torch.clamp(HyperboloidParameter.dot_h(x,x), min=0.0))
@staticmethod
def dist_h(x,y):
# print("before", x, y)
# print("before dots", HyperboloidParameter.dot_h(x,x)+1, HyperboloidParameter.dot_h(y,y)+1)
# print("after dots", -HyperboloidParameter.dot_h(x,y))
# return acosh(-HyperboloidParameter.dot_h(x,y) - 1e-7)
bad = torch.min(-HyperboloidParameter.dot_h(x,y) - 1.0)
if bad <= -1e-4:
print("bad dist", bad.item())
# assert torch.all(-HyperboloidParameter.dot_h(x,y) >= 1.0 - 1e-4), torch.min(-HyperboloidParameter.dot_h(x,y) - 1.0)
# we're dividing by dist_h somewhere so we can't have it be 0, force dp > 1
return acosh(torch.clamp(-HyperboloidParameter.dot_h(x,y), min=(1.0+1e-8)))
@staticmethod
def _proj(x):
""" Project onto hyperboloid """
x_ = x.clone()
x_tail = x_[...,1:]
current_norms = torch.norm(x_tail,2,-1)
scale = (current_norms/1e7).clamp_(min=1.0)
x_tail /= scale.unsqueeze(-1)
x_[...,1:] = x_tail
x_[...,0] = torch.sqrt(1 + torch.norm(x_tail,2,-1)**2)
debug = True
if debug:
bad = torch.min(-HyperboloidParameter.dot_h(x_,x_))
if bad <= 0.0:
print("way off hyperboloid", bad)
assert torch.all(-HyperboloidParameter.dot_h(x_,x_) > 0.0), f"way off hyperboloid {torch.min(-HyperboloidParameter.dot_h(x_,x_))}"
xxx = x_ / torch.sqrt(torch.clamp(-HyperboloidParameter.dot_h(x_,x_), min=0.0)).unsqueeze(-1)
return xxx
# return x / (-HyperboloidParameter.norm_h(x)).unsqueeze(-1)
def initial_proj(self):
""" Project the initialization of the embedding onto the manifold """
self.data[...,0] = torch.sqrt(1 + torch.norm(self.data.detach()[...,1:],2,-1)**2)
self.proj()
def exp(self, lr):
""" Exponential map """
x = self.data.detach()
# print("norm", HyperboloidParameter.norm_h(x))
v = -lr * self.grad
retract = False
if retract:
# retraction
# print("retract")
self.data = x + v
else:
# print("tangent", HyperboloidParameter.dot_h(x, v))
assert not torch.any(torch.isnan(v))
n = self.__class__.norm_h(v).unsqueeze(-1)
assert not torch.any(torch.isnan(n))
n.clamp_(max=1.0)
# e = torch.cosh(n)*x + torch.sinh(n)*v/n
mask = torch.abs(n)<1e-7
cosh = torch.cosh(n)
cosh[mask] = 1.0
sinh = torch.sinh(n)
sinh[mask] = 0.0
n[mask] = 1.0
e = cosh*x + sinh/n*v
# assert torch.all(-HyperboloidParameter.dot_h(e,e) >= 0), torch.min(-HyperboloidParameter.dot_h(e,e))
self.data = e
self.proj()
def modify_grad_inplace(self):
""" Convert Euclidean gradient into Riemannian """
self.grad[...,0] *= -1
#print("check data")
#print(np.argwhere(torch.isnan(self.data).cpu().numpy()))
#print("check grad")
#print(np.argwhere(torch.isnan(self.grad).cpu().numpy()))
# self.grad += self.__class__.dot_h(self.data, self.grad).unsqueeze(-1) * self.data
self.grad -= self.__class__.dot_h(self.data, self.grad).unsqueeze(-1) / HyperboloidParameter.dot_h(self.data, self.data).unsqueeze(-1) * self.data
# TODO:
# 1. Improve speed up of projection by making operations in place.
class PoincareParameter(RParameter):
def __new__(cls, data=None, requires_grad=True, sizes=None, check_graph=False):
ret = super().__new__(cls, data, requires_grad, sizes)
ret.check_graph = check_graph
return ret
def modify_grad_inplace(self):
# d = self.data.dim()
w_norm = torch.norm(self.data,2,-1, True)
        # This is the inverse of the Riemannian metric, which we need to correct for.
hyper_b = (1 - w_norm**2)**2/4
# new_size = tuple([1] * (d - 1) + [self.data.size(d-1)])
# self.grad *= hyper_b.repeat(*new_size) # multiply pointwise
self.grad *= hyper_b # multiply pointwise
self.grad.clamp_(min=-10000.0, max=10000.0)
# We could do the projection here?
# NB: THIS IS DEATHLY SLOW. FIX IT
if self.check_graph and np.any(np.isnan(self.grad.data.cpu().numpy())):
print(np.any(np.isnan(self.data.cpu().numpy())))
print(np.any(np.isnan(self.grad.data.cpu().numpy())))
print(np.any(np.isnan(w_norm.cpu().numpy())))
raise ValueError("NaN During Hyperbolic")
@staticmethod
def _correct(x, eps=1e-10):
current_norms = torch.norm(x,2,x.dim() - 1)
mask_idx = current_norms < 1./(1+eps)
modified = 1./((1+eps)*current_norms)
modified[mask_idx] = 1.0
#new_size = [1]*current_norms.dim() + [x.size(x.dim()-1)]
#return modified.unsqueeze(modified.dim()).repeat(*new_size)
# return modified.unsqueeze(modified.dim()).expand(x.size())
return modified.unsqueeze(-1)
@staticmethod
def _proj(x, eps=1e-10):
return x * PoincareParameter._correct(x, eps=eps)
# def proj(self, eps=1e-10):
# self.data = self.__class__._proj(self.data.detach())#PoincareParameter._correct(self.data, eps=eps)
def __repr__(self):
return 'Hyperbolic parameter containing:' + self.data.__repr__()
class SphericalParameter(RParameter):
def __new__(cls, data=None, requires_grad=True, sizes=None, exp=True):
if sizes is not None:
sizes = list(sizes)
sizes[-1] += 1
return super().__new__(cls, data, requires_grad, sizes, exp)
def modify_grad_inplace(self):
""" Convert Euclidean gradient into Riemannian by projecting onto tangent space """
# pass
self.grad -= dot(self.data, self.grad).unsqueeze(-1) * self.data
def exp(self, lr):
x = self.data.detach()
v = -lr*self.grad
retract = False
if retract:
# retraction
self.data = x + v
else:
n = torch.norm(v, 2, -1, keepdim=True)
mask = torch.abs(n)<1e-7
cos = torch.cos(n)
cos[mask] = 1.0
sin = torch.sin(n)
sin[mask] = 0.0
n[torch.abs(n)<1e-7] = 1.0
e = cos*x + sin*v/n
self.data = e
self.proj()
@staticmethod
def _proj(x):
# return x / torch.norm(x, 2, -1).unsqueeze(-1)
return x / torch.norm(x, 2, -1, True)
# def proj(self):
# x = self.data.detach()
# self.data = SphericalParameter._proj(x)
def initial_proj(self):
# pass
self.data[...,0] = torch.sqrt(1 - torch.norm(self.data[...,1:],2,-1)**2)
class EuclideanParameter(RParameter):
def proj(x):
pass
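# Minimal usage sketch (illustrative addition, not part of the original module):
# one manual Riemannian SGD step on a HyperboloidParameter. The shapes, loss, and
# learning rate below are made up for the example.
if __name__ == '__main__':
    torch.manual_seed(0)
    w = HyperboloidParameter(sizes=(5, 2))            # 5 points in H^2, stored with 3 coordinates each
    loss = HyperboloidParameter.dist_h(w[:-1], w[1:]).sum()
    loss.backward()
    RParameter.correct_metric([w])                    # Euclidean gradient -> Riemannian gradient
    w.exp(lr=0.1)                                     # exponential-map update + re-projection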
| hyperbolics-master | pytorch/hyperbolic_parameter.py |
import nltk
from nltk.corpus import wordnet as wn
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
from scipy.sparse.csgraph import floyd_warshall, connected_components
import operator
from collections import defaultdict
import numpy as np
import networkx as nx
import json
"""Script assumes input text is in GloVe format."""
# Some definitions
# Reflection (circle inversion of x through orthogonal circle centered at a)
def isometric_transform(a, x):
r2 = np.linalg.norm(a)**2 - (1.0)
return r2/np.linalg.norm(x - a)**2 * (x-a) + a
# Inversion taking mu to origin
def reflect_at_zero(mu,x):
a = mu/np.linalg.norm(mu)**2
return isometric_transform(a,x)
def acosh(x):
return np.log(x + np.sqrt(x**2-1))
# Hyperbolic distance
def dist(u,v):
z = 2 * np.linalg.norm(u-v)**2
uu = 1. + z/((1-np.linalg.norm(u)**2)*(1-np.linalg.norm(v)**2))
return acosh(uu)
# Hyperbolic distance from 0
def hyp_dist_origin(x):
return np.log((1+np.linalg.norm(x))/(1-np.linalg.norm(x)))
# Scalar multiplication w*x
def hyp_scale(w, x):
sgn = (-1.0)**float(w<0)
w *= sgn
if w == 1:
return sgn*x
else:
x_dist = (1+np.linalg.norm(x))/(1-np.linalg.norm(x))
alpha = 1-2/(1+x_dist**w)
alpha *= 1/np.linalg.norm(x)
return sgn*alpha*x
# Convex combination (1-w)*x+w*y
def hyp_conv_comb(w, x, y):
# circle inversion sending x to 0
(xinv, yinv) = (reflect_at_zero(x, x), reflect_at_zero(x, y))
# scale by w
pinv = hyp_scale(w, yinv)
# reflect back
return reflect_at_zero(x, pinv)
# Weighted sum w1*x + w2*y
def hyp_weighted_sum(w1, w2, x, y):
p = hyp_conv_comb(w2 / (w1 + w2), x, y)
return hyp_scale(w1 + w2, p)
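# Illustrative sanity check (added for exposition, not in the original script): with
# the helpers above, scaling a point by w multiplies its hyperbolic distance from the
# origin by |w|, and dist(0, x) agrees with hyp_dist_origin(x). The test point is made up.
_x_check = np.array([0.3, 0.1])
assert abs(hyp_dist_origin(hyp_scale(2.0, _x_check)) - 2.0 * hyp_dist_origin(_x_check)) < 1e-8
assert abs(dist(np.zeros(2), _x_check) - hyp_dist_origin(_x_check)) < 1e-8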
vector_dim = 21
file = "wordnet_full.txt"
with open(file, 'r') as emb:
emb_lines = emb.readlines()
relTau = np.float64(emb_lines[0])
emb_lines = emb_lines[1:]
emb_dict = dict()
IDtoWords = dict()
WordstoIDs = dict()
for idx, line in enumerate(emb_lines):
curr_line = line.split(" ")
curr_syn = curr_line[0]
emb_dict[curr_syn] = np.asarray(list(map(np.float64, curr_line[1:])))
IDtoWords[idx] = curr_syn
WordstoIDs[curr_syn] = idx
vocab_size = len(emb_dict)
W = np.zeros((vocab_size, vector_dim))
for word, vec in emb_dict.items():
idx = WordstoIDs[word]
W[idx,:] = vec
# Find the top 10 nearest neighbors to a particular synset for given relationship.
e1 = wn.synset('geometry.n.01')
vec_e1 = emb_dict[str(e1)]
curr_dist = []
for row_idx in range(W.shape[0]):
curr_vec = W[row_idx,:]
normalized_dist = (dist(curr_vec,vec_e1))/relTau
curr_dist.append(normalized_dist)
e1_idx = WordstoIDs[str(e1)]
curr_dist[e1_idx] = np.Inf
curr_closest_indices = np.argsort(curr_dist)[:10]
for r_idx in curr_closest_indices:
relev_syn = IDtoWords[r_idx]
print(relev_syn)
# Analogy experiments.
e1 = wn.synset('plane_geometry.n.01')
e1_idx = WordstoIDs[str(e1)]
e2 = wn.synset('geometry.n.01')
e2_idx = WordstoIDs[str(e2)]
e3 = wn.synset('novelist.n.01')
e3_idx = WordstoIDs[str(e3)]
vec_e1 = emb_dict[str(e1)]
vec_e2 = emb_dict[str(e2)]
vec_e3 = emb_dict[str(e3)]
vec1_ = hyp_scale(-1, vec_e1)
left_sum = hyp_weighted_sum(1, 1, vec_e2, vec1_)
vec_search = hyp_weighted_sum(1, 1, left_sum, vec_e3)
curr_dist = []
for row_idx in range(W.shape[0]):
curr_vec = W[row_idx,:]
normalized_dist = (dist(curr_vec, vec_search))/relTau
curr_dist.append(normalized_dist)
curr_dist[e1_idx] = np.Inf
curr_dist[e2_idx] = np.Inf
curr_dist[e3_idx] = np.Inf
curr_closest_indices = np.argsort(curr_dist)[:10]
for r_idx in curr_closest_indices:
relev_syn = IDtoWords[r_idx]
print(relev_syn)
| hyperbolics-master | pytorch/analysis/intrinsic.py |
import logging, argh
import os, sys
import networkx as nx
import random
import torch
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader, TensorDataset
import matplotlib as mpl
if torch.cuda.is_available(): mpl.use('Agg')
import matplotlib.pyplot as plt
if torch.cuda.is_available(): plt.ioff()
import scipy
import scipy.sparse.csgraph as csg
import pandas
import numpy as np, math
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, root_dir)
# import utils.load_graph as load_graph
# import utils.vis as vis
# import utils.distortions as dis
# import graph_helpers as gh
# import mds_warmstart
from hyperbolic_models import ProductEmbedding
import json
from hyperbolic_parameter import RParameter
with open("wn_IDtoSyns.txt") as d:
IDtoSyns = json.load(d)
m = torch.load("wordnet_full.emb")
spherical_embs = [S.w for S in m.S]
euclidean_embs = [E.w for E in m.E]
hyperbolic_embs = [H.w for H in m.H]
hyperbolic_matrix = (hyperbolic_embs[0].cpu()).data.numpy()
scale = np.float64(m.scale_params[0].cpu().data.numpy())
print(hyperbolic_matrix)
print(scale)
#Matching the IDs to entities.
final_emb = dict()
for i in range(0, hyperbolic_matrix.shape[0]):
syn = IDtoSyns[str(i)]
vector = hyperbolic_matrix[i]
final_emb[syn] = vector
lines = ["Scaling factor "+str(scale)]
for key in final_emb.keys():
curr_line = str(key) + " " + " ".join(list(map(str,final_emb[key])))
lines.append(curr_line)
with open('wordnet_full.txt', 'w') as f:
f.write('\n'.join(lines))
| hyperbolics-master | pytorch/analysis/postprocess.py |
import networkx as nx
# wrapper for nx.bfs_tree that keeps weights
def get_BFS_tree(G, src):
G_BFS = nx.bfs_tree(G, src)
for edge in G_BFS.edges():
if G[edge[0]][edge[1]]:
G_BFS.add_edge(edge[0], edge[1], weight=G[edge[0]][edge[1]]['weight'])
return G_BFS
def max_degree(G):
    max_d = 0
    max_node = -1
for deg in G.degree(G.nodes()):
if deg[1] > max_d:
max_d = deg[1]
max_node = deg[0]
return [max_node, max_d]
# looks at first edge to determine if weighted
def is_weighted(G):
if len(list(G.edges(data=True))[0][2]):
return True
return False
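# Minimal usage sketch (illustrative addition, not part of the original module); the
# toy graph below is made up.
if __name__ == '__main__':
    _G = nx.Graph()
    _G.add_edge(0, 1, weight=2.0)
    _G.add_edge(1, 2, weight=0.5)
    print(is_weighted(_G))                    # True: edges carry a 'weight' attribute
    print(max_degree(_G))                     # [1, 2]: node 1 has the highest degree
    print(get_BFS_tree(_G, 0).edges(data=True))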
| hyperbolics-master | combinatorial/graph_util.py |
import os
import argh
import numpy as np
import pandas
import networkx as nx
import scipy.sparse.csgraph as csg
from timeit import default_timer as timer
from multiprocessing import Pool
import utils.load_graph as lg
import utils.distortions as dis
import graph_util as gu
def compute_row_stats(i, n, adj_mat_original, hyp_dist_row, weighted, verbose=False):
# the real distances in the graph
true_dist_row = csg.dijkstra(adj_mat_original, indices=[i], unweighted=(not weighted), directed=False).squeeze()
# true_dist_row = csg.dijkstra(adj_mat_original, indices=[i], unweighted=True, directed=True).squeeze()
# print(f"{i}: {true_dist_row}")
# row MAP
neighbors = adj_mat_original.todense()[i].A1
# print(f"row {i}: ", neighbors)
# print("shape", neighbors.shape)
row_map = dis.map_row(neighbors, hyp_dist_row, n, i)
# distortions: worst cases (contraction, expansion) and average
dc, de, avg, _ = dis.distortion_row(true_dist_row, hyp_dist_row, n, i)
# dc, de, avg = 0.0, 0.0, 0.0
# print out stats for this row
if verbose:
print(f"Row {i}, MAP = {curr_map}, distortion = {avg}, d_c = {dc}, d_e = {de}")
return (row_map, avg, dc, de)
@argh.arg("dataset", help="Dataset to compute stats for")
@argh.arg("d_file", help="File with embedded distance matrix")
# @argh.arg("-s", "--save", help="File to save final stats to")
@argh.arg("-q", "--procs", help="Number of processors to use")
@argh.arg("-v", "--verbose", help="Print more detailed stats")
def stats(dataset, d_file, procs=1, verbose=False):
start = timer()
# Load graph
G = lg.load_graph(dataset, directed=True)
n = G.order()
weighted = gu.is_weighted(G)
print("G: ", G.edges)
adj_mat_original = nx.to_scipy_sparse_matrix(G, range(0,n))
print(f"Finished loading graph. Elapsed time {timer()-start}")
# Load distance matrix chunks
hyp_dist_df = pandas.read_csv(d_file, index_col=0)
loaded = timer()
print(f"Finished loading distance matrix. Elapsed time {loaded-start}")
rows = hyp_dist_df.index.values
hyp_dist_mat = hyp_dist_df.as_matrix()
n_ = rows.size
_map = np.zeros(n_)
_d_avg = np.zeros(n_)
_dc = np.zeros(n_)
_de = np.zeros(n_)
for (i, row) in enumerate(rows):
# if row == 0: continue
(_map[i], _d_avg[i], _dc[i], _de[i]) = compute_row_stats(row, n, adj_mat_original, hyp_dist_mat[i,:], weighted=weighted, verbose=verbose)
map_ = np.sum(_map)
d_avg_ = np.sum(_d_avg)
dc_ = np.max(_dc)
de_ = np.max(_de)
if weighted:
print("Note: MAP is not well defined for weighted graphs")
# Final stats:
# n_ -= 1
print(f"MAP = {map_/n_}, d_avg = {d_avg_/n_}, d_wc = {dc_*de_}, d_c = {dc_}, d_e = {de_}")
end = timer()
print(f"Finished computing stats. Total elapsed time {end-start}")
with open(f"{d_file}.stats", "w") as stats_log:
stats_log.write(f"{n_},{map_},{d_avg_},{dc_},{de_}\n")
print(f"Stats saved to {d_file}.stats")
print()
if __name__ == '__main__':
_parser = argh.ArghParser()
_parser.set_default_command(stats)
_parser.dispatch()
| hyperbolics-master | combinatorial/stats.py |
import numpy as np
import os
filename = 'ha30.txt'
# fileout = 'usca312.edges'
if __name__ == '__main__':
base, ext = os.path.splitext(filename)
fileout = f'{base}.edges'
D = np.loadtxt(filename)
print(D.shape)
n = D.shape[0]
with open(fileout, 'w') as fout:
for i in range(n):
for j in range(i+1,n):
e = np.minimum(D[i][j], D[j][i])
fout.write(f'{i} {j} {e/1000.}\n')
| hyperbolics-master | data/edges/preprocess_dist_matrix.py |
import networkx as nx
def make_ancestor_closure(G, name=None):
G_BFS = nx.bfs_tree(G, 0)
G_A = nx.Graph()
if name is not None:
f = open(name + ".edges", 'w')
for node in G_BFS.nodes():
curr = node
while len(list(G_BFS.predecessors(curr))):
curr = list(G_BFS.predecessors(curr))[0]
G_A.add_edge(node, curr)
if name is not None:
f.write(str(node) + "\t" + str(curr) + "\n")
f.close()
return G_A
def save_edges(G, name, data=False):
f = open(name + ".edges", 'w')
for edge in G.edges(data=data):
if data:
f.write(str(edge[0]) + "\t" + str(edge[1]) + "\t" + str(edge[2]['weight']) + "\n")
else:
f.write(str(edge[0]) + "\t" + str(edge[1]) + "\n")
f.close()
def make_tree_weights(G, name=None):
G_DFS = nx.dfs_tree(G, 0)
G_BFS = nx.bfs_tree(G_DFS, 0)
G_W = nx.Graph()
curr_nodes = [0]
next_nodes = []
depth = 0
while 1:
if len(curr_nodes) == 0:
if len(next_nodes) == 0:
break
depth += 1
curr_nodes = next_nodes.copy()
next_nodes.clear()
node = curr_nodes[0]
parent = list(G_BFS.predecessors(node))
if len(parent) > 0:
G_W.add_edge(node, parent[0], weight=3**(depth-1))
curr_nodes.remove(node)
next_nodes += list(G_BFS.successors(node))
if name is not None:
save_edges(G_W, name, data=True)
return G_W
if __name__ == '__main__':
G = nx.balanced_tree(2,3)
G.add_edge(9,10)
make_ancestor_closure(G, 'testclosure')
make_tree_weights(G, 'weighted_testanc')
nx.write_edgelist(G, 'test.edges', data=False)
| hyperbolics-master | data/edges/ancestor_tests.py |
import numpy as np
import networkx as nx
import itertools
import argh
cycle_nodes = 10
tree = nx.balanced_tree(2, 2)
nx.relabel_nodes(tree, {n : n+1 for n in tree.nodes}, copy=False)
tree.add_edge(0, 1)
tree_nodes = len(tree.nodes())
copies = []
for i in range(cycle_nodes):
T = tree.copy()
copies.append(nx.relabel_nodes(T, {n : cycle_nodes * n + i for n in T.nodes}))
G = nx.compose_all(copies + [nx.cycle_graph(cycle_nodes)])
# G = nx.compose_all(copies)
nx.write_edgelist(G, "cycle-tree.edges", data=False)
| hyperbolics-master | data/edges/synthetic/cycle-tree.py |
import numpy as np
import networkx as nx
import itertools
import argh
d = 6
edges = [(0,1), (1,2), (2,3), (3,0)]
n = 4
for t in range(d-1):
edges2 = []
for u,v in edges:
edges2 += [(u, n), (n, v), (v, n+1), (n+1, u)]
n += 2
edges = edges2
nx.write_edgelist(nx.Graph(edges), f"diamond{d}.edges", data=False)
| hyperbolics-master | data/edges/synthetic/diamond.py |
import numpy as np
import networkx as nx
import sys, os
import subprocess
# generate some random trees on the same nodes:
n = 300
t = 5
g_list = []
for i in range(t):
g_list.append(nx.random_tree(n))
# compress the tree:
G = nx.Graph()
for node in range(n):
for tree in range(t):
for edge in g_list[tree].edges(node):
G.add_edge(edge[0], edge[1])
nx.write_edgelist(G, 'compressed_tree.edges', data=False)
| hyperbolics-master | data/edges/synthetic/compressed_tree.py |
import numpy as np
import networkx as nx
import itertools
import argh
# construct generalized Sierpinski graph
# vertices: strings of length d chosen from [n]
def construct(n=3, d=2, base='clique'):
if base in ['clique', 'K', 'k']:
base = 'K'
base_graph = list(nx.complete_graph(n).edges)
if base in ['cycle', 'C', 'c']:
base = 'C'
base_graph = list(nx.cycle_graph(n).edges)
G = nx.Graph()
for t in range(0, d):
choices = [list(range(n))]*t
for prefix in itertools.product(*choices):
# for p in range(n):
# for q in range(p):
for p,q in base_graph:
a = list(prefix) + [p] + [q]*(d-t-1)
b = list(prefix) + [q] + [p]*(d-t-1)
G.add_edge(tuple(a), tuple(b))
def idx(L, base):
if len(L) == 1: return L[0]
return L[-1] + base*idx(L[:-1], base)
mapping = {L : idx(list(L), n) for L in itertools.product(*([list(range(n))]*d))}
G = nx.relabel_nodes(G, mapping, copy=False)
nx.write_edgelist(G, f"sierp-{base}{n}-{d}.edges", data=False)
if __name__ == '__main__':
_parser = argh.ArghParser()
_parser.set_default_command(construct)
_parser.dispatch()
| hyperbolics-master | data/edges/synthetic/sierpinski.py |
import numpy as np
import os
# -----------------------------------------------------------------------------------------------
# Read in proto
# -----------------------------------------------------------------------------------------------
solver = 'inputs/caffenet_solver_8_4gpu.prototxt'
train_val = 'inputs/caffenet_train_val_8_4gpu.prototxt'
cct_path = '/home/software/CaffeConTroll/caffe-ct'
# -----------------------------------------------------------------------------------------------
# Parameters
# -----------------------------------------------------------------------------------------------
LR_list = [0.0001, 0.0002, 0.0004, 0.0008, 0.0016, 0.0032, 0.0064, 0.0128, 0.0256, 0.0512, 0.1024, 0.2048, 0.4096, 0.8192, 1.6384]
# M_list = [0.0, 0.3, 0.6, 0.9] # No staleness
M_list = [0.9]
B_list = [4,16,64,256]
# Do in intervals of highest batch size so it is fair / equal
biggest_batch = max(B_list)
phase1_num_big_batch_iter = 40
phase1_running_avg = 8
phase2_num_big_batch_iter = 1200
# -----------------------------------------------------------------------------------------------
# Helper functions
# -----------------------------------------------------------------------------------------------
def run_experiment(solver, train_val, lr, m, b, num_iter, display, experiment_name):
# Make the output directory
output_dir = 'outputs/B' + str(b) + '/' + experiment_name + '/experiment_LR' + str(lr) + '_M' + str(m)
os.system('mkdir -p ' + output_dir)
solver_out_name = output_dir + '/solver.prototxt'
train_val_out_name = output_dir + '/train_val.prototxt'
# Open the solver
solver_out = open(solver_out_name, 'w')
solver_in = open(solver)
for line in solver_in:
if 'momentum:' in line:
solver_out.write('momentum: ' + str(m) + "\n")
elif 'base_lr:' in line:
solver_out.write('base_lr: ' + str(lr) + "\n")
elif 'max_iter:' in line:
solver_out.write('max_iter: ' + str(num_iter) + "\n")
elif 'display:' in line:
solver_out.write('display: ' + str(display) + "\n")
elif 'net:' in line:
solver_out.write('net: \"' + train_val_out_name + "\"\n")
else:
solver_out.write(line)
solver_in.close()
solver_out.close()
# Open the train_val
train_val_out = open(train_val_out_name, 'w')
train_val_in = open(train_val)
for line in train_val_in:
if ' batch_size:' in line:
train_val_out.write(' batch_size: ' + str(b) + "\n")
else:
train_val_out.write(line)
train_val_in.close()
train_val_out.close()
# Run CcT
log_out = output_dir + '/log.out'
cct_cmd = cct_path + ' train ' + solver_out_name + ' > ' + log_out + ' 2>&1'
print cct_cmd
os.system(cct_cmd)
# Parse that log
logfile = open(log_out)
acc_lines = []
for line in logfile:
if 'PARSE' in line:
# PARSE 100 0.1278 0.878
acc_lines.append(float(line.strip().split()[-1]))
logfile.close()
# Return the accuracies each iter
return acc_lines
# -----------------------------------------------------------------------------------------------
# Run experiments
# -----------------------------------------------------------------------------------------------
for b in B_list:
print '--------------------------------------------------------------------------------'
print 'Beginning batch size ' + str(b)
print '--------------------------------------------------------------------------------'
# First round: Run 1 min each LR
best_3 = []
acc_to_lr_m = {}
for lr in LR_list:
for m in M_list:
            # The # images to process is the biggest batch times phase1_num_big_batch_iter
total_num_imgs_to_process = biggest_batch*phase1_num_big_batch_iter
# The # iter is this divided by the batch size
num_iter = total_num_imgs_to_process/b
# The display freq is biggest_batch / b, i.e. the total # displays will be phase1_num_big_batch_iter
display = biggest_batch / b
# Run experiment
accuracies = run_experiment(solver, train_val, lr, m, b, num_iter, display, 'phase1')
assert len(accuracies) == phase1_num_big_batch_iter
# Note each list element is a display, i.e. it is biggest_batch images processed
final_acc = sum(accuracies[-phase1_running_avg:])/phase1_running_avg
print ' Final Acc = ' + str(final_acc)
# Check if this is top 3, if so add it
if len(best_3) < 3:
best_3.append(final_acc)
acc_to_lr_m[final_acc] = (lr, m)
best_3 = sorted(best_3)
elif final_acc > min(best_3):
assert min(best_3) == best_3[0]
best_3 = best_3[1:]
best_3.append(final_acc)
acc_to_lr_m[final_acc] = (lr, m)
best_3 = sorted(best_3)
print ' -> best_3 = ' + str(best_3)
# Second round: Pick best 3 learning rates and run longer
print ''
for k in best_3:
print 'Running ' + str(acc_to_lr_m[k])
lr = acc_to_lr_m[k][0]
m = acc_to_lr_m[k][1]
        # The # images to process is the biggest batch times phase2_num_big_batch_iter
total_num_imgs_to_process = biggest_batch*phase2_num_big_batch_iter
# The # iter is this divided by the batch size
num_iter = total_num_imgs_to_process/b
        # The display freq is biggest_batch / b, i.e. the total # displays will be phase2_num_big_batch_iter
display = biggest_batch / b
# Run experiment
accuracies = run_experiment(solver, train_val, lr, m, b, num_iter, display, 'phase2')
| CaffeConTroll-master | experiments/batch/batch.py |
import operator
import sys
if len(sys.argv) != 2:
print 'Usage: >>> python process.py filename'
sys.exit(0)
total_num_iters = 0
f = open(sys.argv[1])
current_layer = ''
layer_to_time = {}
for line in f:
line = line.strip()
if 'BATCH:' in line:
total_num_iters += 1
elif 'Time Elapsed' in line:
layer_to_time[current_layer].append(float((line.split())[3]))
elif 'REPORT' in line:
if line not in layer_to_time.keys():
layer_to_time[line] = []
current_layer = line
print 'Detailed Profiling Report'
print 'Average over ' + str(total_num_iters) + ' iterations'
# Make a new dict which maps to the mean only
layer_to_mean_time = {}
for k in layer_to_time.keys():
time_list = layer_to_time[k]
total_sum = sum(time_list)
mean = total_sum / total_num_iters
layer_to_mean_time[k] = mean
# Now print the means sorted
sorted_by_mean = sorted(layer_to_mean_time.items(), key=operator.itemgetter(1))
for layer_mean in reversed(sorted_by_mean):
print layer_mean[0] + "\t" + str(layer_mean[1])
f.close()
| CaffeConTroll-master | tests/process_detailed_profiling.py |
import sys
import random
if len(sys.argv) != 2:
print 'Usage: >>> python generate_conv_test.py test_name'
sys.exit(0)
mB = 4
iD = 3
oD = 8
iR = 127
iC = 127
k = 11
s = 4
p = 2
test_name = sys.argv[1]
fname = 'input/conv_forward_in_' + test_name + '.txt'
f = open(fname, 'w')
print 'Creating ' + fname + '...'
for i in range(iR*iC*iD*mB):
r = (1-2*random.random())/10
f.write(str(r) + ' ')
f.close()
fname = 'input/conv_model_' + test_name + '.txt'
f = open(fname, 'w')
print 'Creating ' + fname + '...'
for i in range(k*k*iD*oD):
r = (1-2*random.random())/10
f.write(str(r) + ' ')
f.close()
fname = 'input/conv_bias_in_' + test_name + '.txt'
f = open(fname, 'w')
print 'Creating ' + fname + '...'
for i in range(oD):
r = (1-2*random.random())/10
f.write(str(r) + ' ')
f.close()
fname = 'input/conv_backward_model_' + test_name + '.txt'
f = open(fname, 'w')
print 'Creating ' + fname + '...'
for i in range(k*k*iD*oD):
r = (1-2*random.random())/10
f.write(str(r) + ' ')
f.close()
#fname = 'output/conv_forward_' + test_name + '.txt'
#f = open(fname, 'w')
#f.close()
#
#fname = 'output/conv_backward_' + test_name + '.txt'
#f = open(fname, 'w')
#f.close()
#
#fname = 'output/conv_bias_' + test_name + '.txt'
#f = open(fname, 'w')
#f.close()
#
#fname = 'output/conv_weights_' + test_name + '.txt'
#f = open(fname, 'w')
#f.close()
#
| CaffeConTroll-master | tests/generate_conv_test.py |
import sys
if len(sys.argv) != 2:
print 'Usage: >>> python process_perf_test.py filename'
sys.exit(0)
f = open(sys.argv[1])
test_to_metric_table = {}
current_test = ''
current_metric = ''
for line in f:
line = line.strip()
if not line:
continue
if '[ RUN ]' in line:
current_test = ( line.split() )[-1]
if current_test not in test_to_metric_table.keys():
test_to_metric_table[current_test] = {}
elif 'report_' in line:
current_metric = line
elif 'Time Elapsed' in line:
test_to_metric_table[current_test][current_metric] = (line.split())[3]
for i,test in enumerate(sorted(list(test_to_metric_table.keys()))):
print test
for metric in test_to_metric_table[test]:
print " " + metric[7:] + "\t" + test_to_metric_table[test][metric]
| CaffeConTroll-master | tests/process_perf_test.py |
import sys
########################################################
# Small Examples
########################################################
D = []
D.append(['a', 'b', 'c', 'd'])
D.append(['e', 'f', 'g', 'h'])
D.append(['i', 'j', 'k', 'l'])
D.append(['m', 'n', 'o', 'p'])
n = 4
k = 2
d = 1
b = 1
o = 1
# First no padding, stride=1
p = 0
s = 1
m = (n + 2*p - k)/s + 1
print 'p = ' + str(p)
print 's = ' + str(s)
print 'm = ' + str(m)
print ''
D_lowered = []
for Dl_r in range(m*m*b):
D_lowered.append([])
for bi in range(b):
for r in range(m):
for c in range(m):
current_row = []
for Dr in range(k):
for Dc in range(k):
current_row.append(D[r+Dr][c+Dc])
D_lowered[bi*m*m + r*m + c] = current_row
for Dl_r in range(len(D_lowered)):
for Dl_c in range(len(D_lowered[Dl_r])):
sys.stdout.write(D_lowered[Dl_r][Dl_c])
sys.stdout.write(" & ")
sys.stdout.write("\\\\\n")
print ''
print ''
sys.exit(0)
# stride=2
p = 0
s = 2
m = (n + 2*p - k)/s + 1
print 'p = ' + str(p)
print 's = ' + str(s)
print 'm = ' + str(m)
print ''
D_lowered = []
for Dl_r in range(m*m*b):
D_lowered.append([])
for bi in range(b):
for r in range(m):
for c in range(m):
current_row = []
for Dr in range(k):
for Dc in range(k):
current_row.append(D[r*s+Dr][c*s+Dc])
D_lowered[bi*m*m + r*m + c] = current_row
for Dl_r in range(len(D_lowered)):
for Dl_c in range(len(D_lowered[Dl_r])):
sys.stdout.write(D_lowered[Dl_r][Dl_c])
sys.stdout.write(" & ")
sys.stdout.write("\\\\\n")
print ''
print ''
# stride=2, padding 1
p = 1
s = 2
m = (n + 2*p - k)/s + 1
print 'p = ' + str(p)
print 's = ' + str(s)
print 'm = ' + str(m)
print ''
D_lowered = []
for Dl_r in range(m*m*b):
D_lowered.append([])
for bi in range(b):
for r in range(m):
for c in range(m):
current_row = []
for Dr in range(k):
if r*s-p+Dr >= 0 and r*s-p+Dr < len(D):
for Dc in range(k):
if c*s-p+Dc >= 0 and c*s-p+Dc < len(D[r*s-p+Dr]):
current_row.append(D[r*s-p+Dr][c*s-p+Dc])
else:
current_row.append('0')
else:
for Dc in range(k):
current_row.append('0')
D_lowered[bi*m*m + r*m + c] = current_row
for Dl_r in range(len(D_lowered)):
for Dl_c in range(len(D_lowered[Dl_r])):
sys.stdout.write(D_lowered[Dl_r][Dl_c])
sys.stdout.write(" & ")
sys.stdout.write("\\\\\n")
print ''
print ''
# stride=2, padding 1
p = 1
s = 3
m = (n + 2*p - k)/s + 1
print 'p = ' + str(p)
print 's = ' + str(s)
print 'm = ' + str(m)
print ''
D_lowered = []
for Dl_r in range(m*m*b):
D_lowered.append([])
for bi in range(b):
for r in range(m):
for c in range(m):
current_row = []
for Dr in range(k):
if r*s-p+Dr >= 0 and r*s-p+Dr < len(D):
for Dc in range(k):
if c*s-p+Dc >= 0 and c*s-p+Dc < len(D[r*s-p+Dr]):
current_row.append(D[r*s-p+Dr][c*s-p+Dc])
else:
current_row.append('0')
else:
for Dc in range(k):
current_row.append('0')
D_lowered[bi*m*m + r*m + c] = current_row
for Dl_r in range(len(D_lowered)):
for Dl_c in range(len(D_lowered[Dl_r])):
sys.stdout.write(D_lowered[Dl_r][Dl_c])
sys.stdout.write(" & ")
sys.stdout.write("\\\\\n")
print ''
print ''
########################################################
# Big example
########################################################
D = []
D.append(['a', 'b', 'c', 'd', 'e'])
D.append(['f', 'g', 'h', 'i', 'j'])
D.append(['k', 'l', 'm', 'n', 'o'])
D.append(['p', 'q', 'r', 's', 't'])
D.append(['u', 'v', 'w', 'x', 'y'])
n = 5
k = 3
d = 1
b = 1
o = 1
# stride=2, padding 2, 5x5
p = 2
s = 2
m = (n + 2*p - k)/s + 1
print 'p = ' + str(p)
print 's = ' + str(s)
print 'm = ' + str(m)
print ''
D_lowered = []
for Dl_r in range(m*m*b):
D_lowered.append([])
for bi in range(b):
for r in range(m):
for c in range(m):
current_row = []
for Dr in range(k):
if r*s-p+Dr >= 0 and r*s-p+Dr < len(D):
for Dc in range(k):
if c*s-p+Dc >= 0 and c*s-p+Dc < len(D[r*s-p+Dr]):
current_row.append(D[r*s-p+Dr][c*s-p+Dc])
else:
current_row.append('0')
else:
for Dc in range(k):
current_row.append('0')
D_lowered[bi*m*m + r*m + c] = current_row
for Dl_r in range(len(D_lowered)):
for Dl_c in range(len(D_lowered[Dl_r])):
sys.stdout.write(D_lowered[Dl_r][Dl_c])
sys.stdout.write(" & ")
sys.stdout.write("\\\\\n")
print ''
print ''
| CaffeConTroll-master | docs/lowering/type_1/pad_stride_example.py |
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.semi_supervised import LabelSpreading
from sklearn.linear_model import SGDClassifier
class BaselineModel(object):
"""
A base class for all sklearn-esque baseline methods
"""
def __init__(self, train_primitive_matrix, val_primitive_matrix,
val_ground, train_ground=None, b=0.5):
"""
Initialize DecisionTree object
"""
self.model = None
self.train_primitive_matrix = train_primitive_matrix
self.val_primitive_matrix = val_primitive_matrix
self.val_ground = val_ground
self.train_ground = train_ground
self.b = b
def fit(self, model):
pass
def evaluate(self, input_primitive_matrix=[]):
"""
Calculate the accuracy and coverage for train and validation sets
"""
self.val_marginals = self.model.predict_proba(self.val_primitive_matrix)[:,1]
        if len(input_primitive_matrix) != 0:
self.train_marginals = self.model.predict_proba(input_primitive_matrix)[:,1]
else:
self.train_marginals = self.model.predict_proba(self.train_primitive_matrix)[:,1]
def calculate_accuracy(marginals, b, ground):
#TODO: Use b for class imbalance?
total = np.shape(np.where(marginals != 0.5))[1]
labels = np.sign(2*(marginals - 0.5))
return np.sum(labels == ground)/float(total)
def calculate_coverage(marginals, b, ground):
#TODO: Use b for class imbalance?
total = np.shape(np.where(marginals != 0.5))[1]
labels = np.sign(2*(marginals - 0.5))
return total/float(len(labels))
self.val_accuracy = calculate_accuracy(self.val_marginals, self.b, self.val_ground)
self.train_accuracy = calculate_accuracy(self.train_marginals, self.b, self.train_ground)
self.val_coverage = calculate_coverage(self.val_marginals, self.b, self.val_ground)
self.train_coverage = calculate_coverage(self.train_marginals, self.b, self.train_ground)
        if len(input_primitive_matrix) != 0:
return self.val_accuracy, [], self.val_coverage, []
else:
return self.val_accuracy, self.train_accuracy, self.val_coverage, self.train_coverage
class BoostClassifier(BaselineModel):
"""
AdaBoost Implementation
"""
def fit(self):
self.model = AdaBoostClassifier(random_state=0)
self.model.fit(self.val_primitive_matrix, self.val_ground)
class DecisionTree(BaselineModel):
"""
DecisionTree Implementation
"""
def fit(self):
self.model = DecisionTreeClassifier(random_state=0)
self.model.fit(self.val_primitive_matrix, self.val_ground)
class SemiSupervised(BaselineModel):
"""
LabelSpreading Implementation
"""
def fit(self):
#Need to concatenate labeled and unlabeled data
#unlabeled data labels are set to -1
X = np.concatenate((self.val_primitive_matrix, self.train_primitive_matrix))
val_labels = (self.val_ground+1)/2.
train_labels = -1.*np.ones(np.shape(self.train_primitive_matrix)[0])
y = np.concatenate((val_labels, train_labels))
self.model = LabelSpreading(kernel='knn')
self.model.fit(X, y)
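# Minimal usage sketch (illustrative addition, not part of the original reef code):
# run the decision-tree baseline end to end on toy primitive matrices. All data
# below is made up; labels live in {-1, +1}.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_val = rng.randint(0, 2, size=(30, 4)).astype(float)
    X_train = rng.randint(0, 2, size=(100, 4)).astype(float)
    y_val = 2. * X_val[:, 0] - 1.
    y_train = 2. * X_train[:, 0] - 1.
    baseline = DecisionTree(X_train, X_val, y_val, train_ground=y_train)
    baseline.fit()
    print(baseline.evaluate())    # (val_acc, train_acc, val_cov, train_cov)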
| reef-master | baselines/models.py |
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
| reef-master | baselines/__init__.py |
reef-master | data/__init__.py |
|
import numpy as np
import scipy
import json
import sklearn.cross_validation
from scipy import sparse
from sklearn.feature_extraction.text import CountVectorizer
def parse_file(filename):
def parse(filename):
movies = []
with open(filename) as f:
for line in f:
obj = json.loads(line)
movies.append(obj)
return movies
f = parse(filename)
gt = []
plots = []
idx = []
for i,movie in enumerate(f):
genre = movie['Genre']
if 'Action' in genre and 'Romance' in genre:
continue
elif 'Action' in genre:
plots = plots+[movie['Plot']]
gt.append(1)
idx.append(i)
elif 'Romance' in genre:
plots = plots+[movie['Plot']]
gt.append(-1)
idx.append(i)
else:
continue
return np.array(plots), np.array(gt)
def split_data(X, plots, y):
np.random.seed(1234)
num_sample = np.shape(X)[0]
num_test = 500
X_test = X[0:num_test,:]
X_train = X[num_test:, :]
plots_train = plots[num_test:]
plots_test = plots[0:num_test]
y_test = y[0:num_test]
y_train = y[num_test:]
# split dev/test
test_ratio = 0.2
X_tr, X_te, y_tr, y_te, plots_tr, plots_te = \
sklearn.cross_validation.train_test_split(X_train, y_train, plots_train, test_size = test_ratio)
return np.array(X_tr.todense()), np.array(X_te.todense()), np.array(X_test.todense()), \
np.array(y_tr), np.array(y_te), np.array(y_test), plots_tr, plots_te, plots_test
class DataLoader(object):
""" A class to load in appropriate numpy arrays
"""
def prune_features(self, val_primitive_matrix, train_primitive_matrix, thresh=0.01):
val_sum = np.sum(np.abs(val_primitive_matrix),axis=0)
train_sum = np.sum(np.abs(train_primitive_matrix),axis=0)
#Only select the indices that fire more than 1% for both datasets
train_idx = np.where((train_sum >= thresh*np.shape(train_primitive_matrix)[0]))[0]
val_idx = np.where((val_sum >= thresh*np.shape(val_primitive_matrix)[0]))[0]
common_idx = list(set(train_idx) & set(val_idx))
return common_idx
def load_data(self, dataset, data_path='./data/imdb/'):
#Parse Files
plots, labels = parse_file(data_path+'budgetandactors.txt')
#read_plots('imdb_plots.tsv')
#Featurize Plots
vectorizer = CountVectorizer(min_df=1, binary=True, \
decode_error='ignore', strip_accents='ascii', ngram_range=(1,2))
X = vectorizer.fit_transform(plots)
valid_feats = np.where(np.sum(X,0)> 2)[1]
X = X[:,valid_feats]
#Split Dataset into Train, Val, Test
train_primitive_matrix, val_primitive_matrix, test_primitive_matrix, \
train_ground, val_ground, test_ground, \
train_plots, val_plots, test_plots = split_data(X, plots, labels)
#Prune Feature Space
common_idx = self.prune_features(val_primitive_matrix, train_primitive_matrix)
return train_primitive_matrix[:,common_idx], val_primitive_matrix[:,common_idx], test_primitive_matrix[:,common_idx], \
np.array(train_ground), np.array(val_ground), np.array(test_ground), \
train_plots, val_plots, test_plots
| reef-master | data/loader.py |
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, LSTM
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
#top_words=5000
def lstm_simple(train_text, y_train, test_text, y_test, bs=64, n=3):
#Label Processing
y_train[y_train == -1] = 0
y_test[y_test == -1] = 0
#Make Tokenizer
tokenizer = Tokenizer()
tokenizer.fit_on_texts(train_text)
tokenizer.fit_on_texts(test_text)
X_train = tokenizer.texts_to_sequences(train_text)
X_test = tokenizer.texts_to_sequences(test_text)
#Make embedding
max_sentence_length=500
X_train = sequence.pad_sequences(X_train, maxlen=max_sentence_length)
X_test = sequence.pad_sequences(X_test, maxlen=max_sentence_length)
embedding_vector_length = 32
vocab_size=len(tokenizer.word_index) + 1
#Model Architecture
model = Sequential()
model.add(Embedding(vocab_size, embedding_vector_length, input_length=max_sentence_length))
model.add(LSTM(100))
model.add(Dense(1, activation="sigmoid"))
#Run the model!
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=n, batch_size=bs)
scores = model.evaluate(X_test, y_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))
y_pred = model.predict(X_test, batch_size=1)
y_pred = np.array([x[0] for x in y_pred])
return y_pred
| reef-master | lstm/imdb_lstm.py |
reef-master | lstm/__init__.py |
|
import numpy as np
from scipy import sparse
def log_odds(p):
"""This is the logit function"""
return np.log(p / (1.0 - p))
def odds_to_prob(l):
"""
This is the inverse logit function logit^{-1}:
l = \log\frac{p}{1-p}
\exp(l) = \frac{p}{1-p}
p = \frac{\exp(l)}{1 + \exp(l)}
"""
return np.exp(l) / (1.0 + np.exp(l))
def sample_data(X, w, n_samples):
"""
Here we do Gibbs sampling over the decision variables (representing our objects), o_j
corresponding to the columns of X
The model is just logistic regression, e.g.
P(o_j=1 | X_{*,j}; w) = logit^{-1}(w \dot X_{*,j})
This can be calculated exactly, so this is essentially a noisy version of the exact calc...
"""
N, R = X.shape
t = np.zeros(N)
f = np.zeros(N)
# Take samples of random variables
idxs = np.round(np.random.rand(n_samples) * (N-1)).astype(int)
ct = np.bincount(idxs)
# Estimate probability of correct assignment
increment = np.random.rand(n_samples) < odds_to_prob(X[idxs, :].dot(w))
increment_f = -1. * (increment - 1)
t[idxs] = increment * ct[idxs]
f[idxs] = increment_f * ct[idxs]
return t, f
def exact_data(X, w, evidence=None):
"""
We calculate the exact conditional probability of the decision variables in
logistic regression; see sample_data
"""
t = odds_to_prob(X.dot(w))
if evidence is not None:
t[evidence > 0.0] = 1.0
t[evidence < 0.0] = 0.0
return t, 1-t
def transform_sample_stats(Xt, t, f, Xt_abs=None):
"""
Here we calculate the expected accuracy of each LF/feature
(corresponding to the rows of X) wrt to the distribution of samples S:
E_S[ accuracy_i ] = E_(t,f)[ \frac{TP + TN}{TP + FP + TN + FN} ]
= \frac{X_{i|x_{ij}>0}*t - X_{i|x_{ij}<0}*f}{t+f}
= \frac12\left(\frac{X*(t-f)}{t+f} + 1\right)
"""
if Xt_abs is None:
Xt_abs = sparse_abs(Xt) if sparse.issparse(Xt) else abs(Xt)
n_pred = Xt_abs.dot(t+f)
m = (1. / (n_pred + 1e-8)) * (Xt.dot(t) - Xt.dot(f))
p_correct = (m + 1) / 2
return p_correct, n_pred
class LabelAggregator(object):
"""LabelAggregator Object that learns the accuracies for the heuristics.
Copied from Snorkel v0.4 NaiveBayes Model with minor changes for simplicity"""
def __init__(self, bias_term=False):
self.w = None
self.bias_term = bias_term
def train(self, X, n_iter=1000, w0=None, rate=0.01, alpha=0.5, mu=1e-6, \
sample=False, n_samples=100, evidence=None, warm_starts=False, tol=1e-6, verbose=True):
"""
Perform SGD wrt the weights w
* n_iter: Number of steps of SGD
* w0: Initial value for weights w
* rate: I.e. the SGD step size
* alpha: Elastic net penalty mixing parameter (0=ridge, 1=lasso)
* mu: Elastic net penalty
* sample: Whether to sample or not
* n_samples: Number of samples per SGD step
* evidence: Ground truth to condition on
* warm_starts:
* tol: For testing for SGD convergence, i.e. stopping threshold
"""
self.X_train = X
# Set up stuff
N, M = X.shape
if verbose:
print "="*80
print "Training marginals (!= 0.5):\t%s" % N
print "Features:\t\t\t%s" % M
print "="*80
Xt = X.transpose()
Xt_abs = np.abs(Xt)
w0 = w0 if w0 is not None else np.ones(M)
# Initialize training
w = w0.copy()
g = np.zeros(M)
l = np.zeros(M)
g_size = 0
# Gradient descent
if verbose:
print "Begin training for rate={}, mu={}".format(rate, mu)
for step in range(n_iter):
# Get the expected LF accuracy
t,f = sample_data(X, w, n_samples=n_samples) if sample else exact_data(X, w, evidence)
p_correct, n_pred = transform_sample_stats(Xt, t, f, Xt_abs)
# Get the "empirical log odds"; NB: this assumes one is correct, clamp is for sampling...
l = np.clip(log_odds(p_correct), -10, 10)
# SGD step with normalization by the number of samples
g0 = (n_pred*(w - l)) / np.sum(n_pred)
# Momentum term for faster training
g = 0.95*g0 + 0.05*g
# Check for convergence
wn = np.linalg.norm(w, ord=2)
g_size = np.linalg.norm(g, ord=2)
if step % 250 == 0 and verbose:
print "\tLearning epoch = {}\tGradient mag. = {:.6f}".format(step, g_size)
if (wn < 1e-12 or g_size / wn < tol) and step >= 10:
if verbose:
print "SGD converged for mu={} after {} steps".format(mu, step)
break
# Update weights
w -= rate * g
# Apply elastic net penalty
w_bias = w[-1]
soft = np.abs(w) - mu
ridge_pen = (1 + (1-alpha) * mu)
# \ell_1 penalty by soft thresholding | \ell_2 penalty
w = (np.sign(w)*np.select([soft>0], [soft], default=0)) / ridge_pen
# Don't regularize the bias term
if self.bias_term:
w[-1] = w_bias
# SGD did not converge
else:
if verbose:
print "Final gradient magnitude for rate={}, mu={}: {:.3f}".format(rate, mu, g_size)
# Return learned weights
self.w = w
def marginals(self, X):
X = X.todense()
marginals = odds_to_prob(X.dot(self.w))
return np.array(marginals)[0]
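# Minimal usage sketch (illustrative addition, not part of the original reef code):
# learn heuristic accuracies from a toy label matrix L (rows = examples, columns =
# heuristics, entries in {-1, 0, +1}) and read off probabilistic labels. The toy
# matrix and hyperparameters below are made up.
if __name__ == '__main__':
    L_toy = np.array([[ 1,  1,  0],
                      [ 1,  0,  1],
                      [-1, -1,  0],
                      [ 0, -1, -1]], dtype=float)
    agg = LabelAggregator()
    agg.train(L_toy, n_iter=200, rate=0.01, verbose=False)
    toy_marginals = agg.marginals(sparse.csr_matrix(L_toy))   # P(y = +1) per example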
| reef-master | program_synthesis/label_aggregator.py |
reef-master | program_synthesis/__init__.py |
|
import numpy as np
import pandas as pd
from sklearn.metrics import f1_score
from program_synthesis.synthesizer import Synthesizer
from program_synthesis.verifier import Verifier
class HeuristicGenerator(object):
"""
A class to go through the synthesizer-verifier loop
"""
def __init__(self, train_primitive_matrix, val_primitive_matrix,
val_ground, train_ground=None, b=0.5):
"""
Initialize HeuristicGenerator object
b: class prior of most likely class (TODO: use somewhere)
beta: threshold to decide whether to abstain or label for heuristics
gamma: threshold to decide whether to call a point vague or not
"""
self.train_primitive_matrix = train_primitive_matrix
self.val_primitive_matrix = val_primitive_matrix
self.val_ground = val_ground
self.train_ground = train_ground
self.b = b
self.vf = None
self.syn = None
self.hf = []
self.feat_combos = []
def apply_heuristics(self, heuristics, primitive_matrix, feat_combos, beta_opt):
"""
Apply given heuristics to given feature matrix X and abstain by beta
heuristics: list of pre-trained logistic regression models
feat_combos: primitive indices to apply heuristics to
beta: best beta value for associated heuristics
"""
def marginals_to_labels(hf,X,beta):
marginals = hf.predict_proba(X)[:,1]
labels_cutoff = np.zeros(np.shape(marginals))
labels_cutoff[marginals <= (self.b-beta)] = -1.
labels_cutoff[marginals >= (self.b+beta)] = 1.
return labels_cutoff
L = np.zeros((np.shape(primitive_matrix)[0],len(heuristics)))
for i,hf in enumerate(heuristics):
L[:,i] = marginals_to_labels(hf,primitive_matrix[:,feat_combos[i]],beta_opt[i])
return L
def prune_heuristics(self,heuristics,feat_combos,keep=1):
"""
Selects the best heuristic based on Jaccard Distance and Reliability Metric
keep: number of heuristics to keep from all generated heuristics
"""
def calculate_jaccard_distance(num_labeled_total, num_labeled_L):
scores = np.zeros(np.shape(num_labeled_L)[1])
for i in range(np.shape(num_labeled_L)[1]):
scores[i] = np.sum(np.minimum(num_labeled_L[:,i],num_labeled_total))/np.sum(np.maximum(num_labeled_L[:,i],num_labeled_total))
return 1-scores
L_val = np.array([])
L_train = np.array([])
beta_opt = np.array([])
max_cardinality = len(heuristics)
for i in range(max_cardinality):
#Note that the LFs are being applied to the entire val set though they were developed on a subset...
beta_opt_temp = self.syn.find_optimal_beta(heuristics[i], self.val_primitive_matrix, feat_combos[i], self.val_ground)
L_temp_val = self.apply_heuristics(heuristics[i], self.val_primitive_matrix, feat_combos[i], beta_opt_temp)
L_temp_train = self.apply_heuristics(heuristics[i], self.train_primitive_matrix, feat_combos[i], beta_opt_temp)
beta_opt = np.append(beta_opt, beta_opt_temp)
if i == 0:
L_val = np.append(L_val, L_temp_val) #converts to 1D array automatically
L_val = np.reshape(L_val,np.shape(L_temp_val))
L_train = np.append(L_train, L_temp_train) #converts to 1D array automatically
L_train = np.reshape(L_train,np.shape(L_temp_train))
else:
L_val = np.concatenate((L_val, L_temp_val), axis=1)
L_train = np.concatenate((L_train, L_temp_train), axis=1)
#Use F1 trade-off for reliability
acc_cov_scores = [f1_score(self.val_ground, L_val[:,i], average='micro') for i in range(np.shape(L_val)[1])]
acc_cov_scores = np.nan_to_num(acc_cov_scores)
if self.vf != None:
#Calculate Jaccard score for diversity
train_num_labeled = np.sum(np.abs(self.vf.L_train.T), axis=0)
jaccard_scores = calculate_jaccard_distance(train_num_labeled,np.abs(L_train))
else:
jaccard_scores = np.ones(np.shape(acc_cov_scores))
#Weighting the two scores to find best heuristic
combined_scores = 0.5*acc_cov_scores + 0.5*jaccard_scores
sort_idx = np.argsort(combined_scores)[::-1][0:keep]
return sort_idx
def run_synthesizer(self, max_cardinality=1, idx=None, keep=1, model='lr'):
"""
Generates Synthesizer object and saves all generated heuristics
max_cardinality: max number of features candidate programs take as input
idx: indices of validation set to fit programs over
keep: number of heuristics to pass to verifier
model: train logistic regression ('lr') or decision tree ('dt')
"""
        if idx is None:
primitive_matrix = self.val_primitive_matrix
ground = self.val_ground
else:
primitive_matrix = self.val_primitive_matrix[idx,:]
ground = self.val_ground[idx]
#Generate all possible heuristics
self.syn = Synthesizer(primitive_matrix, ground, b=self.b)
#Un-flatten indices
def index(a, inp):
i = 0
remainder = 0
while inp >= 0:
remainder = inp
inp -= len(a[i])
i+=1
try:
return a[i-1][remainder] #TODO: CHECK THIS REMAINDER THING WTF IS HAPPENING
except:
import pdb; pdb.set_trace()
#Select keep best heuristics from generated heuristics
hf, feat_combos = self.syn.generate_heuristics(model, max_cardinality)
sort_idx = self.prune_heuristics(hf,feat_combos, keep)
for i in sort_idx:
self.hf.append(index(hf,i))
self.feat_combos.append(index(feat_combos,i))
#create appended L matrices for validation and train set
beta_opt = self.syn.find_optimal_beta(self.hf, self.val_primitive_matrix, self.feat_combos, self.val_ground)
self.L_val = self.apply_heuristics(self.hf, self.val_primitive_matrix, self.feat_combos, beta_opt)
self.L_train = self.apply_heuristics(self.hf, self.train_primitive_matrix, self.feat_combos, beta_opt)
def run_verifier(self):
"""
Generates Verifier object and saves marginals
"""
###THIS IS WHERE THE SNORKEL FLAG IS SET!!!!
self.vf = Verifier(self.L_train, self.L_val, self.val_ground, has_snorkel=False)
self.vf.train_gen_model()
self.vf.assign_marginals()
def gamma_optimizer(self,marginals):
"""
Returns the best gamma parameter for abstain threshold given marginals
marginals: confidences for data from a single heuristic
"""
m = len(self.hf)
gamma = 0.5-(1/(m**(3/2.)))
return gamma
def find_feedback(self):
"""
Finds vague points according to gamma parameter
self.gamma: confidence past 0.5 that relates to a vague or incorrect point
"""
#TODO: flag for re-classifying incorrect points
#incorrect_idx = self.vf.find_incorrect_points(b=self.b)
gamma_opt = self.gamma_optimizer(self.vf.val_marginals)
#gamma_opt = self.gamma
vague_idx = self.vf.find_vague_points(b=self.b, gamma=gamma_opt)
incorrect_idx = vague_idx
self.feedback_idx = list(set(list(np.concatenate((vague_idx,incorrect_idx)))))
def evaluate(self):
"""
Calculate the accuracy and coverage for train and validation sets
"""
self.val_marginals = self.vf.val_marginals
self.train_marginals = self.vf.train_marginals
def calculate_accuracy(marginals, b, ground):
total = np.shape(np.where(marginals != 0.5))[1]
labels = np.sign(2*(marginals - 0.5))
return np.sum(labels == ground)/float(total)
def calculate_coverage(marginals, b, ground):
total = np.shape(np.where(marginals != 0.5))[1]
labels = np.sign(2*(marginals - 0.5))
return total/float(len(labels))
self.val_accuracy = calculate_accuracy(self.val_marginals, self.b, self.val_ground)
self.train_accuracy = calculate_accuracy(self.train_marginals, self.b, self.train_ground)
self.val_coverage = calculate_coverage(self.val_marginals, self.b, self.val_ground)
self.train_coverage = calculate_coverage(self.train_marginals, self.b, self.train_ground)
return self.val_accuracy, self.train_accuracy, self.val_coverage, self.train_coverage
def heuristic_stats(self):
'''For each heuristic, we want the following:
- idx of the features it relies on
- if dt, then the thresholds?
'''
def calculate_accuracy(marginals, b, ground):
total = np.shape(np.where(marginals != 0.5))[1]
labels = np.sign(2*(marginals - 0.5))
return np.sum(labels == ground)/float(total)
def calculate_coverage(marginals, b, ground):
total = np.shape(np.where(marginals != 0))[1]
labels = marginals
return total/float(len(labels))
stats_table = np.zeros((len(self.hf),6))
for i in range(len(self.hf)):
stats_table[i,0] = int(self.feat_combos[i][0])
try:
stats_table[i,1] = int(self.feat_combos[i][1])
except:
stats_table[i,1] = -1.
stats_table[i,2] = calculate_accuracy(self.L_val[:,i], self.b, self.val_ground)
stats_table[i,3] = calculate_accuracy(self.L_train[:,i], self.b, self.train_ground)
stats_table[i,4] = calculate_coverage(self.L_val[:,i], self.b, self.val_ground)
stats_table[i,5] = calculate_coverage(self.L_train[:,i], self.b, self.train_ground)
#Make table
column_headers = ['Feat 1', 'Feat 2', 'Val Acc', 'Train Acc', 'Val Cov', 'Train Cov']
pandas_stats_table = pd.DataFrame(stats_table, columns=column_headers)
return pandas_stats_table
| reef-master | program_synthesis/heuristic_generator.py |
import numpy as np
import itertools
from sklearn.metrics import f1_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
class Synthesizer(object):
"""
A class to synthesize heuristics from primitives and validation labels
"""
def __init__(self, primitive_matrix, val_ground,b=0.5):
"""
Initialize Synthesizer object
b: class prior of most likely class
beta: threshold to decide whether to abstain or label for heuristics
"""
self.val_primitive_matrix = primitive_matrix
self.val_ground = val_ground
self.p = np.shape(self.val_primitive_matrix)[1]
self.b=b
def generate_feature_combinations(self, cardinality=1):
"""
Create a list of primitive index combinations for given cardinality
max_cardinality: max number of features each heuristic operates over
"""
primitive_idx = range(self.p)
feature_combinations = []
for comb in itertools.combinations(primitive_idx, cardinality):
feature_combinations.append(comb)
return feature_combinations
def fit_function(self, comb, model):
"""
        Fits a single model over the given feature combination
        comb: feature combination to fit model over
        model: 'lr' (logistic regression), 'dt' (decision tree), or 'nn' (k-nearest neighbors)
"""
X = self.val_primitive_matrix[:,comb]
if np.shape(X)[0] == 1:
X = X.reshape(-1,1)
# fit decision tree or logistic regression or knn
if model == 'dt':
dt = DecisionTreeClassifier(max_depth=len(comb))
dt.fit(X,self.val_ground)
return dt
elif model == 'lr':
lr = LogisticRegression()
lr.fit(X,self.val_ground)
return lr
elif model == 'nn':
nn = KNeighborsClassifier(algorithm='kd_tree')
nn.fit(X,self.val_ground)
return nn
def generate_heuristics(self, model, max_cardinality=1):
"""
Generates heuristics over given feature cardinality
model: fit logistic regression or a decision tree
max_cardinality: max number of features each heuristic operates over
"""
#have to make a dictionary?? or feature combinations here? or list of arrays?
feature_combinations_final = []
heuristics_final = []
for cardinality in range(1, max_cardinality+1):
feature_combinations = self.generate_feature_combinations(cardinality)
heuristics = []
for i,comb in enumerate(feature_combinations):
heuristics.append(self.fit_function(comb, model))
feature_combinations_final.append(feature_combinations)
heuristics_final.append(heuristics)
return heuristics_final, feature_combinations_final
def beta_optimizer(self,marginals, ground):
"""
Returns the best beta parameter for abstain threshold given marginals
        Selects the beta that maximizes the F1 score on the given ground truth
marginals: confidences for data from a single heuristic
"""
#Set the range of beta params
#0.25 instead of 0.0 as a min makes controls coverage better
beta_params = np.linspace(0.25,0.45,10)
f1 = []
for beta in beta_params:
labels_cutoff = np.zeros(np.shape(marginals))
labels_cutoff[marginals <= (self.b-beta)] = -1.
labels_cutoff[marginals >= (self.b+beta)] = 1.
f1.append(f1_score(ground, labels_cutoff, average='weighted'))
f1 = np.nan_to_num(f1)
return beta_params[np.argsort(np.array(f1))[-1]]
def find_optimal_beta(self, heuristics, X, feat_combos, ground):
"""
Returns optimal beta for given heuristics
heuristics: list of pre-trained logistic regression models
X: primitive matrix
feat_combos: feature indices to apply heuristics to
ground: ground truth associated with X data
"""
beta_opt = []
for i,hf in enumerate(heuristics):
marginals = hf.predict_proba(X[:,feat_combos[i]])[:,1]
labels_cutoff = np.zeros(np.shape(marginals))
beta_opt.append((self.beta_optimizer(marginals, ground)))
return beta_opt
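# Minimal usage sketch (illustrative addition, not part of the original reef code):
# fit depth-1 decision-tree heuristics over toy binary primitives and recover their
# abstain thresholds. The toy data below is made up.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_toy = rng.randint(0, 2, size=(40, 3)).astype(float)
    y_toy = 2. * X_toy[:, 0] - 1.                 # toy ground truth tied to primitive 0
    syn = Synthesizer(X_toy, y_toy, b=0.5)
    hfs, combos = syn.generate_heuristics('dt', max_cardinality=1)
    betas = syn.find_optimal_beta(hfs[0], X_toy, combos[0], y_toy)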
| reef-master | program_synthesis/synthesizer.py |
import numpy as np
from scipy import sparse
from label_aggregator import LabelAggregator
def odds_to_prob(l):
"""
This is the inverse logit function logit^{-1}:
l = \log\frac{p}{1-p}
\exp(l) = \frac{p}{1-p}
p = \frac{\exp(l)}{1 + \exp(l)}
"""
return np.exp(l) / (1.0 + np.exp(l))
class Verifier(object):
"""
A class for the Snorkel Model Verifier
"""
def __init__(self, L_train, L_val, val_ground, has_snorkel=True):
self.L_train = L_train.astype(int)
self.L_val = L_val.astype(int)
self.val_ground = val_ground
self.has_snorkel = has_snorkel
if self.has_snorkel:
from snorkel.learning import GenerativeModel
from snorkel.learning import RandomSearch
from snorkel.learning.structure import DependencySelector
def train_gen_model(self,deps=False,grid_search=False):
"""
Calls appropriate generative model
"""
if self.has_snorkel:
#TODO: GridSearch
from snorkel.learning import GenerativeModel
from snorkel.learning import RandomSearch
from snorkel.learning.structure import DependencySelector
gen_model = GenerativeModel()
gen_model.train(self.L_train, epochs=100, decay=0.001 ** (1.0 / 100), step_size=0.005, reg_param=1.0)
else:
gen_model = LabelAggregator()
gen_model.train(self.L_train, rate=1e-3, mu=1e-6, verbose=False)
self.gen_model = gen_model
def assign_marginals(self):
"""
Assigns probabilistic labels for train and val sets
"""
self.train_marginals = self.gen_model.marginals(sparse.csr_matrix(self.L_train))
self.val_marginals = self.gen_model.marginals(sparse.csr_matrix(self.L_val))
#print 'Learned Accuracies: ', odds_to_prob(self.gen_model.w)
def find_vague_points(self,gamma=0.1,b=0.5):
"""
Find val set indices where marginals are within thresh of b
"""
val_idx = np.where(np.abs(self.val_marginals-b) <= gamma)
return val_idx[0]
def find_incorrect_points(self,b=0.5):
""" Find val set indices where marginals are incorrect """
val_labels = 2*(self.val_marginals > b)-1
val_idx = np.where(val_labels != self.val_ground)
return val_idx[0] | reef-master | program_synthesis/verifier.py |
import ray
from database import Database, save_results_to_es
from utils.experiment_utils import *
# from experiment_driver import map_runstats_to_modelpath
import pickle
import os
import json
from utils.metadata_utils import append_experiment_metadata
ray.init(address="auto")
datasets = ["agnews"]
encoders = ["rnn", "distilbert", "t5", "electra"]
elastic_config_file = "./elasticsearch_config.yaml"
paths_to_dataset = {
"agnews": "/experiments/ludwig-bench-textclassification/data/agnews_1.0/processed/agnews.csv"
}
def main():
elastic_config = None
elastic_config = load_yaml(elastic_config_file)
exp_info = []
for dataset in datasets:
for enc in encoders:
path_to_stats_file = f"/experiments/ludwig-bench-textclassification/experiment-outputs/{dataset}_{enc}/{dataset}_{enc}_hyperopt_results.pkl"
path_to_output_dir = f"/experiments/ludwig-bench-textclassification/experiment-outputs/{dataset}_{enc}/"
path_to_model_config = f"/experiments/ludwig-bench-textclassification/experiment-configs/config_{dataset}_{enc}.yaml"
model_config = load_yaml(path_to_model_config)
path_to_dataset = paths_to_dataset[dataset]
experiment_attr = {
"model_config": copy.deepcopy(model_config),
"dataset_path": path_to_dataset,
"top_n_trials": None,
"model_name": f"config_{dataset}_{enc}",
"output_dir": path_to_output_dir,
"encoder": enc,
"dataset": dataset,
"elastic_config": elastic_config,
}
hyperopt_results = pickle.load(open(path_to_stats_file, "rb"))
exp_info.append((experiment_attr, hyperopt_results))
outputs = ray.get(
[
save_results_to_es.remote(info[0], info[1], "ray")
for info in exp_info
]
)
if __name__ == "__main__":
main() | ludwig-benchmarking-toolkit-main | upload_to_db.py |
import os
PATH_HERE = os.path.abspath(os.path.dirname(__file__))
ENCODER_CONFIG_DIR = os.path.join(PATH_HERE, "model-configs")
# EXPERIMENT_CONFIGS_DIR = '/experiments/ludwig-bench-textclassification/experiment-configs'
EXPERIMENT_CONFIGS_DIR = os.path.join(PATH_HERE, "hyperopt-experiment-configs")
DATASET_CACHE_DIR = os.path.join(PATH_HERE, "datasets")
ENERGY_LOGGING_DIR = os.path.join(PATH_HERE, "energy_logging")
ENCODER_HYPEROPT_FILENAMES = {
    "bert": "bert_hyperopt.yaml",
    "rnn": "rnn_hyperopt.yaml",
    "distilbert": "distilbert_hyperopt.yaml",
    "electra": "electra_hyperopt.yaml",
    "roberta": "roberta_hyperopt.yaml",
    "stacked_parallel_cnn": "stackedparallelcnn_hyperopt.yaml",
    "t5": "t5_hyperopt.yaml",
    "resnet": "resnet_hyperopt.yaml",
    "stacked_cnn": "stackedcnn_hyperopt.yaml",
}
ENCODER_FILE_LIST = ENCODER_HYPEROPT_FILENAMES.values()
DATASETS_LIST = None
CONFIG_TEMPLATE_FILE = "./experiment-templates/task_template.yaml"
DATASET_METADATA_FILE = "./experiment-templates/dataset_metadata.yaml"
HYPEROPT_CONFIG_FILE = "./experiment-templates/hyperopt_config.yaml"
EXPERIMENT_OUTPUT_DIR = "./experiment-outputs"
PATH_TO_PRETRAINED_EMBEDDINGS = None
RUNTIME_ENV = "local"
| ludwig-benchmarking-toolkit-main | globals.py |
import copy
import json
import logging
import os
import ray
import socket
from elasticsearch import Elasticsearch
from lbt.utils.experiment_utils import (
    format_fields_float,
    get_model_ckpt_paths,
    hash_dict,
    substitute_dict_parameters,
)
# from utils.metadata_utils import append_experiment_metadata
from lbt.metrics import get_experiment_metadata
hostname = socket.gethostbyname(socket.gethostname())
# TODO: ASN --> DECOUPLE BUILDING ES DOCUMENT W/SAVING
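# Note on the resources argument below: Ray exposes a custom "node:<ip>" resource
# for every node in the cluster, so requesting a tiny amount of f"node:{hostname}"
# effectively pins this task to the driver's own node (my reading of the intent;
# the 0.001 value is just a nominal reservation).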
@ray.remote(num_cpus=0, resources={f"node:{hostname}": 0.001})
def save_results_to_es(
    experiment_attr: dict,
    hyperopt_results: list,
    tune_executor: str,
    top_n_trials: int = None,
    reupload=False,
    num_gpus=0,
):
    elastic_config = experiment_attr["elastic_config"]
    es_db = Database(
        elastic_config["host"],
        (elastic_config["username"], elastic_config["password"]),
        elastic_config["username"],
        elastic_config["index"],
    )

    # save only the top_n model configs to elastic
    if top_n_trials is not None and len(hyperopt_results) > top_n_trials:
        hyperopt_results = hyperopt_results[0:top_n_trials]

    hyperopt_run_data = get_model_ckpt_paths(
        hyperopt_results, experiment_attr["output_dir"], executor=tune_executor
    )

    sampled_params = {}

    # ensures that all numerical values are of type float
    format_fields_float(hyperopt_results)

    for run in hyperopt_run_data:
        new_config = substitute_dict_parameters(
            copy.deepcopy(experiment_attr["model_config"]),
            parameters=run["hyperopt_results"]["parameters"],
        )
        del new_config["hyperopt"]

        # do some accounting of duplicate hyperparam configs (this count will
        # be added to the dict which will be hashed for the elastic document id)
        param_hash = hash_dict(run["hyperopt_results"]["parameters"])
        if param_hash in sampled_params:
            sampled_params[param_hash] += 1
        else:
            sampled_params[param_hash] = 1

        document = {
            "hyperopt_results": run["hyperopt_results"],
            "model_path": run["model_path"],
        }

        try:
            get_experiment_metadata(
                document,
                model_path=run["model_path"],
                data_path=experiment_attr["dataset_path"],
                run_stats=run,
                num_gpus=num_gpus,
            )
        except Exception:
            # metadata collection is best-effort; upload the document regardless
            pass

        formatted_document = es_db.format_document(
            document,
            encoder=experiment_attr["encoder"],
            dataset=experiment_attr["dataset"],
            config=experiment_attr["model_config"],
        )
        formatted_document["sampled_run_config"] = new_config

        ds = experiment_attr["dataset"]
        enc = experiment_attr["encoder"]
        # doc_key = run["hyperopt_results"]["eval_stats"]
        trial_count = sampled_params[param_hash]
        doc_key = copy.deepcopy(new_config)
        doc_key["trial"] = trial_count
        try:
            es_db.upload_document(hash_dict(doc_key), formatted_document)
            logging.info(f"{ds} x {enc} uploaded to elastic.")
        except Exception:
            logging.warning(f"error uploading {ds} x {enc} to elastic...")
    return 1
class Database:
    def __init__(self, host, http_auth, user_id, index):
        self.host = host
        self.http_auth = http_auth
        self.user_id = user_id
        self.index = index
        self._initialize_db()
        self._create_index(self.index)

    def _initialize_db(self):
        self.es_connection = Elasticsearch(
            [self.host], http_auth=self.http_auth
        )

    def _create_index(self, index_name: str):
        # nested mapping so each sampled_run_config is indexed as its own object
        mapping = {
            "mappings": {
                "_doc": {
                    "properties": {"sampled_run_config": {"type": "nested"}}
                }
            }
        }
        # ignore=400 makes creation a no-op if the index already exists
        self.es_connection.indices.create(
            index=index_name, body=mapping, include_type_name=True, ignore=400
        )

    def upload_document(self, id, document):
        self.es_connection.index(index=self.index, id=id, body=document)

    def remove_document(self, id):
        self.es_connection.delete(index=self.index, id=id)

    def document_exists(self, id):
        return self.es_connection.exists(index=self.index, id=id)

    def search(self, query, size=1000):
        return self.es_connection.search(
            index=self.index, body=query, size=size
        )

    def upload_document_from_outputdir(
        self,
        dir_path,
        encoder,
        dataset,
    ):
        hyperopt_stats = json.load(
            open(os.path.join(dir_path, "hyperopt_statistics.json"), "rb"),
            parse_int=float,
        )
        formatted_document = self.format_document(
            hyperopt_stats, encoder, dataset
        )
        self.es_connection.index(
            index=self.index,
            id=hash_dict(hyperopt_stats["hyperopt_config"]),
            body=formatted_document,
        )

    def format_document(self, document, encoder, dataset, config=None):
        formatted_document = {
            "user_id": self.user_id,
            "encoder": encoder,
            "dataset": dataset,
        }
        formatted_document.update(document)
        if config is not None:
            formatted_document.update({"hyperopt_exp_config": config})
        return formatted_document
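# Minimal usage sketch (illustrative only -- the host, credentials, and index
# name are placeholders, not values taken from this repo):
#
#     db = Database(
#         host="localhost:9200",
#         http_auth=("user", "password"),
#         user_id="user",
#         index="lbt-experiments",
#     )
#     doc = db.format_document(
#         {"metrics": {"accuracy": 0.9}}, encoder="rnn", dataset="agnews"
#     )
#     db.upload_document(hash_dict(doc), doc)
#     hits = db.search({"query": {"match": {"dataset": "agnews"}}})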
| ludwig-benchmarking-toolkit-main | database.py |
import argparse
import datetime
import logging
import ray
import globals
from lbt.utils.experiment_utils import set_globals, load_yaml
from lbt.experiments import (
    run_experiments,
    reproduce_experiment,
    download_data,
)
from lbt.datasets import DATASET_REGISTRY
import lbt.build_def_files
from lbt.build_def_files import build_config_files
logging.basicConfig(
    format="[\N{books} LUDWIG-BENCHMARKING-TOOLKIT \N{books}] => %(levelname)s::%(message)s",
    level=logging.DEBUG,
)
def main():
    parser = argparse.ArgumentParser(
        description="Ludwig Benchmarking Toolkit experiment driver script",
    )
    parser.add_argument(
        "-hcd",
        "--hyperopt_config_dir",
        help="directory to save all model configs",
        type=str,
        default=globals.EXPERIMENT_CONFIGS_DIR,
    )
    parser.add_argument(
        "--resume_existing_exp",
        help="resume a previously stopped experiment",
        type=bool,
        default=False,
    )
    parser.add_argument(
        "-eod",
        "--experiment_output_dir",
        help="directory to save hyperopt runs",
        type=str,
        default=globals.EXPERIMENT_OUTPUT_DIR,
    )
    parser.add_argument(
        "--datasets",
        help="list of datasets to run experiments on",
        nargs="+",
        choices=list(DATASET_REGISTRY.keys()),
        default=None,
        required=True,
    )
    parser.add_argument(
        "-re",
        "--run_environment",
        help="environment in which the experiment will be run",
        choices=["local", "gcp"],
        default="local",
    )
    parser.add_argument(
        "-esc",
        "--elasticsearch_config",
        help="path to the elastic db config file",
        type=str,
        default=None,
    )
    parser.add_argument(
        "-dcd",
        "--dataset_cache_dir",
        help="path to cache downloaded datasets",
        type=str,
        default=globals.DATASET_CACHE_DIR,
    )
    # list of encoders to run the hyperopt search over;
    # the default is to use all supported Ludwig encoders
    parser.add_argument(
        "-mel",
        "--custom_model_list",
        help="list of encoders to run hyperopt experiments on. "
        "The default setting is to use all supported Ludwig encoders",
        nargs="+",
        choices=[
            "all",
            "bert",
            "rnn",
            "stacked_parallel_cnn",
            "roberta",
            "distilbert",
            "electra",
            "resnet",
            "stacked_cnn",
            "t5",
        ],
        default="all",
    )
    parser.add_argument(
        "-topn",
        "--top_n_trials",
        help="top n trials to save model performance for",
        type=int,
        default=None,
    )
    parser.add_argument(
        "-reproduce",
        "--experiment_to_reproduce",
        help="path to an LBT experiment config to reproduce an experiment from",
        type=str,
        default=None,
    )
    args = parser.parse_args()
    set_globals(args)

    data_file_paths = download_data(args.dataset_cache_dir, args.datasets)
    logging.info("Datasets successfully downloaded...")

    config_files = build_config_files()
    logging.info("Experiment configuration files built...")

    elastic_config = None
    if args.elasticsearch_config is not None:
        elastic_config = load_yaml(args.elasticsearch_config)

    experiment_config = None
    if args.experiment_to_reproduce is not None:
        experiment_config = load_yaml(args.experiment_to_reproduce)

    if args.run_environment == "gcp":
        ray.init(address="auto")

    if experiment_config:
        reproduce_experiment(
            model=args.custom_model_list[0],
            dataset=args.datasets[0],
            data_file_paths=data_file_paths,
            experiment_to_replicate=args.experiment_to_reproduce,
            run_environment=args.run_environment,
        )
    else:
        run_experiments(
            data_file_paths,
            config_files,
            top_n_trials=args.top_n_trials,
            elastic_config=elastic_config,
            run_environment=args.run_environment,
            resume_existing_exp=args.resume_existing_exp,
        )


if __name__ == "__main__":
    main()
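# Example invocation (illustrative; assumes "agnews" is registered in
# DATASET_REGISTRY and that an elasticsearch config exists at the given path):
#
#     python experiment_driver.py \
#         --datasets agnews \
#         --custom_model_list rnn distilbert \
#         --run_environment local \
#         --elasticsearch_config ./elasticsearch_config.yaml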
| ludwig-benchmarking-toolkit-main | experiment_driver.py |
__version__ = "0.3.0.post1"
| ludwig-benchmarking-toolkit-main | lbt/__init__.py |