code (string, 66 to 870k chars) | docstring (string, 19 to 26.7k chars) | func_name (string, 1 to 138 chars) | language (1 class) | repo (string, 7 to 68 chars) | path (string, 5 to 324 chars) | url (string, 46 to 389 chars) | license (7 classes) |
---|---|---|---|---|---|---|---|
def logerror(logger: logging.Logger = logging.root):
"""A decorator that wraps the passed in function and logs exceptions.
Parameters
----------
logger: logging.Logger
The logger to which to log the error.
"""
def log_wrapper(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except Exception as e:
# log the exception
logger.exception(
f'{function.__name__}(args={args}, kwargs={kwargs}) failed:\n{e}.')
raise e
return wrapper
return log_wrapper | A decorator that wraps the passed in function and logs exceptions.
Parameters
----------
logger: logging.Logger
The logger to which to log the error.
| logerror | python | dmlc/gluon-nlp | src/gluonnlp/utils/misc.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py | Apache-2.0 |
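A brief usage sketch for the decorator in the row above. It assumes gluon-nlp (the MXNet-based v1.x package) is installed so that `logerror` can be imported from `gluonnlp.utils.misc`, as the path column indicates; the demo logger and the `divide` function are made up for illustration.

```python
import logging

# Assumption: gluon-nlp is installed and exposes logerror at this path (see the path column).
from gluonnlp.utils.misc import logerror

logging.basicConfig(level=logging.ERROR)
demo_logger = logging.getLogger('demo')

@logerror(logger=demo_logger)
def divide(a, b):            # hypothetical function used only for this illustration
    return a / b

try:
    divide(1, 0)             # the ZeroDivisionError is logged with args/kwargs, then re-raised
except ZeroDivisionError:
    pass
```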
def grouper(iterable, n, fillvalue=None):
"""Collect data into fixed-length chunks or blocks"""
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return itertools.zip_longest(*args, fillvalue=fillvalue) | Collect data into fixed-length chunks or blocks | grouper | python | dmlc/gluon-nlp | src/gluonnlp/utils/misc.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py | Apache-2.0 |
def repeat(iterable, count=None):
"""Repeat a basic iterator for multiple rounds
Parameters
----------
iterable
The basic iterable
count
Repeat the basic iterable for "count" times. If it is None, it will be an infinite iterator.
Returns
-------
new_iterable
A new iterable in which the basic iterator has been repeated for multiple rounds.
"""
if count is None:
while True:
for sample in iterable:
yield sample
else:
for i in range(count):
for sample in iterable:
yield sample | Repeat a basic iterator for multiple rounds
Parameters
----------
iterable
The basic iterable
count
Repeat the basic iterable for "count" times. If it is None, it will be an infinite iterator.
Returns
-------
new_iterable
A new iterable in which the basic iterator has been repeated for multiple rounds.
| repeat | python | dmlc/gluon-nlp | src/gluonnlp/utils/misc.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py | Apache-2.0 |
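A short usage sketch of `repeat`, again assuming gluon-nlp is installed so the function can be imported from `gluonnlp.utils.misc`; `itertools.islice` is used to take a finite prefix of the infinite case.

```python
from itertools import islice

from gluonnlp.utils.misc import repeat   # assumption: same module as the row above

print(list(repeat([1, 2, 3], count=2)))      # [1, 2, 3, 1, 2, 3]
print(list(islice(repeat([1, 2, 3]), 7)))    # count=None -> infinite; take 7: [1, 2, 3, 1, 2, 3, 1]
```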
def load_checksum_stats(path: str) -> dict:
"""
Parameters
----------
path
Path to the stored checksum
Returns
-------
file_stats
"""
file_stats = dict()
with open(path, 'r', encoding='utf-8') as f:
for line in f:
name, hex_hash, file_size = line.strip().split()
file_stats[name] = hex_hash
if name[8:27] == 'gluonnlp-numpy-data':
new_name = name.replace('https://gluonnlp-numpy-data.s3-accelerate.amazonaws.com', 's3://gluonnlp-numpy-data')
file_stats[new_name] = hex_hash
return file_stats |
Parameters
----------
path
Path to the stored checksum
Returns
-------
file_stats
| load_checksum_stats | python | dmlc/gluon-nlp | src/gluonnlp/utils/misc.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py | Apache-2.0 |
def download_file_from_google_drive(file_id, dest_path, overwrite=False, showsize=False):
"""Downloads a shared file from google drive into a given folder.
Optionally unzips it.
Parameters
----------
file_id: str
the file identifier.
You can obtain it from the sharable link.
dest_path: str
the destination where to save the downloaded file.
Must be a path (for example: './downloaded_file.txt')
overwrite: bool
optional, if True forces re-download and overwrite.
showsize: bool
optional, if True print the current download size.
"""
destination_directory = os.path.dirname(dest_path)
if not os.path.exists(destination_directory):
os.makedirs(destination_directory)
if not os.path.exists(dest_path) or overwrite:
session = requests.Session()
print('Downloading {} into {}... '.format(file_id, dest_path), end='')
sys.stdout.flush()
response = session.get(GoogleDriveDownloader.DOWNLOAD_URL,
params={'id': file_id}, stream=True)
token = GoogleDriveDownloader._get_confirm_token(response)
if token:
params = {'id': file_id, 'confirm': token}
response = session.get(GoogleDriveDownloader.DOWNLOAD_URL,
params=params, stream=True)
if showsize:
print() # Skip to the next line
current_download_size = [0]
GoogleDriveDownloader._save_response_content(response, dest_path, showsize,
current_download_size)
print('Done.') | Downloads a shared file from google drive into a given folder.
Optionally unzips it.
Parameters
----------
file_id: str
the file identifier.
You can obtain it from the sharable link.
dest_path: str
the destination where to save the downloaded file.
Must be a path (for example: './downloaded_file.txt')
overwrite: bool
optional, if True forces re-download and overwrite.
showsize: bool
optional, if True print the current download size.
| download_file_from_google_drive | python | dmlc/gluon-nlp | src/gluonnlp/utils/misc.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py | Apache-2.0 |
def download(url: str,
path: Optional[str] = None,
overwrite: Optional[bool] = False,
sha1_hash: Optional[str] = None,
retries: Optional[int] = 5,
verify_ssl: Optional[bool] = True,
anonymous_credential: Optional[bool] = True) -> str:
"""Download a given URL
Parameters
----------
url
URL to download
path
Destination path to store downloaded file. By default stores to the
current directory with same name as in url.
overwrite
Whether to overwrite destination file if already exists.
sha1_hash
Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified
but doesn't match.
retries
The number of times to attempt the download in case of failure or non 200 return codes
verify_ssl
Verify SSL certificates.
anonymous_credential
Whether to force to use anonymous credential if the path is from S3.
Returns
-------
fname
The file path of the downloaded file.
"""
is_s3 = url.startswith(S3_PREFIX)
if is_s3:
boto3, botocore = try_import_boto3()
s3 = boto3.resource('s3')
if boto3.session.Session().get_credentials() is None or anonymous_credential:
from botocore.handlers import disable_signing
s3.meta.client.meta.events.register('choose-signer.s3.*', disable_signing)
components = url[len(S3_PREFIX):].split('/')
if len(components) < 2:
raise ValueError('Invalid S3 url. Received url={}'.format(url))
s3_bucket_name = components[0]
s3_key = '/'.join(components[1:])
if path is None:
fname = url.split('/')[-1]
# Empty filenames are invalid
assert fname, 'Can\'t construct file-name from this URL. ' \
'Please set the `path` option manually.'
else:
path = os.path.expanduser(path)
if os.path.isdir(path):
fname = os.path.join(path, url.split('/')[-1])
else:
fname = path
assert retries >= 0, "Number of retries should be at least 0, currently it's {}".format(
retries)
if not verify_ssl:
warnings.warn(
'Unverified HTTPS request is being made (verify_ssl=False). '
'Adding certificate verification is strongly advised.')
if overwrite or not os.path.exists(fname) or (sha1_hash and not sha1sum(fname) == sha1_hash):
dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))
if not os.path.exists(dirname):
os.makedirs(dirname, exist_ok=True)
while retries + 1 > 0:
# Disable pylint too broad Exception
# pylint: disable=W0703
try:
print('Downloading {} from {}...'.format(fname, url))
if is_s3:
response = s3.meta.client.head_object(Bucket=s3_bucket_name,
Key=s3_key)
total_size = int(response.get('ContentLength', 0))
random_uuid = str(uuid.uuid4())
tmp_path = '{}.{}'.format(fname, random_uuid)
if tqdm is not None:
def hook(t_obj):
def inner(bytes_amount):
t_obj.update(bytes_amount)
return inner
with tqdm.tqdm(total=total_size, unit='iB', unit_scale=True) as t:
s3.meta.client.download_file(s3_bucket_name, s3_key, tmp_path,
Callback=hook(t))
else:
s3.meta.client.download_file(s3_bucket_name, s3_key, tmp_path)
else:
r = requests.get(url, stream=True, verify=verify_ssl)
if r.status_code != 200:
raise RuntimeError('Failed downloading url {}'.format(url))
# create uuid for temporary files
random_uuid = str(uuid.uuid4())
total_size = int(r.headers.get('content-length', 0))
chunk_size = 1024
if tqdm is not None:
t = tqdm.tqdm(total=total_size, unit='iB', unit_scale=True)
with open('{}.{}'.format(fname, random_uuid), 'wb') as f:
for chunk in r.iter_content(chunk_size=chunk_size):
if chunk: # filter out keep-alive new chunks
if tqdm is not None:
t.update(len(chunk))
f.write(chunk)
if tqdm is not None:
t.close()
# if the target file exists (created by other processes)
# and has the same hash as the target file,
# delete the temporary file
if not os.path.exists(fname) or (sha1_hash and not sha1sum(fname) == sha1_hash):
# atomic operation in the same file system
replace_file('{}.{}'.format(fname, random_uuid), fname)
else:
try:
os.remove('{}.{}'.format(fname, random_uuid))
except OSError:
pass
finally:
warnings.warn(
'File {} exists in file system so the downloaded file is deleted'.format(fname))
if sha1_hash and not sha1sum(fname) == sha1_hash:
raise UserWarning(
'File {} is downloaded but the content hash does not match.'
' The repo may be outdated or download may be incomplete. '
'If the "repo_url" is overridden, consider switching to '
'the default repo.'.format(fname))
break
except Exception as e:
retries -= 1
if retries <= 0:
raise e
print('download failed due to {}, retrying, {} attempt{} left'
.format(repr(e), retries, 's' if retries > 1 else ''))
return fname | Download a given URL
Parameters
----------
url
URL to download
path
Destination path to store downloaded file. By default stores to the
current directory with same name as in url.
overwrite
Whether to overwrite destination file if already exists.
sha1_hash
Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified
but doesn't match.
retries
The number of times to attempt the download in case of failure or non 200 return codes
verify_ssl
Verify SSL certificates.
anonymous_credential
Whether to force to use anonymous credential if the path is from S3.
Returns
-------
fname
The file path of the downloaded file.
| download | python | dmlc/gluon-nlp | src/gluonnlp/utils/misc.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py | Apache-2.0 |
def check_version(min_version: str,
warning_only: bool = False,
library: Optional[ModuleType] = None):
"""Check the version of gluonnlp satisfies the provided minimum version.
An exception is thrown if the check does not pass.
Parameters
----------
min_version
Minimum version
warning_only
Printing a warning instead of throwing an exception.
library
The target library for version check. Checks gluonnlp by default
"""
# pylint: disable=import-outside-toplevel
from .. import __version__
if library is None:
version = __version__
name = 'GluonNLP'
else:
version = library.__version__
name = library.__name__
from packaging.version import parse
bad_version = parse(version.replace('.dev', '')) < parse(min_version)
if bad_version:
msg = 'Installed {} version {} does not satisfy the ' \
'minimum required version {}'.format(name, version, min_version)
if warning_only:
warnings.warn(msg)
else:
raise AssertionError(msg) | Check the version of gluonnlp satisfies the provided minimum version.
An exception is thrown if the check does not pass.
Parameters
----------
min_version
Minimum version
warning_only
Printing a warning instead of throwing an exception.
library
The target library for version check. Checks gluonnlp by default
| check_version | python | dmlc/gluon-nlp | src/gluonnlp/utils/misc.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py | Apache-2.0 |
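A usage sketch for `check_version`, assuming gluon-nlp is installed so it can be imported from `gluonnlp.utils.misc` (per the path column). Passing a third-party module such as `numpy` as `library` is shown because, under the logic above, any module with a `__version__` attribute should work.

```python
import numpy

from gluonnlp.utils.misc import check_version   # assumption: gluon-nlp installed

check_version('1.0.0', warning_only=True)                    # check GluonNLP itself
check_version('1.16.0', warning_only=True, library=numpy)    # check numpy, warn instead of raising
```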
def init_comm(backend, gpus):
"""Init communication backend
Parameters
----------
backend
The communication backend
gpus
Returns
-------
store
The kvstore
num_workers
The total number of workers
rank
local_rank
is_master_node
ctx_l
"""
# backend specific implementation
import mxnet as mx
if backend == 'horovod':
try:
import horovod.mxnet as hvd # pylint: disable=import-outside-toplevel
except ImportError:
logging.info('horovod must be installed.')
sys.exit(1)
hvd.init()
store = None
num_workers = hvd.size()
rank = hvd.rank()
local_rank = hvd.local_rank()
is_master_node = rank == local_rank
ctx_l = [mx.gpu(local_rank)]
logging.info('GPU communication supported by horovod')
else:
store = mx.kv.create(backend)
num_workers = store.num_workers
rank = store.rank
local_rank = 0
is_master_node = rank == local_rank
if gpus == '-1' or gpus == '':
ctx_l = [mx.cpu()]
logging.info('Running on CPU')
else:
ctx_l = [mx.gpu(int(x)) for x in gpus.split(',')]
logging.info('GPU communication supported by KVStore')
return store, num_workers, rank, local_rank, is_master_node, ctx_l | Init communication backend
Parameters
----------
backend
The communication backend
gpus
Returns
-------
store
The kvstore
num_workers
The total number of workers
rank
local_rank
is_master_node
ctx_l
| init_comm | python | dmlc/gluon-nlp | src/gluonnlp/utils/misc.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py | Apache-2.0 |
def get_mxnet_visible_ctx():
"""Get the visible contexts in MXNet.
- If GPU is available
it will return all the visible GPUs, which can be controlled via "CUDA_VISIBLE_DEVICES".
- If no GPU is available
it will return the cpu device.
Returns
-------
ctx_l
The recommended contexts to use for MXNet
"""
import mxnet as mx
num_gpus = mx.context.num_gpus()
if num_gpus == 0:
ctx_l = [mx.cpu()]
else:
ctx_l = [mx.gpu(i) for i in range(num_gpus)]
return ctx_l | Get the visible contexts in MXNet.
- If GPU is available
it will return all the visible GPUs, which can be controlled via "CUDA_VISIBLE_DEVICES".
- If no GPU is available
it will return the cpu device.
Returns
-------
ctx_l
The recommended contexts to use for MXNet
| get_mxnet_visible_ctx | python | dmlc/gluon-nlp | src/gluonnlp/utils/misc.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py | Apache-2.0 |
def __init__(self, params=None):
"""Maintain a set of shadow variables "v" that is calculated by
v[:] = (1 - 1/t) v + 1/t \theta
The t is the number of training steps.
It is also known as "Polyak-Ruppert averaging" applied to SGD and was rediscovered in
"Towards Optimal One Pass Large Scale Learning with Averaged Stochastic Gradient Descent"
Wei Xu (2011).
The idea is to average the parameters obtained by stochastic gradient descent.
Parameters
----------
params : ParameterDict
The parameters that we are going to track.
"""
self._track_params = None
self._average_params = None
self._initialized = False
self._n_steps = 0
if params is not None:
self.apply(params) | Maintain a set of shadow variables "v" that is calculated by
v[:] = (1 - 1/t) v + 1/t \theta
The t is the number of training steps.
It is also known as "Polyak-Ruppert averaging" applied to SGD and was rediscovered in
"Towards Optimal One Pass Large Scale Learning with Averaged Stochastic Gradient Descent"
Wei Xu (2011).
The idea is to average the parameters obtained by stochastic gradient descent.
Parameters
----------
params : ParameterDict
The parameters that we are going to track.
| __init__ | python | dmlc/gluon-nlp | src/gluonnlp/utils/parameter.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/parameter.py | Apache-2.0 |
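A standalone numpy illustration of the recurrence v[:] = (1 - 1/t) v + 1/t theta maintained by the tracker above. The toy parameter trajectory is made up; the point is only that the recurrence yields the running mean of the parameter values seen so far.

```python
import numpy as np

rng = np.random.RandomState(0)
theta_history = [rng.randn(3) for _ in range(5)]   # made-up parameter values after each step

v = np.zeros(3)
for t, theta in enumerate(theta_history, start=1):
    v = (1 - 1.0 / t) * v + (1.0 / t) * theta      # the same update as in the docstring

print(np.allclose(v, np.mean(theta_history, axis=0)))   # True: v equals the running average
```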
def apply(self, params):
""" Tell the moving average tracker which parameters we are going to track.
Parameters
----------
params : ParameterDict
The parameters that we are going to track and calculate the moving average.
"""
assert self._track_params is None, 'The MovingAverageTracker is already initialized and'\
' is not allowed to be initialized again. '
self._track_params = deduplicate_param_dict(params)
self._n_steps = 0 | Tell the moving average tracker which parameters we are going to track.
Parameters
----------
params : ParameterDict
The parameters that we are going to track and calculate the moving average.
| apply | python | dmlc/gluon-nlp | src/gluonnlp/utils/parameter.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/parameter.py | Apache-2.0 |
def copy_back(self, params=None):
""" Copy the average parameters back to the given parameters
Parameters
----------
params : ParameterDict
The parameters that we will copy the average params to.
If it is not given, the tracked parameters will be updated
"""
if params is None:
params = self._track_params
for k, v in self._average_params.items():
params[k].set_data(v) | Copy the average parameters back to the given parameters
Parameters
----------
params : ParameterDict
The parameters that we will copy the average params to.
If it is not given, the tracked parameters will be updated
| copy_back | python | dmlc/gluon-nlp | src/gluonnlp/utils/parameter.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/parameter.py | Apache-2.0 |
def grad_global_norm(parameters: Iterable[Parameter]) -> float:
"""Calculate the 2-norm of gradients of parameters, and how much they should be scaled down
such that their 2-norm does not exceed `max_norm`, if `max_norm` is provided.
If gradients exist for more than one context for a parameter, user needs to explicitly call
``trainer.allreduce_grads`` so that the gradients are summed first before calculating
the 2-norm.
.. note::
This function is only for use when `update_on_kvstore` is set to False in trainer.
Example::
trainer = Trainer(net.collect_params(), update_on_kvstore=False, ...)
for x, y in mx.gluon.utils.split_and_load(X, [mx.gpu(0), mx.gpu(1)]):
with mx.autograd.record():
y = net(x)
loss = loss_fn(y, label)
loss.backward()
trainer.allreduce_grads()
norm = grad_global_norm(net.collect_params().values())
...
Parameters
----------
parameters
The list of Parameters
Returns
-------
total_norm
Total norm. It's a numpy scalar.
"""
# Distribute gradients among contexts,
# For example, assume there are 8 weights and four GPUs, we can ask each GPU to
# compute the squared sum of two weights and then add the results together
idx = 0
arrays = defaultdict(list)
sum_norms = []
num_ctx = None
param_uuid_set = set()
for p in parameters:
if p._uuid in param_uuid_set:
continue
param_uuid_set.add(p._uuid)
if p.grad_req != 'null':
p_grads = p.list_grad()
if num_ctx is None:
num_ctx = len(p_grads)
else:
assert num_ctx == len(p_grads)
arrays[idx % num_ctx].append(p_grads[idx % num_ctx])
idx += 1
assert len(arrays) > 0, 'No parameter found available for gradient norm.'
# TODO(sxjscience)
# Investigate the float16 case.
# The inner computation accumulative type of norm should be float32.
ctx = arrays[0][0].context
for idx, arr_l in enumerate(arrays.values()):
sum_norm = mx.np.linalg.norm(mx.np.concatenate([mx.np.ravel(ele) for ele in arr_l]))
sum_norms.append(sum_norm.as_in_ctx(ctx))
# Reduce over ctx
if num_ctx == 1:
total_norm = sum_norms[0]
else:
total_norm = mx.np.linalg.norm(mx.np.concatenate(sum_norms, axis=None))
total_norm = float(total_norm)
return total_norm | Calculate the 2-norm of gradients of parameters, and how much they should be scaled down
such that their 2-norm does not exceed `max_norm`, if `max_norm` is provided.
If gradients exist for more than one context for a parameter, user needs to explicitly call
``trainer.allreduce_grads`` so that the gradients are summed first before calculating
the 2-norm.
.. note::
This function is only for use when `update_on_kvstore` is set to False in trainer.
Example::
trainer = Trainer(net.collect_params(), update_on_kvstore=False, ...)
for x, y in mx.gluon.utils.split_and_load(X, [mx.gpu(0), mx.gpu(1)]):
with mx.autograd.record():
y = net(x)
loss = loss_fn(y, label)
loss.backward()
trainer.allreduce_grads()
norm = grad_global_norm(net.collect_params().values())
...
Parameters
----------
parameters
The list of Parameters
Returns
-------
total_norm
Total norm. It's a numpy scalar.
| grad_global_norm | python | dmlc/gluon-nlp | src/gluonnlp/utils/parameter.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/parameter.py | Apache-2.0 |
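The reduction over contexts above relies on the fact that the global 2-norm can be computed from per-chunk norms. A small numpy sketch of that identity; the toy "gradient" arrays are made up.

```python
import numpy as np

rng = np.random.RandomState(0)
grads = [rng.randn(3, 4), rng.randn(10), rng.randn(2, 2, 2)]    # toy gradient arrays

# ||concat(g_0, g_1, ...)||_2 == ||[ ||g_0||_2, ||g_1||_2, ... ]||_2
chunk_norms = np.array([np.linalg.norm(g) for g in grads])
flat_norm = np.linalg.norm(np.concatenate([g.ravel() for g in grads]))
print(np.allclose(flat_norm, np.linalg.norm(chunk_norms)))       # True
```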
def clip_grad_global_norm(parameters: Iterable[Parameter],
max_norm: float,
check_isfinite: bool = True) -> Tuple[float, float, bool]:
"""Rescales gradients of parameters so that the sum of their 2-norm is smaller than `max_norm`.
If gradients exist for more than one context for a parameter, user needs to explicitly call
``trainer.allreduce_grads`` so that the gradients are summed first before calculating
the 2-norm.
.. note::
This function is only for use when `update_on_kvstore` is set to False in trainer.
In cases where training happens on multiple contexts, this method should be used in
conjunction with ``trainer.allreduce_grads()`` and ``trainer.update()``.
(**not** ``trainer.step()``)
Example::
trainer = Trainer(net.collect_params(), update_on_kvstore=False, ...)
for x, y in mx.gluon.utils.split_and_load(X, [mx.gpu(0), mx.gpu(1)]):
with mx.autograd.record():
y = net(x)
loss = loss_fn(y, label)
loss.backward()
trainer.allreduce_grads()
nlp.utils.clip_grad_global_norm(net.collect_params().values(), max_norm)
trainer.update(batch_size)
...
Parameters
----------
parameters
The list of parameters to calculate the norm
max_norm
If the gradient norm is larger than max_norm, it will be clipped to have max_norm
check_isfinite
If True, check whether the total_norm is finite (not nan or inf).
Returns
-------
total_norm
The total norm
ratio
The expected clipping ratio: grad = grad / ratio
It will be calculated as max(total_norm / max_norm, 1)
is_finite
Whether the total norm is finite
"""
total_norm = grad_global_norm(parameters)
is_finite = bool(np.isfinite(total_norm))
ratio = np.maximum(1, total_norm / max_norm)
if check_isfinite and not is_finite:
warnings.warn(
UserWarning('nan or inf is detected. Clipping results will be undefined.'
' Thus, skip clipping'),
stacklevel=2)
return total_norm, ratio, is_finite
scale = 1 / ratio
param_uuid_set = set()
for p in parameters:
if p._uuid in param_uuid_set:
continue
param_uuid_set.add(p._uuid)
if p.grad_req != 'null':
for arr in p.list_grad():
arr *= scale
return total_norm, ratio, is_finite | Rescales gradients of parameters so that the sum of their 2-norm is smaller than `max_norm`.
If gradients exist for more than one context for a parameter, user needs to explicitly call
``trainer.allreduce_grads`` so that the gradients are summed first before calculating
the 2-norm.
.. note::
This function is only for use when `update_on_kvstore` is set to False in trainer.
In cases where training happens on multiple contexts, this method should be used in
conjunction with ``trainer.allreduce_grads()`` and ``trainer.update()``.
(**not** ``trainer.step()``)
Example::
trainer = Trainer(net.collect_params(), update_on_kvstore=False, ...)
for x, y in mx.gluon.utils.split_and_load(X, [mx.gpu(0), mx.gpu(1)]):
with mx.autograd.record():
y = net(x)
loss = loss_fn(y, label)
loss.backward()
trainer.allreduce_grads()
nlp.utils.clip_grad_global_norm(net.collect_params().values(), max_norm)
trainer.update(batch_size)
...
Parameters
----------
parameters
The list of parameters to calculate the norm
max_norm
If the gradient norm is larger than max_norm, it will be clipped to have max_norm
check_isfinite
If True, check whether the total_norm is finite (not nan or inf).
Returns
-------
total_norm
The total norm
ratio
The expected clipping ratio: grad = grad / ratio
It will be calculated as max(total_norm / max_norm, 1)
is_finite
Whether the total norm is finite
| clip_grad_global_norm | python | dmlc/gluon-nlp | src/gluonnlp/utils/parameter.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/parameter.py | Apache-2.0 |
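A minimal numpy sketch of the clipping arithmetic used above: ratio = max(1, total_norm / max_norm), then every gradient is divided by the ratio. The toy gradients are made up.

```python
import numpy as np

max_norm = 1.0
grads = [np.array([3.0, 4.0]), np.array([0.0])]   # global 2-norm is 5.0

total_norm = np.linalg.norm(np.concatenate([g.ravel() for g in grads]))
ratio = np.maximum(1, total_norm / max_norm)      # 5.0 -> gradients are scaled by 1/5
clipped = [g / ratio for g in grads]

print(total_norm, ratio)                          # 5.0 5.0
print(np.linalg.norm(np.concatenate(clipped)))    # 1.0 (== max_norm)
```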
def move_to_ctx(arr, ctx):
"""Move a nested structure of array to the given context
Parameters
----------
arr
The input array
ctx
The MXNet context
Returns
-------
new_arr
The array that has been moved to context
"""
if isinstance(arr, tuple):
return tuple(move_to_ctx(ele, ctx) for ele in arr)
elif isinstance(arr, list):
return [move_to_ctx(ele, ctx) for ele in arr]
else:
return None if arr is None else arr.as_in_ctx(ctx) | Move a nested structure of array to the given context
Parameters
----------
arr
The input array
ctx
The MXNet context
Returns
-------
new_arr
The array that has been moved to context
| move_to_ctx | python | dmlc/gluon-nlp | src/gluonnlp/utils/parameter.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/parameter.py | Apache-2.0 |
def deduplicate_param_dict(param_dict):
"""Get a parameter dict that has been deduplicated
Parameters
----------
param_dict
The parameter dict returned by `model.collect_params()`
Returns
-------
dedup_param_dict
"""
dedup_param_dict = dict()
param_uuid_set = set()
for k in sorted(param_dict.keys()):
v = param_dict[k]
if v._uuid in param_uuid_set:
continue
dedup_param_dict[k] = v
param_uuid_set.add(v._uuid)
return dedup_param_dict | Get a parameter dict that has been deduplicated
Parameters
----------
param_dict
The parameter dict returned by `model.collect_params()`
Returns
-------
dedup_param_dict
| deduplicate_param_dict | python | dmlc/gluon-nlp | src/gluonnlp/utils/parameter.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/parameter.py | Apache-2.0 |
def count_parameters(params) -> Tuple[int, int]:
"""
Parameters
----------
params
The input parameter dict
Returns
-------
num_params
The number of parameters that require gradients
num_fixed_params
The number of parameters that do not require gradients
"""
num_params = 0
num_fixed_params = 0
param_uuid_set = set()
for k, v in params.items():
if v._uuid in param_uuid_set:
continue
param_uuid_set.add(v._uuid)
if v.grad_req != 'null':
if v._data is None:
warnings.warn('"{}" is not initialized! The total parameter count '
'will not be correct.'.format(k))
else:
num_params += np.prod(v.shape)
else:
if v._data is None:
warnings.warn('"{}" is not initialized! The total fixed parameter count '
'will not be correct.'.format(k))
else:
num_fixed_params += np.prod(v.shape)
return num_params, num_fixed_params |
Parameters
----------
params
The input parameter dict
Returns
-------
num_params
The number of parameters that require gradients
num_fixed_params
The number of parameters that do not require gradients
| count_parameters | python | dmlc/gluon-nlp | src/gluonnlp/utils/parameter.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/parameter.py | Apache-2.0 |
def get_trimmed_lengths(lengths: List[int],
max_length: int,
do_merge: bool = False) -> np.ndarray:
"""Get the trimmed lengths of multiple text data. It will make sure that
the trimmed length is smaller than or equal to the max_length
- do_merge is True
Make sure that sum(trimmed_lengths) <= max_length.
The strategy is to always try to trim the longer lengths.
- do_merge is False
Make sure that all(trimmed_lengths <= max_length)
Parameters
----------
lengths
The original lengths of each sample
max_length
When do_merge is True,
We set the max_length constraint on the total length.
When do_merge is False,
We set the max_length constraint on individual sentences.
do_merge
Whether these sentences will be merged
Returns
-------
trimmed_lengths
The trimmed lengths of the sequences.
"""
lengths = np.array(lengths)
if do_merge:
total_length = sum(lengths)
if total_length <= max_length:
return lengths
trimmed_lengths = np.zeros_like(lengths)
while sum(trimmed_lengths) != max_length:
remainder = max_length - sum(trimmed_lengths)
budgets = lengths - trimmed_lengths
nonzero_idx = (budgets > 0).nonzero()[0]
nonzero_budgets = budgets[nonzero_idx]
if remainder // len(nonzero_idx) == 0:
for i in range(remainder):
trimmed_lengths[nonzero_idx[i]] += 1
else:
increment = min(min(nonzero_budgets), remainder // len(nonzero_idx))
trimmed_lengths[nonzero_idx] += increment
return trimmed_lengths
else:
return np.minimum(lengths, max_length) | Get the trimmed lengths of multiple text data. It will make sure that
the trimmed length is smaller than or equal to the max_length
- do_merge is True
Make sure that sum(trimmed_lengths) <= max_length.
The strategy is to always try to trim the longer lengths.
- do_merge is False
Make sure that all(trimmed_lengths <= max_length)
Parameters
----------
lengths
The original lengths of each sample
max_length
When do_merge is True,
We set the max_length constraint on the total length.
When do_merge is False,
We set the max_length constraint on individual sentences.
do_merge
Whether these sentences will be merged
Returns
-------
trimmed_lengths
The trimmed lengths of the sequences.
| get_trimmed_lengths | python | dmlc/gluon-nlp | src/gluonnlp/utils/preprocessing.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/preprocessing.py | Apache-2.0 |
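A usage sketch, assuming gluon-nlp is installed so the function can be imported from `gluonnlp.utils.preprocessing` (per the path column); the expected outputs in the comments follow from the rules stated in the docstring.

```python
from gluonnlp.utils.preprocessing import get_trimmed_lengths   # assumption: gluon-nlp installed

# do_merge=False: cap each length individually at max_length.
print(get_trimmed_lengths([10, 5, 4], max_length=7, do_merge=False))   # expected: [7 5 4]

# do_merge=True: make the lengths sum to max_length, trimming the longer ones first.
print(get_trimmed_lengths([10, 5, 4], max_length=12, do_merge=True))   # expected: [4 4 4]
```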
def match_tokens_with_char_spans(token_offsets: np.ndarray,
spans: np.ndarray) -> np.ndarray:
"""Match the span offsets with the character-level offsets.
For each span, we perform the following:
1: Cutoff the boundary
span[0] = max(span[0], token_offsets[0, 0])
span[1] = min(span[1], token_offsets[-1, 1])
2: Find start + end
We try to select the smallest number of tokens that cover the entity, i.e.,
we will find start + end, in which tokens[start:end + 1] covers the span.
We will use the following algorithm:
For "start", we search for
token_offsets[start, 0] <= span[0] < token_offsets[start + 1, 0]
For "end", we search for:
token_offsets[end - 1, 1] < spans[1] <= token_offsets[end, 1]
Parameters
----------
token_offsets
The offsets of the input tokens. Must be sorted.
That is, it will satisfy
1. token_offsets[i][0] <= token_offsets[i][1]
2. token_offsets[i][0] <= token_offsets[i + 1][0]
3. token_offsets[i][1] <= token_offsets[i + 1][1]
Shape (#num_tokens, 2)
spans
The character-level offsets (begin/end) of the selected spans.
Shape (#spans, 2)
Returns
-------
token_start_ends
The token-level starts and ends. The end index is inclusive, i.e. tokens[start:end + 1] covers the span.
Shape (#spans, 2)
"""
if not isinstance(token_offsets, np.ndarray):
token_offsets = np.array(token_offsets)
if not isinstance(spans, np.ndarray):
spans = np.array(spans)
offsets_starts = token_offsets[:, 0]
offsets_ends = token_offsets[:, 1]
span_char_starts = spans[:, 0]
span_char_ends = spans[:, 1]
# Truncate the span
span_char_starts = np.maximum(offsets_starts[0], span_char_starts)
span_char_ends = np.minimum(offsets_ends[-1], span_char_ends)
# Search for valid start + end
span_token_starts = np.searchsorted(offsets_starts, span_char_starts, side='right') - 1
span_token_ends = np.searchsorted(offsets_ends, span_char_ends, side='left')
return np.concatenate((np.expand_dims(span_token_starts, axis=-1),
np.expand_dims(span_token_ends, axis=-1)), axis=-1) | Match the span offsets with the character-level offsets.
For each span, we perform the following:
1: Cutoff the boundary
span[0] = max(span[0], token_offsets[0, 0])
span[1] = min(span[1], token_offsets[-1, 1])
2: Find start + end
We try to select the smallest number of tokens that cover the entity, i.e.,
we will find start + end, in which tokens[start:end + 1] covers the span.
We will use the following algorithm:
For "start", we search for
token_offsets[start, 0] <= span[0] < token_offsets[start + 1, 0]
For "end", we search for:
token_offsets[end - 1, 1] < spans[1] <= token_offsets[end, 1]
Parameters
----------
token_offsets
The offsets of the input tokens. Must be sorted.
That is, it will satisfy
1. token_offsets[i][0] <= token_offsets[i][1]
2. token_offsets[i][0] <= token_offsets[i + 1][0]
3. token_offsets[i][1] <= token_offsets[i + 1][1]
Shape (#num_tokens, 2)
spans
The character-level offsets (begin/end) of the selected spans.
Shape (#spans, 2)
Returns
-------
token_start_ends
The token-level starts and ends. The end index is inclusive, i.e. tokens[start:end + 1] covers the span.
Shape (#spans, 2)
| match_tokens_with_char_spans | python | dmlc/gluon-nlp | src/gluonnlp/utils/preprocessing.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/preprocessing.py | Apache-2.0 |
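A usage sketch with made-up token offsets; it assumes gluon-nlp is installed so the function can be imported from `gluonnlp.utils.preprocessing`.

```python
import numpy as np

from gluonnlp.utils.preprocessing import match_tokens_with_char_spans   # assumption: gluon-nlp installed

token_offsets = np.array([[0, 3], [4, 7], [8, 12]])   # made-up character offsets of three tokens
spans = np.array([[5, 10]])                           # a character span straddling tokens 1 and 2

# Expected: [[1 2]], i.e. tokens[1:2 + 1] is the smallest token range covering the span.
print(match_tokens_with_char_spans(token_offsets, spans))
```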
def register(self, *args):
"""
Register the given object under either the nickname or `obj.__name__`. It can be used as
either a decorator or not. See docstring of this class for usage.
"""
if len(args) == 2:
# Register an object with nick name by function call
nickname, obj = args
self._do_register(nickname, obj)
elif len(args) == 1:
if isinstance(args[0], str):
# Register an object with nick name by decorator
nickname = args[0]
def deco(func_or_class: object) -> object:
self._do_register(nickname, func_or_class)
return func_or_class
return deco
else:
# Register an object by function call
self._do_register(args[0].__name__, args[0])
elif len(args) == 0:
# Register an object by decorator
def deco(func_or_class: object) -> object:
self._do_register(func_or_class.__name__, func_or_class)
return func_or_class
return deco
else:
raise ValueError('Do not support the usage!') |
Register the given object under either the nickname or `obj.__name__`. It can be used as
either a decorator or not. See docstring of this class for usage.
| register | python | dmlc/gluon-nlp | src/gluonnlp/utils/registry.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/registry.py | Apache-2.0 |
def create(self, name: str, *args, **kwargs) -> object:
"""Create the class object with the given args and kwargs
Parameters
----------
name
The name in the registry
args
kwargs
Returns
-------
ret
The created object
"""
obj = self.get(name)
try:
return obj(*args, **kwargs)
except Exception as exp:
print('Cannot create name="{}" --> {} with the provided arguments!\n'
' args={},\n'
' kwargs={},\n'
.format(name, obj, args, kwargs))
raise exp | Create the class object with the given args and kwargs
Parameters
----------
name
The name in the registry
args
kwargs
Returns
-------
ret
The created object
| create | python | dmlc/gluon-nlp | src/gluonnlp/utils/registry.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/registry.py | Apache-2.0 |
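A usage sketch tying the `register` and `create` rows together. It assumes the class is exposed as `gluonnlp.utils.registry.Registry` and that its constructor takes a registry name; both the import path and the constructor signature are assumptions, and the toy optimizer classes are made up.

```python
from gluonnlp.utils.registry import Registry    # assumption: class name and module path

OPTIMIZER_REGISTRY = Registry('optimizer')       # assumption: constructor takes a registry name

@OPTIMIZER_REGISTRY.register('sgd')              # decorator with an explicit nickname
class ToySGD:
    def __init__(self, lr=0.1):
        self.lr = lr

@OPTIMIZER_REGISTRY.register()                   # decorator without a nickname -> uses __name__
class ToyAdam:
    def __init__(self, lr=1e-3):
        self.lr = lr

opt = OPTIMIZER_REGISTRY.create('sgd', lr=0.05)  # instantiate by registered name
print(type(opt).__name__, opt.lr)                # ToySGD 0.05
```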
def serialize(path, tbl):
"""Serialize tbl with out-of-band data to path for zero-copy shared memory usage.
If the object to be serialized itself, or the objects it uses for data
storage (such as numpy arrays) implement the pickle protocol version 5
pickle.PickleBuffer type in __reduce_ex__, then this function can store
these buffers out-of-band as files in `path` so that they can subsequently be
re-used for zero-copy sharing across processes.
Parameters
----------
path : pathlib.Path
Empty folder used to save serialized data. Usually a folder /dev/shm
tbl : object
Object to serialize. For example a PyArrow Table, a Pandas Dataframe or
any type that relies on NumPy to store the binary data.
"""
idx = 0
def buffer_callback(buf):
nonlocal idx
with open(path / f'{idx}.bin', 'wb') as f:
f.write(buf)
idx += 1
with open(path / 'meta.pkl', 'wb') as f:
pickle.dump(tbl, f, protocol=5, buffer_callback=buffer_callback) | Serialize tbl with out-of-band data to path for zero-copy shared memory usage.
If the object to be serialized itself, or the objects it uses for data
storage (such as numpy arrays) implement the the pickle protocol version 5
pickle.PickleBuffer type in __reduce_ex__, then this function can store
these buffers out-of-band as files in `path` so that they subsequently be
re-used for zero-copy sharing accross processes.
Parameters
----------
path : pathlib.Path
Empty folder used to save serialized data. Usually a folder /dev/shm
tbl : object
Object to serialize. For example a PyArrow Table, a Pandas Dataframe or
any type that relies on NumPy to store the binary data.
| serialize | python | dmlc/gluon-nlp | src/gluonnlp/utils/shm.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/shm.py | Apache-2.0 |
def load(path):
"""Load serialized object with out-of-band data from path based on zero-copy shared memory.
Parameters
----------
path : pathlib.Path
Folder used to save serialized data with serialize(). Usually a folder /dev/shm
"""
num_buffers = len(list(path.iterdir())) - 1 # exclude meta.pkl
buffers = []
for idx in range(num_buffers):
f = open(path / f'{idx}.bin', 'rb')
buffers.append(mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ))
with open(path / 'meta.pkl', 'rb') as f:
return pickle.load(f, buffers=buffers) | Load serialized object with out-of-band data from path based on zero-copy shared memory.
Parameters
----------
path : pathlib.Path
Folder used to save serialized data with serialize(). Usually a folder /dev/shm
| load | python | dmlc/gluon-nlp | src/gluonnlp/utils/shm.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/shm.py | Apache-2.0 |
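A usage sketch for the serialize/load pair above. It assumes gluon-nlp is installed (`gluonnlp.utils.shm` per the path column), Python 3.8+ for pickle protocol 5, and a Linux tmpfs such as /dev/shm.

```python
import pathlib
import tempfile

import numpy as np

from gluonnlp.utils.shm import serialize, load   # assumption: gluon-nlp installed

arr = np.arange(16, dtype=np.float32).reshape(4, 4)
shm_dir = pathlib.Path(tempfile.mkdtemp(dir='/dev/shm'))   # empty folder on shared memory

serialize(shm_dir, arr)              # writes meta.pkl plus one .bin file per out-of-band buffer
loaded = load(shm_dir)               # the buffers come back memory-mapped (zero-copy)
print(np.array_equal(arr, loaded))   # True
```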
def is_match_states_batch_size(states, states_batch_axis, batch_size) -> bool:
"""Test whether the generated states have the specified batch size
Parameters
----------
states
The states structure
states_batch_axis
The states batch axis structure
batch_size
The batch size
Returns
-------
ret
"""
if states_batch_axis is None:
return True
if isinstance(states_batch_axis, int):
if states.shape[states_batch_axis] == batch_size:
return True
for ele_states_batch_axis, ele_states in zip(states_batch_axis, states):
ret = is_match_states_batch_size(ele_states, ele_states_batch_axis, batch_size)
if ret is False:
return False
return True | Test whether the generated states have the specified batch size
Parameters
----------
states
The states structure
states_batch_axis
The states batch axis structure
batch_size
The batch size
Returns
-------
ret
| is_match_states_batch_size | python | dmlc/gluon-nlp | src/gluonnlp/utils/testing.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/testing.py | Apache-2.0 |
def verify_nmt_model(model, batch_size: int = 4,
src_seq_length: int = 5,
tgt_seq_length: int = 10,
atol: float = 1E-4,
rtol: float = 1E-3):
"""Verify the correctness of an NMT model. Raise error message if it detects problems.
Parameters
----------
model
The machine translation model
batch_size
The batch size to test the nmt model
src_seq_length
Length of the source sequence
tgt_seq_length
Length of the target sequence
atol
Absolute tolerance.
rtol
Relative tolerance.
"""
src_word_sequence = mx.np.random.randint(0, model.src_vocab_size, (batch_size, src_seq_length))
tgt_word_sequence = mx.np.random.randint(0, model.tgt_vocab_size, (batch_size, tgt_seq_length))
src_valid_length = mx.np.random.randint(1, src_seq_length, (batch_size,))
min_tgt_seq_length = max(1, tgt_seq_length - 5)
tgt_valid_length = mx.np.random.randint(min_tgt_seq_length, tgt_seq_length, (batch_size,))
if model.layout == 'NT':
full_out = model(src_word_sequence, src_valid_length, tgt_word_sequence, tgt_valid_length)
else:
full_out = model(src_word_sequence.T, src_valid_length,
tgt_word_sequence.T, tgt_valid_length)
full_out = mx.np.swapaxes(full_out, 0, 1)
if full_out.shape != (batch_size, tgt_seq_length, model.tgt_vocab_size):
raise AssertionError('The output of NMT model does not match the expected output.'
' Model output shape = {}, Expected (B, T, V) = {}'
.format(full_out.shape,
(batch_size, tgt_seq_length, model.tgt_vocab_size)))
for partial_batch_size in range(1, batch_size + 1):
for i in range(1, min_tgt_seq_length):
if model.layout == 'NT':
partial_out = model(src_word_sequence[:partial_batch_size, :],
src_valid_length[:partial_batch_size],
tgt_word_sequence[:partial_batch_size, :(-i)],
tgt_valid_length[:partial_batch_size]
- mx.np.array(i, dtype=tgt_valid_length.dtype))
else:
partial_out = model(src_word_sequence[:partial_batch_size, :].T,
src_valid_length[:partial_batch_size],
tgt_word_sequence[:partial_batch_size, :(-i)].T,
tgt_valid_length[:partial_batch_size]
- mx.np.array(i, dtype=tgt_valid_length.dtype))
partial_out = mx.np.swapaxes(partial_out, 0, 1)
# Verify that the partial output matches the full output
for b in range(partial_batch_size):
partial_vl = tgt_valid_length.asnumpy()[b] - i
npt.assert_allclose(full_out[b, :partial_vl].asnumpy(),
partial_out[b, :partial_vl].asnumpy(), atol, rtol) | Verify the correctness of an NMT model. Raise error message if it detects problems.
Parameters
----------
model
The machine translation model
batch_size
The batch size to test the nmt model
src_seq_length
Length of the source sequence
tgt_seq_length
Length of the target sequence
atol
Absolute tolerance.
rtol
Relative tolerance.
| verify_nmt_model | python | dmlc/gluon-nlp | src/gluonnlp/utils/testing.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/testing.py | Apache-2.0 |
def verify_nmt_inference(train_model, inference_model,
batch_size=4, src_seq_length=5,
tgt_seq_length=10, atol=1E-4, rtol=1E-3):
"""Verify the correctness of an NMT inference model. Raise error message if it detects
any problems.
Parameters
----------
train_model
The training model
inference_model
The inference model
batch_size
Batch size
src_seq_length
Length of the source sequence
tgt_seq_length
Length of the target sequence
atol
Absolute tolerance
rtol
Relative tolerance
"""
if train_model.layout == 'NT':
src_word_sequences = mx.np.random.randint(0, train_model.src_vocab_size,
(batch_size, src_seq_length))
tgt_word_sequences = mx.np.random.randint(0, train_model.tgt_vocab_size,
(batch_size, tgt_seq_length))
else:
src_word_sequences = mx.np.random.randint(0, train_model.src_vocab_size,
(src_seq_length, batch_size))
tgt_word_sequences = mx.np.random.randint(0, train_model.tgt_vocab_size,
(tgt_seq_length, batch_size))
src_valid_length = mx.np.random.randint(1, src_seq_length, (batch_size,))
min_tgt_seq_length = max(1, tgt_seq_length - 5)
tgt_valid_length = mx.np.random.randint(min_tgt_seq_length, tgt_seq_length, (batch_size,))
full_out = train_model(src_word_sequences, src_valid_length,
tgt_word_sequences, tgt_valid_length)
if train_model.layout == 'NT':
for partial_batch_size in range(1, batch_size + 1):
step_out_l = []
states = inference_model.init_states(src_word_sequences[:partial_batch_size, :],
src_valid_length[:partial_batch_size])
assert is_match_states_batch_size(states, inference_model.state_batch_axis,
partial_batch_size)
for i in range(min_tgt_seq_length):
step_out, states = inference_model(tgt_word_sequences[:partial_batch_size, i],
states)
step_out_l.append(step_out)
partial_out = mx.np.stack(step_out_l, axis=1)
npt.assert_allclose(full_out[:partial_batch_size, :min_tgt_seq_length].asnumpy(),
partial_out[:partial_batch_size, :].asnumpy(), atol, rtol)
elif train_model.layout == 'TN':
for partial_batch_size in range(1, batch_size + 1):
step_out_l = []
states = inference_model.init_states(src_word_sequences[:, :partial_batch_size],
src_valid_length[:partial_batch_size])
assert is_match_states_batch_size(states, inference_model.state_batch_axis,
partial_batch_size)
for i in range(min_tgt_seq_length):
step_out, states = inference_model(tgt_word_sequences[i, :partial_batch_size],
states)
step_out_l.append(step_out)
partial_out = mx.np.stack(step_out_l, axis=0)
npt.assert_allclose(full_out[:min_tgt_seq_length, :partial_batch_size].asnumpy(),
partial_out[:, :partial_batch_size].asnumpy(), atol, rtol)
else:
raise NotImplementedError | Verify the correctness of an NMT inference model. Raise error message if it detects
any problems.
Parameters
----------
train_model
The training model
inference_model
The inference model
batch_size
Batch size
src_seq_length
Length of the source sequence
tgt_seq_length
Length of the target sequence
atol
Absolute tolerance
rtol
Relative tolerance
| verify_nmt_inference | python | dmlc/gluon-nlp | src/gluonnlp/utils/testing.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/testing.py | Apache-2.0 |
def _cast_nested_to_fp16(nested_dat):
"""Cast the nested input to fp16
Parameters
----------
dat
The input nested data structure
Returns
-------
output
The casted output data
"""
if isinstance(nested_dat, (mx.np.ndarray, np.ndarray)):
if nested_dat.dtype == np.float32:
return nested_dat.astype(np.float16)
else:
return nested_dat
elif isinstance(nested_dat, list):
return [_cast_nested_to_fp16(ele) for ele in nested_dat]
elif isinstance(nested_dat, tuple):
return tuple([_cast_nested_to_fp16(ele) for ele in nested_dat])
else:
raise NotImplementedError('Type is not supported!') | Cast the nested input to fp16
Parameters
----------
dat
The input nested data structure
Returns
-------
output
The casted output data
| _cast_nested_to_fp16 | python | dmlc/gluon-nlp | src/gluonnlp/utils/testing.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/testing.py | Apache-2.0 |
def verify_backbone_fp16(model_cls, cfg, ctx, inputs,
atol=1E-2, rtol=1E-2, check_amp=True):
"""Test whether the backbone model has the comparable parameter gradient +
Parameters
----------
model_cls
The modeling class
cfg
The configuration
ctx
The context
inputs
The input tensors of the model. We will
atol
The absolute tolerance
rtol
The relative tolerance
check_amp
Whether to check the AMP process. You will need to ensure that there is no
randomness in the model when it is turned on.
"""
model_fp32 = model_cls.from_cfg(cfg, dtype='float32')
model_fp32.initialize(ctx=ctx)
model_fp32.hybridize()
# Check forward
fp32_inputs = move_to_ctx(inputs, ctx=ctx)
outputs_fp32 = model_fp32(*fp32_inputs)
mx.npx.waitall()
# Check forward of fp16
model_fp16 = model_cls.from_cfg(cfg, dtype='float16')
model_fp16.share_parameters(model_fp32.collect_params())
model_fp16.cast('float16')
model_fp16.hybridize()
for param in model_fp16.collect_params().values():
assert param.dtype == 'float16'
fp16_inputs = move_to_ctx(_cast_nested_to_fp16(inputs), ctx=ctx)
outputs_fp16 = model_fp16(*fp16_inputs)
mx.npx.waitall()
_match_struct_output(outputs_fp16, outputs_fp32, atol=atol, rtol=rtol)
if check_amp:
from mxnet import amp
amp.init()
# Reconstruct the fp32 model
model_fp32 = model_cls.from_cfg(cfg, dtype='float32')
model_fp32.initialize(ctx=ctx)
model_fp32.hybridize()
trainer = mx.gluon.Trainer(model_fp32.collect_params(), 'adam',
{'learning_rate': 1E-3, 'wd': 1E-4,
'multi_precision': True},
update_on_kvstore=False)
amp.init_trainer(trainer)
with mx.autograd.record():
outputs_amp = model_fp32(*fp32_inputs)
if not isinstance(outputs_amp, (tuple, list)):
loss = outputs_amp.mean()
else:
loss = sum([ele.mean() for ele in outputs_amp])
with amp.scale_loss(loss, trainer) as scaled_loss:
mx.autograd.backward(scaled_loss)
trainer.step(1)
mx.npx.waitall() | Test whether the backbone model produces comparable outputs in float16 and, optionally, that AMP training runs.
Parameters
----------
model_cls
The modeling class
cfg
The configuration
ctx
The context
inputs
The input tensors of the model. We will
atol
The absolute tolerance
rtol
The relative tolerance
check_amp
Whether to check the AMP process. You will need to ensure that there is no
randomness in the model when it is turned on.
| verify_backbone_fp16 | python | dmlc/gluon-nlp | src/gluonnlp/utils/testing.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/testing.py | Apache-2.0 |
def get_ec2_tvm_flags() -> Dict[str, Dict]:
r"""Return the recommended flags for TVM compilation in AWS EC2 instances.
Including C4, C5, G4, P3.
For more details about AWS EC2 instances, refer to https://aws.amazon.com/ec2/instance-types/.
Returns
-------
info_dict
A dictionary that contains the mapping between instance type and the
corresponding compilation flags.
Each element includes:
- target
The compilation target
- use_gpu
Whether it's a GPU instance
- opt_level
The optimization level in compilation
- pass
Additional graph passes for further improvement.
"""
instance_info = {
'g4': {'target': "cuda -model=t4 -libs=cublas,cudnn",
'use_gpu': True,
'opt_level': 3,
'required_pass': ["FastMath"]},
'c4': {'target': 'llvm -mcpu=core-avx2 -libs=cblas',
'use_gpu': False,
'opt_level': 3,
'required_pass': ["FastMath"]},
'c5': {'target': 'llvm -mcpu=skylake-avx512 -libs=cblas',
'use_gpu': False,
'opt_level': 3,
'required_pass': ["FastMath"]},
'p3': {'target': 'cuda -model=v100 -libs=cublas,cudnn',
'use_gpu': True,
'opt_level': 3,
'required_pass': ["FastMath"]}
}
return instance_info | Return the recommended flags for TVM compilation in AWS EC2 instances.
Including C4, C5, G4, P3.
For more details about AWS EC2 instances, refer to https://aws.amazon.com/ec2/instance-types/.
Returns
-------
info_dict
A dictionary that contains the mapping between instance type and the
corresponding compilation flags.
Each element includes:
- target
The compilation target
- use_gpu
Whether it's a GPU instance
- opt_level
The optimization level in compilation
- pass
Additional graph passes for further improvement.
| get_ec2_tvm_flags | python | dmlc/gluon-nlp | src/gluonnlp/utils/tvm_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/tvm_utils.py | Apache-2.0 |
def update_tvm_convert_map() -> None:
"""A Monkey Patch to update convert map in tvm/relay/frontend/mxnet.py"""
op = (('masked_softmax', _mx_masked_softmax),)
_convert_map.update({key: value for key, value in op}) | A Monkey Patch to update convert map in tvm/relay/frontend/mxnet.py | update_tvm_convert_map | python | dmlc/gluon-nlp | src/gluonnlp/utils/tvm_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/tvm_utils.py | Apache-2.0 |
def grouper(iterable, n, fillvalue=None):
"""Collect data into fixed-length chunks or blocks"""
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return itertools.zip_longest(*args, fillvalue=fillvalue) | Collect data into fixed-length chunks or blocks | grouper | python | dmlc/gluon-nlp | tests/test_gluon_block.py | https://github.com/dmlc/gluon-nlp/blob/master/tests/test_gluon_block.py | Apache-2.0 |
def test_test():
"""Test that fixing a random seed works."""
py_rnd = random.randint(0, 100)
np_rnd = np.random.randint(0, 100)
mx_rnd = mx.nd.random_uniform(shape=(1, )).asscalar()
random.seed(1)
mx.random.seed(1)
np.random.seed(1)
assert py_rnd == random.randint(0, 100)
assert np_rnd == np.random.randint(0, 100)
assert mx_rnd == mx.nd.random_uniform(shape=(1, )).asscalar() | Test that fixing a random seed works. | test_test | python | dmlc/gluon-nlp | tests/test_pytest.py | https://github.com/dmlc/gluon-nlp/blob/master/tests/test_pytest.py | Apache-2.0 |
def is_image_file(filename):
"""Checks if a file is an image.
Args:
filename (string): path to a file
Returns:
bool: True if the filename ends with a known image extension
"""
filename_lower = filename.lower()
return any(filename_lower.endswith(ext) for ext in IMG_EXTENSIONS) | Checks if a file is an image.
Args:
filename (string): path to a file
Returns:
bool: True if the filename ends with a known image extension
| is_image_file | python | ajbrock/BigGAN-PyTorch | datasets.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/datasets.py | MIT |
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
if self.load_in_mem:
img = self.data[index]
target = self.labels[index]
else:
path, target = self.imgs[index]
img = self.loader(str(path))
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
# print(img.size(), target)
return img, int(target) |
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
| __getitem__ | python | ajbrock/BigGAN-PyTorch | datasets.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/datasets.py | MIT |
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
# If loaded the entire dataset in RAM, get image from memory
if self.load_in_mem:
img = self.data[index]
target = self.labels[index]
# Else load it from disk
else:
with h5.File(self.root,'r') as f:
img = f['imgs'][index]
target = f['labels'][index]
# if self.transform is not None:
# img = self.transform(img)
# Apply my own transform
img = ((torch.from_numpy(img).float() / 255) - 0.5) * 2
if self.target_transform is not None:
target = self.target_transform(target)
return img, int(target) |
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
| __getitem__ | python | ajbrock/BigGAN-PyTorch | datasets.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/datasets.py | MIT |
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], self.labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target |
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
| __getitem__ | python | ajbrock/BigGAN-PyTorch | datasets.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/datasets.py | MIT |
def torch_cov(m, rowvar=False):
'''Estimate a covariance matrix given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, `X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element `C_{ij}` is the covariance of
`x_i` and `x_j`. The element `C_{ii}` is the variance of `x_i`.
Args:
m: A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables.
rowvar: If `rowvar` is True, then each row represents a
variable, with observations in the columns. Otherwise, the
relationship is transposed: each column represents a variable,
while the rows contain observations.
Returns:
The covariance matrix of the variables.
'''
if m.dim() > 2:
raise ValueError('m has more than 2 dimensions')
if m.dim() < 2:
m = m.view(1, -1)
if not rowvar and m.size(0) != 1:
m = m.t()
# m = m.type(torch.double) # uncomment this line if desired
fact = 1.0 / (m.size(1) - 1)
m -= torch.mean(m, dim=1, keepdim=True)
mt = m.t() # if complex: mt = m.t().conj()
return fact * m.matmul(mt).squeeze() | Estimate a covariance matrix given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, `X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element `C_{ij}` is the covariance of
`x_i` and `x_j`. The element `C_{ii}` is the variance of `x_i`.
Args:
m: A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables.
rowvar: If `rowvar` is True, then each row represents a
variable, with observations in the columns. Otherwise, the
relationship is transposed: each column represents a variable,
while the rows contain observations.
Returns:
The covariance matrix of the variables.
| torch_cov | python | ajbrock/BigGAN-PyTorch | inception_utils.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/inception_utils.py | MIT |
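A quick sanity check for the estimator above against `numpy.cov`. Since inception_utils.py is a script in this repo rather than an installed package, the snippet assumes `torch_cov` as defined above is already in scope (imported or pasted into the session).

```python
import numpy as np
import torch

x = torch.randn(200, 5)
cov = torch_cov(x.clone(), rowvar=False)          # clone(): torch_cov centers its input in place
ref = np.cov(x.numpy(), rowvar=False)
print(np.allclose(cov.numpy(), ref, atol=1e-4))   # True up to float32 round-off
```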
def numpy_calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
Taken from https://github.com/bioinf-jku/TTUR
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
-- mu2 : The sample mean over activations, precalculated on an
representive data set.
-- sigma1: The covariance matrix over activations for generated samples.
-- sigma2: The covariance matrix over activations, precalculated on an
representive data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, \
'Training and test mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, \
'Training and test covariances have different dimensions'
diff = mu1 - mu2
# Product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = ('fid calculation produces singular product; '
'adding %s to diagonal of cov estimates') % eps
print(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# Numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
print('wat')
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError('Imaginary component {}'.format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
out = diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean
return out | Numpy implementation of the Frechet Distance.
Taken from https://github.com/bioinf-jku/TTUR
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
-- mu2 : The sample mean over activations, precalculated on a
representative data set.
-- sigma1: The covariance matrix over activations for generated samples.
-- sigma2: The covariance matrix over activations, precalculated on a
representative data set.
Returns:
-- : The Frechet Distance.
| numpy_calculate_frechet_distance | python | ajbrock/BigGAN-PyTorch | inception_utils.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/inception_utils.py | MIT |
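A usage sketch for numpy_calculate_frechet_distance; the import path is an assumption and the random arrays merely stand in for Inception pool3 activations:

import numpy as np
from inception_utils import numpy_calculate_frechet_distance  # assumed import path

act_fake = np.random.randn(500, 64)   # stand-in for generated-sample activations
act_real = np.random.randn(500, 64)   # stand-in for reference activations
mu1, sigma1 = act_fake.mean(axis=0), np.cov(act_fake, rowvar=False)
mu2, sigma2 = act_real.mean(axis=0), np.cov(act_real, rowvar=False)
print(numpy_calculate_frechet_distance(mu1, sigma1, mu2, sigma2))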
def torch_calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Pytorch implementation of the Frechet Distance.
Taken from https://github.com/bioinf-jku/TTUR
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
-- mu2 : The sample mean over activations, precalculated on a
representative data set.
-- sigma1: The covariance matrix over activations for generated samples.
-- sigma2: The covariance matrix over activations, precalculated on a
representative data set.
Returns:
-- : The Frechet Distance.
"""
assert mu1.shape == mu2.shape, \
'Training and test mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, \
'Training and test covariances have different dimensions'
diff = mu1 - mu2
# Run 50 itrs of newton-schulz to get the matrix sqrt of sigma1 dot sigma2
covmean = sqrt_newton_schulz(sigma1.mm(sigma2).unsqueeze(0), 50).squeeze()
out = (diff.dot(diff) + torch.trace(sigma1) + torch.trace(sigma2)
- 2 * torch.trace(covmean))
return out | Pytorch implementation of the Frechet Distance.
Taken from https://github.com/bioinf-jku/TTUR
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
-- mu2 : The sample mean over activations, precalculated on a
representative data set.
-- sigma1: The covariance matrix over activations for generated samples.
-- sigma2: The covariance matrix over activations, precalculated on a
representative data set.
Returns:
-- : The Frechet Distance.
| torch_calculate_frechet_distance | python | ajbrock/BigGAN-PyTorch | inception_utils.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/inception_utils.py | MIT |
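A corresponding sketch for the PyTorch variant; it assumes the same module also exposes the sqrt_newton_schulz helper the function calls internally, and the toy Gaussian statistics are illustrative only:

import torch
from inception_utils import torch_calculate_frechet_distance  # assumed import path

mu1, mu2 = torch.zeros(64), 0.1 * torch.ones(64)
sigma1, sigma2 = torch.eye(64), 1.5 * torch.eye(64)
fid = torch_calculate_frechet_distance(mu1, sigma1, mu2, sigma2)
print(float(fid))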
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped.
Returns:
PIL Image: Cropped image.
"""
size = (min(img.size), min(img.size))
# Only step forward along this edge if it's the long edge
i = (0 if size[0] == img.size[0]
else np.random.randint(low=0,high=img.size[0] - size[0]))
j = (0 if size[1] == img.size[1]
else np.random.randint(low=0,high=img.size[1] - size[1]))
return transforms.functional.crop(img, i, j, size[0], size[1]) |
Args:
img (PIL Image): Image to be cropped.
Returns:
PIL Image: Cropped image.
| __call__ | python | ajbrock/BigGAN-PyTorch | utils.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/utils.py | MIT |
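A hedged usage sketch: this __call__ belongs to the random long-edge crop transform in BigGAN-PyTorch's utils.py (RandomCropLongEdge is the assumed class name), which drops into a standard torchvision pipeline:

from torchvision import transforms
from utils import RandomCropLongEdge  # assumed class name and import path

train_transform = transforms.Compose([
    RandomCropLongEdge(),      # square crop taken along the longer edge
    transforms.Resize(128),
    transforms.ToTensor(),
])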
def log(self, record=None, **kwargs):
"""
Assumption: no newlines in the input.
"""
if record is None:
record = {}
record.update(kwargs)
record['_stamp'] = time.time()
with open(self.fname, 'a') as f:
f.write(json.dumps(record, ensure_ascii=True) + '\n') |
Assumption: no newlines in the input.
| log | python | ajbrock/BigGAN-PyTorch | utils.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/utils.py | MIT |
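A usage sketch for the enclosing MetricsLogger; the constructor is assumed to take the output filename, and each call appends one JSON record per line:

from utils import MetricsLogger  # assumed import path

logger = MetricsLogger('metrics.jsonl')
logger.log(itr=1000, IS_mean=42.0, IS_std=1.2, FID=11.3)
logger.log({'itr': 2000, 'FID': 9.8})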
def progress(items, desc='', total=None, min_delay=0.1, displaytype='s1k'):
"""
Returns a generator over `items`, printing the number and percentage of
items processed and the estimated remaining processing time before yielding
the next item. `total` gives the total number of items (required if `items`
has no length), and `min_delay` gives the minimum time in seconds between
subsequent prints. `desc` gives an optional prefix text (end with a space).
"""
total = total or len(items)
t_start = time.time()
t_last = 0
for n, item in enumerate(items):
t_now = time.time()
if t_now - t_last > min_delay:
print("\r%s%d/%d (%6.2f%%)" % (
desc, n+1, total, n / float(total) * 100), end=" ")
if n > 0:
if displaytype == 's1k': # minutes/seconds for 1000 iters
next_1000 = n + (1000 - n%1000)
t_done = t_now - t_start
t_1k = t_done / n * next_1000
outlist = list(divmod(t_done, 60)) + list(divmod(t_1k - t_done, 60))
print("(TE/ET1k: %d:%02d / %d:%02d)" % tuple(outlist), end=" ")
else:# displaytype == 'eta':
t_done = t_now - t_start
t_total = t_done / n * total
outlist = list(divmod(t_done, 60)) + list(divmod(t_total - t_done, 60))
print("(TE/ETA: %d:%02d / %d:%02d)" % tuple(outlist), end=" ")
sys.stdout.flush()
t_last = t_now
yield item
t_total = time.time() - t_start
print("\r%s%d/%d (100.00%%) (took %d:%02d)" % ((desc, total, total) +
divmod(t_total, 60))) |
Returns a generator over `items`, printing the number and percentage of
items processed and the estimated remaining processing time before yielding
the next item. `total` gives the total number of items (required if `items`
has no length), and `min_delay` gives the minimum time in seconds between
subsequent prints. `desc` gives an optional prefix text (end with a space).
| progress | python | ajbrock/BigGAN-PyTorch | utils.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/utils.py | MIT |
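A minimal usage sketch for progress; the sleep is a placeholder for one training step, and the import path is an assumption:

import time
from utils import progress  # assumed import path

for itr in progress(range(200), desc='Itr: ', displaytype='eta'):
    time.sleep(0.01)  # placeholder for one training step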
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = grad.new().resize_as_(grad).zero_()
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = grad.new().resize_as_(grad).zero_()
# Fp32 copy of the weights
state['fp32_p'] = p.data.float()
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
if group['weight_decay'] != 0:
grad = grad.add(group['weight_decay'], state['fp32_p'])
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
state['fp32_p'].addcdiv_(-step_size, exp_avg, denom)
p.data = state['fp32_p'].half()
return loss | Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
| step | python | ajbrock/BigGAN-PyTorch | utils.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/utils.py | MIT |
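A hedged sketch of the enclosing fp16 optimizer (Adam16 in BigGAN-PyTorch's utils.py): the tiny model and data are placeholders; the point is that parameters stay in half precision while the optimizer keeps fp32 master copies (a GPU is assumed for half-precision compute):

import torch
import torch.nn as nn
from utils import Adam16  # assumed import path

model = nn.Linear(16, 1).cuda().half()          # placeholder half-precision model
opt = Adam16(model.parameters(), lr=2e-4, betas=(0.0, 0.999), eps=1e-8)

x = torch.randn(8, 16).cuda().half()
loss = model(x).float().mean()
loss.backward()
opt.step()
opt.zero_grad()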
def _data_parallel_master(self, intermediates):
"""Reduce the sum and square-sum, compute the statistics, and broadcast it."""
# Always using same "device order" makes the ReduceAdd operation faster.
# Thanks to:: Tete Xiao (http://tetexiao.com/)
intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())
to_reduce = [i[1][:2] for i in intermediates]
to_reduce = [j for i in to_reduce for j in i] # flatten
target_gpus = [i[1].sum.get_device() for i in intermediates]
sum_size = sum([i[1].sum_size for i in intermediates])
sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)
broadcasted = Broadcast.apply(target_gpus, mean, inv_std)
# print('a')
# print(type(sum_), type(ssum), type(sum_size), sum_.shape, ssum.shape, sum_size)
# broadcasted = Broadcast.apply(target_gpus, sum_, ssum, torch.tensor(sum_size).float().to(sum_.device))
# print('b')
outputs = []
for i, rec in enumerate(intermediates):
outputs.append((rec[0], _MasterMessage(*broadcasted[i*2:i*2+2])))
# outputs.append((rec[0], _MasterMessage(*broadcasted[i*3:i*3+3])))
return outputs | Reduce the sum and square-sum, compute the statistics, and broadcast it. | _data_parallel_master | python | ajbrock/BigGAN-PyTorch | sync_batchnorm/batchnorm.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/sync_batchnorm/batchnorm.py | MIT |
def _compute_mean_std(self, sum_, ssum, size):
"""Compute the mean and standard-deviation with sum and square-sum. This method
also maintains the moving average on the master device."""
assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
mean = sum_ / size
sumvar = ssum - sum_ * mean
unbias_var = sumvar / (size - 1)
bias_var = sumvar / size
self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
return mean, torch.rsqrt(bias_var + self.eps)
# return mean, bias_var.clamp(self.eps) ** -0.5 | Compute the mean and standard-deviation with sum and square-sum. This method
also maintains the moving average on the master device. | _compute_mean_std | python | ajbrock/BigGAN-PyTorch | sync_batchnorm/batchnorm.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/sync_batchnorm/batchnorm.py | MIT |
def __init__(self, master_callback):
"""
Args:
master_callback: a callback to be invoked after having collected messages from slave devices.
"""
self._master_callback = master_callback
self._queue = queue.Queue()
self._registry = collections.OrderedDict()
self._activated = False |
Args:
master_callback: a callback to be invoked after having collected messages from slave devices.
| __init__ | python | ajbrock/BigGAN-PyTorch | sync_batchnorm/comm.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/sync_batchnorm/comm.py | MIT |
def register_slave(self, identifier):
"""
Register a slave device.
Args:
identifier: an identifier, usually the device id.
Returns: a `SlavePipe` object which can be used to communicate with the master device.
"""
if self._activated:
assert self._queue.empty(), 'Queue is not clean before next initialization.'
self._activated = False
self._registry.clear()
future = FutureResult()
self._registry[identifier] = _MasterRegistry(future)
return SlavePipe(identifier, self._queue, future) |
Register a slave device.
Args:
identifier: an identifier, usually the device id.
Returns: a `SlavePipe` object which can be used to communicate with the master device.
| register_slave | python | ajbrock/BigGAN-PyTorch | sync_batchnorm/comm.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/sync_batchnorm/comm.py | MIT |
def run_master(self, master_msg):
"""
Main entry for the master device in each forward pass.
The messages are first collected from each device (including the master device), and then
a callback will be invoked to compute the message to be sent back to each device
(including the master device).
Args:
master_msg: the message that the master wants to send to itself. This will be placed as the first
message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example.
Returns: the message to be sent back to the master device.
"""
self._activated = True
intermediates = [(0, master_msg)]
for i in range(self.nr_slaves):
intermediates.append(self._queue.get())
results = self._master_callback(intermediates)
assert results[0][0] == 0, 'The first result should belongs to the master.'
for i, res in results:
if i == 0:
continue
self._registry[i].result.put(res)
for i in range(self.nr_slaves):
assert self._queue.get() is True
return results[0][1] |
Main entry for the master device in each forward pass.
The messages are first collected from each device (including the master device), and then
a callback will be invoked to compute the message to be sent back to each device
(including the master device).
Args:
master_msg: the message that the master wants to send to itself. This will be placed as the first
message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example.
Returns: the message to be sent back to the master device.
| run_master | python | ajbrock/BigGAN-PyTorch | sync_batchnorm/comm.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/sync_batchnorm/comm.py | MIT |
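A stripped-down sketch of the SyncMaster round-trip, assuming SyncMaster is imported from sync_batchnorm/comm.py; with no registered slaves, run_master simply routes the master's own message through the callback:

from sync_batchnorm.comm import SyncMaster  # assumed import path

def echo_callback(intermediates):
    # intermediates is a list of (copy_id, message) pairs, master first;
    # return one (copy_id, result) pair per copy.
    return [(idx, msg) for idx, msg in intermediates]

master = SyncMaster(echo_callback)
print(master.run_master('hello'))  # -> 'hello'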
def execute_replication_callbacks(modules):
"""
Execute a replication callback `__data_parallel_replicate__` on each module created by original replication.
The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
Note that, as all modules are isomorphic, we assign each sub-module a context
(shared among multiple copies of this module on different devices).
Through this context, different copies can share some information.
We guarantee that the callback on the master copy (the first copy) will be called ahead of calling the callback
of any slave copies.
"""
master_copy = modules[0]
nr_modules = len(list(master_copy.modules()))
ctxs = [CallbackContext() for _ in range(nr_modules)]
for i, module in enumerate(modules):
for j, m in enumerate(module.modules()):
if hasattr(m, '__data_parallel_replicate__'):
m.__data_parallel_replicate__(ctxs[j], i) |
Execute a replication callback `__data_parallel_replicate__` on each module created by original replication.
The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
Note that, as all modules are isomorphic, we assign each sub-module a context
(shared among multiple copies of this module on different devices).
Through this context, different copies can share some information.
We guarantee that the callback on the master copy (the first copy) will be called ahead of calling the callback
of any slave copies.
| execute_replication_callbacks | python | ajbrock/BigGAN-PyTorch | sync_batchnorm/replicate.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/sync_batchnorm/replicate.py | MIT |
def patch_replication_callback(data_parallel):
"""
Monkey-patch an existing `DataParallel` object. Add the replication callback.
Useful when you have a customized `DataParallel` implementation.
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
> patch_replication_callback(sync_bn)
# this is equivalent to
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
"""
assert isinstance(data_parallel, DataParallel)
old_replicate = data_parallel.replicate
@functools.wraps(old_replicate)
def new_replicate(module, device_ids):
modules = old_replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
data_parallel.replicate = new_replicate |
Monkey-patch an existing `DataParallel` object. Add the replication callback.
Useful when you have a customized `DataParallel` implementation.
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
> patch_replication_callback(sync_bn)
# this is equivalent to
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
| patch_replication_callback | python | ajbrock/BigGAN-PyTorch | sync_batchnorm/replicate.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/sync_batchnorm/replicate.py | MIT |
def dump_tfhub_to_hdf5(module_path, hdf5_path, redownload=False):
"""Loads TFHub weights and saves them to intermediate HDF5 file.
Args:
module_path ([Path-like]): Path to TFHub module.
hdf5_path ([Path-like]): Path to output HDF5 file.
Returns:
[h5py.File]: Loaded hdf5 file containing module weights.
"""
if os.path.exists(hdf5_path) and (not redownload):
print('Loading BigGAN hdf5 file from:', hdf5_path)
return h5py.File(hdf5_path, 'r')
print('Loading BigGAN module from:', module_path)
tf.reset_default_graph()
hub.Module(module_path)
print('Loaded BigGAN module from:', module_path)
initializer = tf.global_variables_initializer()
sess = tf.Session()
sess.run(initializer)
print('Saving BigGAN weights to :', hdf5_path)
h5f = h5py.File(hdf5_path, 'w')
for var in tf.global_variables():
val = sess.run(var)
h5f.create_dataset(var.name, data=val)
print(f'Saving {var.name} with shape {val.shape}')
h5f.close()
return h5py.File(hdf5_path, 'r') | Loads TFHub weights and saves them to intermediate HDF5 file.
Args:
module_path ([Path-like]): Path to TFHub module.
hdf5_path ([Path-like]): Path to output HDF5 file.
Returns:
[h5py.File]: Loaded hdf5 file containing module weights.
| dump_tfhub_to_hdf5 | python | ajbrock/BigGAN-PyTorch | TFHub/converter.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/TFHub/converter.py | MIT |
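A usage sketch for the converter; the TFHub module path below is an assumption (a 128x128 BigGAN release), and TF1-era tensorflow plus tensorflow_hub and h5py must be installed:

h5f = dump_tfhub_to_hdf5('https://tfhub.dev/deepmind/biggan-128/2',  # assumed module path
                         'biggan-128.h5')
print(len(h5f.keys()), 'variables saved')
h5f.close()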
def read_img(t_imgfname, input_size, img_mean): # optional pre-processing arguments
"""Read one image and its corresponding mask with optional pre-processing.
Args:
input_queue: tf queue with paths to the image and its mask.
input_size: a tuple with (height, width) values.
If not given, return images of original size.
random_scale: whether to randomly scale the images prior
to random crop.
random_mirror: whether to randomly mirror the images prior
to random crop.
ignore_label: index of label to ignore during the training.
img_mean: vector of mean colour values.
Returns:
Two tensors: the decoded image and its mask.
"""
img_contents = tf.read_file(t_imgfname)
# img = tf.image.decode_image(img_contents, channels=3)
img = tf.image.decode_png(img_contents, channels=3)
img_r, img_g, img_b = tf.split(axis=2, num_or_size_splits=3, value=img)
img = tf.cast(tf.concat(axis=2, values=[img_b, img_g, img_r]), dtype=tf.float32)
# Extract mean.
img -= img_mean
if input_size is not None:
h, w = input_size
# Randomly scale the images and labels.
newshape = tf.squeeze(tf.stack([h, w]), squeeze_dims=[1])
img2 = tf.image.resize_images(img, newshape)
else:
img2 = tf.image.resize_images(img, tf.shape(img)[0:2,]*2)
return img2, img | Read one image and its corresponding mask with optional pre-processing.
Args:
input_queue: tf queue with paths to the image and its mask.
input_size: a tuple with (height, width) values.
If not given, return images of original size.
random_scale: whether to randomly scale the images prior
to random crop.
random_mirror: whether to randomly mirror the images prior
to random crop.
ignore_label: index of label to ignore during the training.
img_mean: vector of mean colour values.
Returns:
Two tensors: the decoded image and its mask.
| read_img | python | iyah4888/SIGGRAPH18SSS | main_hyper.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/main_hyper.py | MIT |
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description="DeepLab-ResNet Network")
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
help="Number of images sent to the network in one step.")
parser.add_argument("--feat-dir", type=str, default=FEATSAVE_DIR,
help="Path to the directory to save the semantic embedding vector map.")
parser.add_argument("--data-dir", type=str, default=DATA_DIRECTORY,
help="Path to the directory containing the PASCAL VOC dataset.")
parser.add_argument("--grad-update-every", type=int, default=GRAD_UPDATE_EVERY,
help="Number of steps after which gradient update is applied.")
parser.add_argument("--ignore-label", type=int, default=IGNORE_LABEL,
help="The index of the label to ignore during the training.")
parser.add_argument("--input-size", type=str, default=INPUT_SIZE,
help="Comma-separated string with height and width of images.")
# parser.add_argument("--is-training", action="store_true",
parser.add_argument("--is-training", action="store_false",
help="Whether to update the running means and variances during the training.")
parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
help="Base learning rate for training with polynomial decay.")
parser.add_argument("--momentum", type=float, default=MOMENTUM,
help="Momentum component of the optimiser.")
parser.add_argument("--not-restore-last", action="store_true",
help="Whether to not restore last (FC) layers.")
parser.add_argument("--num-classes", type=int, default=NUM_CLASSES,
help="Number of classes to predict (including background).")
parser.add_argument("--num-steps", type=int, default=NUM_STEPS,
help="Number of training steps.")
parser.add_argument("--power", type=float, default=POWER,
help="Decay parameter to compute the learning rate.")
parser.add_argument("--random-mirror", action="store_true",
help="Whether to randomly mirror the inputs during the training.")
parser.add_argument("--random-scale", action="store_true",
help="Whether to randomly scale the inputs during the training.")
parser.add_argument("--random-seed", type=int, default=RANDOM_SEED,
help="Random seed to have reproducible results.")
parser.add_argument("--restore-from", type=str, default=RESTORE_FROM,
help="Where restore model parameters from.")
parser.add_argument("--save-num-images", type=int, default=SAVE_NUM_IMAGES,
help="How many images to save.")
parser.add_argument("--save-pred-every", type=int, default=SAVE_PRED_EVERY,
help="Save summaries and checkpoint every often.")
parser.add_argument("--snapshot-dir", type=str, default=SNAPSHOT_DIR,
help="Where to save snapshots of the model.")
parser.add_argument("--weight-decay", type=float, default=WEIGHT_DECAY,
help="Regularisation parameter for L2-loss.")
return parser.parse_args() | Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
| get_arguments | python | iyah4888/SIGGRAPH18SSS | parse_opt.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/parse_opt.py | MIT |
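A minimal sketch: every flag has a default, so calling the parser without extra command-line arguments yields a usable namespace:

from parse_opt import get_arguments  # assumed import path

args = get_arguments()
print(args.batch_size, args.input_size, args.learning_rate, args.snapshot_dir)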
def __init__(self, sess, args):
"""Initialize the parameters.
sess: tensorflow session
"""
self.sess = sess
self.batch_size = args.batch_size
self.args = args
# parameters used to save a checkpoint
self.dataset = "Hypcol"
self.options = []
self._attrs = ['batch_size', 'dataset']
self.build_model() | Initialize the parameters.
sess: tensorflow session
| __init__ | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/hc_deeplab.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/hc_deeplab.py | MIT |
def image_scaling(img, label):
"""
Randomly scales the images between 0.5 to 1.5 times the original size.
Args:
img: Training image to scale.
label: Segmentation mask to scale.
"""
scale = tf.random_uniform([1], minval=0.5, maxval=1.5, dtype=tf.float32, seed=None)
h_new = tf.to_int32(tf.multiply(tf.to_float(tf.shape(img)[0]), scale))
w_new = tf.to_int32(tf.multiply(tf.to_float(tf.shape(img)[1]), scale))
new_shape = tf.squeeze(tf.stack([h_new, w_new]), squeeze_dims=[1])
img = tf.image.resize_images(img, new_shape)
label = tf.image.resize_nearest_neighbor(tf.expand_dims(label, 0), new_shape)
label = tf.squeeze(label, squeeze_dims=[0])
return img, label |
Randomly scales the images between 0.5 to 1.5 times the original size.
Args:
img: Training image to scale.
label: Segmentation mask to scale.
| image_scaling | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/image_reader.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/image_reader.py | MIT |
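A TF1-style sketch that runs the random-scaling op on a dummy image/label pair; the shapes and the import path are assumptions:

import tensorflow as tf
from deeplab_resnet.image_reader import image_scaling  # assumed import path

img = tf.random_uniform([321, 321, 3])
label = tf.zeros([321, 321, 1], dtype=tf.uint8)
img_s, label_s = image_scaling(img, label)
with tf.Session() as sess:
    i, l = sess.run([img_s, label_s])
    print(i.shape, l.shape)  # both rescaled by the same random factor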
def image_mirroring(img, label):
"""
Randomly mirrors the images.
Args:
img: Training image to mirror.
label: Segmentation mask to mirror.
"""
distort_left_right_random = tf.random_uniform([1], 0, 1.0, dtype=tf.float32)[0]
mirror = tf.less(tf.stack([1.0, distort_left_right_random, 1.0]), 0.5)
mirror = tf.boolean_mask([0, 1, 2], mirror)
img = tf.reverse(img, mirror)
label = tf.reverse(label, mirror)
return img, label |
Randomly mirrors the images.
Args:
img: Training image to mirror.
label: Segmentation mask to mirror.
| image_mirroring | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/image_reader.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/image_reader.py | MIT |
def random_crop_and_pad_image_and_labels(image, label, crop_h, crop_w, ignore_label=255):
"""
Randomly crops and pads the input images.
Args:
image: Training image to crop/ pad.
label: Segmentation mask to crop/ pad.
crop_h: Height of cropped segment.
crop_w: Width of cropped segment.
ignore_label: Label to ignore during the training.
"""
label = tf.cast(label, dtype=tf.float32)
label = label - ignore_label # Needs to be subtracted and later added due to 0 padding.
combined = tf.concat(axis=2, values=[image, label])
image_shape = tf.shape(image)
combined_pad = tf.image.pad_to_bounding_box(combined, 0, 0, tf.maximum(crop_h, image_shape[0]), tf.maximum(crop_w, image_shape[1]))
last_image_dim = tf.shape(image)[-1]
last_label_dim = tf.shape(label)[-1]
combined_crop = tf.random_crop(combined_pad, [crop_h,crop_w,4])
img_crop = combined_crop[:, :, :last_image_dim]
label_crop = combined_crop[:, :, last_image_dim:]
label_crop = label_crop + ignore_label
label_crop = tf.cast(label_crop, dtype=tf.uint8)
# Set static shape so that tensorflow knows shape at compile time.
img_crop.set_shape((crop_h, crop_w, 3))
label_crop.set_shape((crop_h,crop_w, 1))
return img_crop, label_crop |
Randomly crops and pads the input images.
Args:
image: Training image to crop/ pad.
label: Segmentation mask to crop/ pad.
crop_h: Height of cropped segment.
crop_w: Width of cropped segment.
ignore_label: Label to ignore during the training.
| random_crop_and_pad_image_and_labels | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/image_reader.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/image_reader.py | MIT |
def read_labeled_image_list(data_dir, data_list):
"""Reads txt file containing paths to images and ground truth masks.
Args:
data_dir: path to the directory with images and masks.
data_list: path to the file with lines of the form '/path/to/image /path/to/mask'.
Returns:
Two lists with all file names for images and masks, respectively.
"""
f = open(data_list, 'r')
images = []
masks = []
for line in f:
try:
image, mask = line.strip("\n").split(' ')
except ValueError: # Adhoc for test.
image = mask = line.strip("\n")
images.append(os.path.join(data_dir, image))
masks.append(os.path.join(data_dir, mask))
return images, masks | Reads txt file containing paths to images and ground truth masks.
Args:
data_dir: path to the directory with images and masks.
data_list: path to the file with lines of the form '/path/to/image /path/to/mask'.
Returns:
Two lists with all file names for images and masks, respectively.
| read_labeled_image_list | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/image_reader.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/image_reader.py | MIT |
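A minimal sketch of the expected list-file format and call; the directory and file paths are assumptions:

from deeplab_resnet.image_reader import read_labeled_image_list  # assumed import path

# train.txt holds one space-separated pair per line, relative to data_dir, e.g.
#   JPEGImages/2007_000032.jpg SegmentationClassAug/2007_000032.png
images, masks = read_labeled_image_list('/data/VOC2012', 'dataset/train.txt')
print(len(images), images[0], masks[0])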
def read_data_list(data_dir, data_list, ext):
"""Reads txt file containing paths to images and ground truth masks.
Args:
data_dir: path to the directory with images and masks.
data_list: path to the file with lines of the form '/path/to/image /path/to/mask'.
Returns:
Two lists with all file names for images and masks, respectively.
"""
f = open(data_list, 'r')
dataflist = []
for line in f:
try:
dataname = line.strip("\n")
except ValueError: # Adhoc for test.
dataname = line.strip("\n")
dataflist.append(os.path.join(data_dir, dataname+ext))
return dataflist | Reads txt file containing paths to images and ground truth masks.
Args:
data_dir: path to the directory with images and masks.
data_list: path to the file with lines of the form '/path/to/image /path/to/mask'.
Returns:
Two lists with all file names for images and masks, respectively.
| read_data_list | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/image_reader.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/image_reader.py | MIT |
def read_images_from_disk(input_queue, input_size, random_scale, random_mirror, ignore_label, img_mean): # optional pre-processing arguments
"""Read one image and its corresponding mask with optional pre-processing.
Args:
input_queue: tf queue with paths to the image and its mask.
input_size: a tuple with (height, width) values.
If not given, return images of original size.
random_scale: whether to randomly scale the images prior
to random crop.
random_mirror: whether to randomly mirror the images prior
to random crop.
ignore_label: index of label to ignore during the training.
img_mean: vector of mean colour values.
Returns:
Two tensors: the decoded image and its mask.
"""
img_contents = tf.read_file(input_queue[0])
label_contents = tf.read_file(input_queue[1])
img = tf.image.decode_jpeg(img_contents, channels=3)
img_r, img_g, img_b = tf.split(axis=2, num_or_size_splits=3, value=img)
img = tf.cast(tf.concat(axis=2, values=[img_b, img_g, img_r]), dtype=tf.float32)
# Extract mean.
img -= img_mean
label = tf.image.decode_png(label_contents, channels=1)
if input_size is not None:
h, w = input_size
# Randomly scale the images and labels.
if random_scale:
img, label = image_scaling(img, label)
# Randomly mirror the images and labels.
if random_mirror:
img, label = image_mirroring(img, label)
# Randomly crops the images and labels.
img, label = random_crop_and_pad_image_and_labels(img, label, h, w, ignore_label)
return img, label | Read one image and its corresponding mask with optional pre-processing.
Args:
input_queue: tf queue with paths to the image and its mask.
input_size: a tuple with (height, width) values.
If not given, return images of original size.
random_scale: whether to randomly scale the images prior
to random crop.
random_mirror: whether to randomly mirror the images prior
to random crop.
ignore_label: index of label to ignore during the training.
img_mean: vector of mean colour values.
Returns:
Two tensors: the decoded image and its mask.
| read_images_from_disk | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/image_reader.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/image_reader.py | MIT |
def __init__(self, data_dir, data_list, input_size,
random_scale, random_mirror, ignore_label, img_mean, coord):
'''Initialise an ImageReader.
Args:
data_dir: path to the directory with images and masks.
data_list: path to the file with lines of the form '/path/to/image /path/to/mask'.
input_size: a tuple with (height, width) values, to which all the images will be resized.
random_scale: whether to randomly scale the images prior to random crop.
random_mirror: whether to randomly mirror the images prior to random crop.
ignore_label: index of label to ignore during the training.
img_mean: vector of mean colour values.
coord: TensorFlow queue coordinator.
'''
self.data_dir = data_dir
self.data_list = data_list
self.input_size = input_size
self.coord = coord
self.image_list, self.label_list = read_labeled_image_list(self.data_dir, self.data_list)
self.images = tf.convert_to_tensor(self.image_list, dtype=tf.string)
self.labels = tf.convert_to_tensor(self.label_list, dtype=tf.string)
self.queue = tf.train.slice_input_producer([self.images, self.labels],
shuffle=input_size is not None) # not shuffling if it is val
self.image, self.label = read_images_from_disk(self.queue, self.input_size, random_scale, random_mirror, ignore_label, img_mean) | Initialise an ImageReader.
Args:
data_dir: path to the directory with images and masks.
data_list: path to the file with lines of the form '/path/to/image /path/to/mask'.
input_size: a tuple with (height, width) values, to which all the images will be resized.
random_scale: whether to randomly scale the images prior to random crop.
random_mirror: whether to randomly mirror the images prior to random crop.
ignore_label: index of label to ignore during the training.
img_mean: vector of mean colour values.
coord: TensorFlow queue coordinator.
| __init__ | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/image_reader.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/image_reader.py | MIT |
def dequeue(self, num_elements):
'''Pack images and labels into a batch.
Args:
num_elements: the batch size.
Returns:
Two tensors of size (batch_size, h, w, {3, 1}) for images and masks.'''
image_batch, label_batch = tf.train.batch([self.image, self.label],
num_elements)
return image_batch, label_batch | Pack images and labels into a batch.
Args:
num_elements: the batch size.
Returns:
Two tensors of size (batch_size, h, w, {3, 1}) for images and masks. | dequeue | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/image_reader.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/image_reader.py | MIT |
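A hedged sketch of how ImageReader and dequeue are typically wired into a TF1 training script; IMG_MEAN and the dataset paths are assumptions:

import numpy as np
import tensorflow as tf
from deeplab_resnet.image_reader import ImageReader  # assumed import path

IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434), dtype=np.float32)  # assumed BGR mean
coord = tf.train.Coordinator()
with tf.name_scope('create_inputs'):
    reader = ImageReader('/data/VOC2012', 'dataset/train.txt', (321, 321),
                         random_scale=True, random_mirror=True,
                         ignore_label=255, img_mean=IMG_MEAN, coord=coord)
    image_batch, label_batch = reader.dequeue(10)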
def read_an_image_from_disk(t_imgfname, t_labelfname, input_size, random_scale, random_mirror, ignore_label, img_mean): # optional pre-processing arguments
"""Read one image and its corresponding mask with optional pre-processing.
Args:
input_queue: tf queue with paths to the image and its mask.
input_size: a tuple with (height, width) values.
If not given, return images of original size.
random_scale: whether to randomly scale the images prior
to random crop.
random_mirror: whether to randomly mirror the images prior
to random crop.
ignore_label: index of label to ignore during the training.
img_mean: vector of mean colour values.
Returns:
Two tensors: the decoded image and its mask.
"""
img_contents = tf.read_file(t_imgfname)
lbm_contents = tf.read_file(t_labelfname)
img = tf.image.decode_jpeg(img_contents, channels=3)
img_r, img_g, img_b = tf.split(axis=2, num_or_size_splits=3, value=img)
img = tf.cast(tf.concat(axis=2, values=[img_b, img_g, img_r]), dtype=tf.float32)
# Extract mean.
img -= img_mean
label = tf.image.decode_png(lbm_contents, channels=1)
if input_size is not None:
h, w = input_size
# Randomly scale the images and labels.
if random_scale:
img, label = image_scaling(img, label)
# Randomly mirror the images and labels.
if random_mirror:
img, label = image_mirroring(img, label)
# Randomly crops the images and labels.
img, label = random_crop_and_pad_image_and_labels(img, label, h, w, ignore_label)
return img, label | Read one image and its corresponding mask with optional pre-processing.
Args:
input_queue: tf queue with paths to the image and its mask.
input_size: a tuple with (height, width) values.
If not given, return images of original size.
random_scale: whether to randomly scale the images prior
to random crop.
random_mirror: whether to randomly mirror the images prior
to random crop.
ignore_label: index of label to ignore during the training.
img_mean: vector of mean colour values.
Returns:
Two tensors: the decoded image and its mask.
| read_an_image_from_disk | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/image_reader.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/image_reader.py | MIT |
def setup(self, is_training, num_classes):
'''Network definition.
Args:
is_training: whether to update the running mean and variance of the batch normalisation layer.
If the batch size is small, it is better to keep the running mean and variance of
the pre-trained model frozen.
num_classes: number of classes to predict (including background).
'''
(self.feed('data')
.conv(7, 7, 64, 2, 2, biased=False, relu=False, name='conv1')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn_conv1')
.max_pool(3, 3, 2, 2, name='pool1')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch1')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn2a_branch1'))
(self.feed('pool1')
.conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2a_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn2a_branch2a')
.conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2a_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn2a_branch2b')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn2a_branch2c'))
(self.feed('bn2a_branch1',
'bn2a_branch2c')
.add(name='res2a')
.relu(name='res2a_relu')
.conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2b_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn2b_branch2a')
.conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2b_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn2b_branch2b')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2b_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn2b_branch2c'))
(self.feed('res2a_relu',
'bn2b_branch2c')
.add(name='res2b')
.relu(name='res2b_relu')
.conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2c_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn2c_branch2a')
.conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2c_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn2c_branch2b')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2c_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn2c_branch2c'))
(self.feed('res2b_relu',
'bn2c_branch2c')
.add(name='res2c')
.relu(name='res2c_relu')
.conv(1, 1, 512, 2, 2, biased=False, relu=False, name='res3a_branch1')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn3a_branch1'))
(self.feed('res2c_relu')
.conv(1, 1, 128, 2, 2, biased=False, relu=False, name='res3a_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3a_branch2a')
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3a_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3a_branch2b')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3a_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn3a_branch2c'))
(self.feed('bn3a_branch1',
'bn3a_branch2c')
.add(name='res3a')
.relu(name='res3a_relu')
.conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3b1_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3b1_branch2a')
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3b1_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3b1_branch2b')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3b1_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn3b1_branch2c'))
(self.feed('res3a_relu',
'bn3b1_branch2c')
.add(name='res3b1')
.relu(name='res3b1_relu')
.conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3b2_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3b2_branch2a')
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3b2_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3b2_branch2b')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3b2_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn3b2_branch2c'))
(self.feed('res3b1_relu',
'bn3b2_branch2c')
.add(name='res3b2')
.relu(name='res3b2_relu')
.conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3b3_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3b3_branch2a')
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3b3_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3b3_branch2b')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3b3_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn3b3_branch2c'))
(self.feed('res3b2_relu',
'bn3b3_branch2c')
.add(name='res3b3')
.relu(name='res3b3_relu')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4a_branch1')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4a_branch1'))
(self.feed('res3b3_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4a_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4a_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4a_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4a_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4a_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4a_branch2c'))
(self.feed('bn4a_branch1',
'bn4a_branch2c')
.add(name='res4a')
.relu(name='res4a_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b1_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b1_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b1_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b1_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b1_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b1_branch2c'))
(self.feed('res4a_relu',
'bn4b1_branch2c')
.add(name='res4b1')
.relu(name='res4b1_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b2_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b2_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b2_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b2_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b2_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b2_branch2c'))
(self.feed('res4b1_relu',
'bn4b2_branch2c')
.add(name='res4b2')
.relu(name='res4b2_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b3_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b3_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b3_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b3_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b3_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b3_branch2c'))
(self.feed('res4b2_relu',
'bn4b3_branch2c')
.add(name='res4b3')
.relu(name='res4b3_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b4_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b4_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b4_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b4_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b4_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b4_branch2c'))
(self.feed('res4b3_relu',
'bn4b4_branch2c')
.add(name='res4b4')
.relu(name='res4b4_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b5_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b5_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b5_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b5_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b5_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b5_branch2c'))
(self.feed('res4b4_relu',
'bn4b5_branch2c')
.add(name='res4b5')
.relu(name='res4b5_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b6_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b6_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b6_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b6_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b6_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b6_branch2c'))
(self.feed('res4b5_relu',
'bn4b6_branch2c')
.add(name='res4b6')
.relu(name='res4b6_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b7_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b7_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b7_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b7_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b7_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b7_branch2c'))
(self.feed('res4b6_relu',
'bn4b7_branch2c')
.add(name='res4b7')
.relu(name='res4b7_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b8_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b8_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b8_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b8_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b8_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b8_branch2c'))
(self.feed('res4b7_relu',
'bn4b8_branch2c')
.add(name='res4b8')
.relu(name='res4b8_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b9_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b9_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b9_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b9_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b9_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b9_branch2c'))
(self.feed('res4b8_relu',
'bn4b9_branch2c')
.add(name='res4b9')
.relu(name='res4b9_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b10_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b10_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b10_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b10_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b10_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b10_branch2c'))
(self.feed('res4b9_relu',
'bn4b10_branch2c')
.add(name='res4b10')
.relu(name='res4b10_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b11_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b11_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b11_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b11_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b11_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b11_branch2c'))
(self.feed('res4b10_relu',
'bn4b11_branch2c')
.add(name='res4b11')
.relu(name='res4b11_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b12_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b12_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b12_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b12_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b12_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b12_branch2c'))
(self.feed('res4b11_relu',
'bn4b12_branch2c')
.add(name='res4b12')
.relu(name='res4b12_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b13_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b13_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b13_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b13_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b13_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b13_branch2c'))
(self.feed('res4b12_relu',
'bn4b13_branch2c')
.add(name='res4b13')
.relu(name='res4b13_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b14_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b14_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b14_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b14_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b14_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b14_branch2c'))
(self.feed('res4b13_relu',
'bn4b14_branch2c')
.add(name='res4b14')
.relu(name='res4b14_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b15_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b15_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b15_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b15_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b15_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b15_branch2c'))
(self.feed('res4b14_relu',
'bn4b15_branch2c')
.add(name='res4b15')
.relu(name='res4b15_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b16_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b16_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b16_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b16_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b16_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b16_branch2c'))
(self.feed('res4b15_relu',
'bn4b16_branch2c')
.add(name='res4b16')
.relu(name='res4b16_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b17_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b17_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b17_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b17_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b17_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b17_branch2c'))
(self.feed('res4b16_relu',
'bn4b17_branch2c')
.add(name='res4b17')
.relu(name='res4b17_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b18_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b18_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b18_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b18_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b18_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b18_branch2c'))
(self.feed('res4b17_relu',
'bn4b18_branch2c')
.add(name='res4b18')
.relu(name='res4b18_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b19_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b19_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b19_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b19_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b19_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b19_branch2c'))
(self.feed('res4b18_relu',
'bn4b19_branch2c')
.add(name='res4b19')
.relu(name='res4b19_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b20_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b20_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b20_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b20_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b20_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b20_branch2c'))
(self.feed('res4b19_relu',
'bn4b20_branch2c')
.add(name='res4b20')
.relu(name='res4b20_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b21_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b21_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b21_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b21_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b21_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b21_branch2c'))
(self.feed('res4b20_relu',
'bn4b21_branch2c')
.add(name='res4b21')
.relu(name='res4b21_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b22_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b22_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b22_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b22_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b22_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b22_branch2c'))
(self.feed('res4b21_relu',
'bn4b22_branch2c')
.add(name='res4b22')
.relu(name='res4b22_relu')
.conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5a_branch1')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn5a_branch1'))
(self.feed('res4b22_relu')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5a_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn5a_branch2a')
.atrous_conv(3, 3, 512, 4, padding='SAME', biased=False, relu=False, name='res5a_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn5a_branch2b')
.conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5a_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn5a_branch2c'))
(self.feed('bn5a_branch1',
'bn5a_branch2c')
.add(name='res5a')
.relu(name='res5a_relu')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5b_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn5b_branch2a')
.atrous_conv(3, 3, 512, 4, padding='SAME', biased=False, relu=False, name='res5b_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn5b_branch2b')
.conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5b_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn5b_branch2c'))
(self.feed('res5a_relu',
'bn5b_branch2c')
.add(name='res5b')
.relu(name='res5b_relu')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5c_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn5c_branch2a')
.atrous_conv(3, 3, 512, 4, padding='SAME', biased=False, relu=False, name='res5c_branch2b')
.batch_normalization(activation_fn=tf.nn.relu, name='bn5c_branch2b', is_training=is_training)
.conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5c_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn5c_branch2c'))
(self.feed('res5b_relu',
'bn5c_branch2c')
.add(name='res5c')
.relu(name='res5c_relu')
.atrous_conv(3, 3, num_classes, 6, padding='SAME', relu=False, name='fc1_voc12_c0'))
(self.feed('res5c_relu')
.atrous_conv(3, 3, num_classes, 12, padding='SAME', relu=False, name='fc1_voc12_c1'))
(self.feed('res5c_relu')
.atrous_conv(3, 3, num_classes, 18, padding='SAME', relu=False, name='fc1_voc12_c2'))
(self.feed('res5c_relu')
.atrous_conv(3, 3, num_classes, 24, padding='SAME', relu=False, name='fc1_voc12_c3'))
(self.feed('fc1_voc12_c0',
'fc1_voc12_c1',
'fc1_voc12_c2',
'fc1_voc12_c3')
.add(name='fc1_voc12')) | Network definition.
Args:
is_training: whether to update the running mean and variance of the batch normalisation layer.
If the batch size is small, it is better to keep the running mean and variance of
        the pre-trained model frozen.
num_classes: number of classes to predict (including background).
| setup | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/model.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/model.py | MIT |
def decode_labels(mask, num_images=1, num_classes=21):
"""Decode batch of segmentation masks.
Args:
mask: result of inference after taking argmax.
num_images: number of images to decode from the batch.
num_classes: number of classes to predict (including background).
Returns:
A batch with num_images RGB images of the same size as the input.
"""
n, h, w, c = mask.shape
assert(n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' % (n, num_images)
outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)
for i in range(num_images):
img = Image.new('RGB', (len(mask[i, 0]), len(mask[i])))
pixels = img.load()
for j_, j in enumerate(mask[i, :, :, 0]):
for k_, k in enumerate(j):
if k < num_classes:
pixels[k_,j_] = label_colours[k]
outputs[i] = np.array(img)
return outputs | Decode batch of segmentation masks.
Args:
mask: result of inference after taking argmax.
num_images: number of images to decode from the batch.
num_classes: number of classes to predict (including background).
Returns:
A batch with num_images RGB images of the same size as the input.
| decode_labels | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/utils.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/utils.py | MIT |
def prepare_label(input_batch, new_size, num_classes, one_hot=True):
"""Resize masks and perform one-hot encoding.
Args:
input_batch: input tensor of shape [batch_size H W 1].
new_size: a tensor with new height and width.
num_classes: number of classes to predict (including background).
one_hot: whether perform one-hot encoding.
Returns:
Outputs a tensor of shape [batch_size h w 21]
with last dimension comprised of 0's and 1's only.
"""
with tf.name_scope('label_encode'):
input_batch = tf.image.resize_nearest_neighbor(input_batch, new_size) # as labels are integer numbers, need to use NN interp.
input_batch = tf.squeeze(input_batch, squeeze_dims=[3]) # reducing the channel dimension.
if one_hot:
input_batch = tf.one_hot(input_batch, depth=num_classes)
return input_batch | Resize masks and perform one-hot encoding.
Args:
input_batch: input tensor of shape [batch_size H W 1].
new_size: a tensor with new height and width.
num_classes: number of classes to predict (including background).
one_hot: whether perform one-hot encoding.
Returns:
Outputs a tensor of shape [batch_size h w 21]
with last dimension comprised of 0's and 1's only.
| prepare_label | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/utils.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/utils.py | MIT |
def inv_preprocess(imgs, num_images, img_mean):
"""Inverse preprocessing of the batch of images.
Add the mean vector and convert from BGR to RGB.
Args:
imgs: batch of input images.
num_images: number of images to apply the inverse transformations on.
img_mean: vector of mean colour values.
Returns:
The batch of the size num_images with the same spatial dimensions as the input.
"""
n, h, w, c = imgs.shape
assert(n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' % (n, num_images)
outputs = np.zeros((num_images, h, w, c), dtype=np.uint8)
for i in range(num_images):
outputs[i] = (imgs[i] + img_mean)[:, :, ::-1].astype(np.uint8)
return outputs | Inverse preprocessing of the batch of images.
Add the mean vector and convert from BGR to RGB.
Args:
imgs: batch of input images.
num_images: number of images to apply the inverse transformations on.
img_mean: vector of mean colour values.
Returns:
The batch of the size num_images with the same spatial dimensions as the input.
| inv_preprocess | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/utils.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/utils.py | MIT |
def __init__(self, def_path, phase='test'):
'''
def_path: Path to the model definition (.prototxt)
data_path: Path to the model data (.caffemodel)
phase: Either 'test' or 'train'. Used for filtering phase-specific nodes.
'''
self.def_path = def_path
self.phase = phase
self.load() |
def_path: Path to the model definition (.prototxt)
data_path: Path to the model data (.caffemodel)
phase: Either 'test' or 'train'. Used for filtering phase-specific nodes.
| __init__ | python | iyah4888/SIGGRAPH18SSS | kaffe/graph.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/graph.py | MIT |
def load(self):
'''Load the layer definitions from the prototxt.'''
self.params = get_caffe_resolver().NetParameter()
with open(self.def_path, 'rb') as def_file:
text_format.Merge(def_file.read(), self.params) | Load the layer definitions from the prototxt. | load | python | iyah4888/SIGGRAPH18SSS | kaffe/graph.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/graph.py | MIT |
def filter_layers(self, layers):
'''Filter out layers based on the current phase.'''
phase_map = {0: 'train', 1: 'test'}
filtered_layer_names = set()
filtered_layers = []
for layer in layers:
phase = self.phase
if len(layer.include):
phase = phase_map[layer.include[0].phase]
if len(layer.exclude):
phase = phase_map[1 - layer.include[0].phase]
exclude = (phase != self.phase)
# Dropout layers appear in a fair number of Caffe
# test-time networks. These are just ignored. We'll
# filter them out here.
if (not exclude) and (phase == 'test'):
exclude = (layer.type == LayerType.Dropout)
if not exclude:
filtered_layers.append(layer)
# Guard against dupes.
assert layer.name not in filtered_layer_names
filtered_layer_names.add(layer.name)
return filtered_layers | Filter out layers based on the current phase. | filter_layers | python | iyah4888/SIGGRAPH18SSS | kaffe/graph.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/graph.py | MIT |
def make_node(self, layer):
'''Create a graph node for the given layer.'''
kind = NodeKind.map_raw_kind(layer.type)
if kind is None:
raise KaffeError('Unknown layer type encountered: %s' % layer.type)
# We want to use the layer's top names (the "output" names), rather than the
        # name attribute, which is more of a readability thing than a functional one.
# Other layers will refer to a node by its "top name".
return Node(layer.name, kind, layer=layer) | Create a graph node for the given layer. | make_node | python | iyah4888/SIGGRAPH18SSS | kaffe/graph.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/graph.py | MIT |
def make_input_nodes(self):
'''
Create data input nodes.
This method is for old-style inputs, where the input specification
        was not treated as a first-class layer in the prototxt.
Newer models use the "Input layer" type.
'''
nodes = [Node(name, NodeKind.Data) for name in self.params.input]
if len(nodes):
input_dim = list(map(int, self.params.input_dim))
if not input_dim:
if len(self.params.input_shape) > 0:
input_dim = list(map(int, self.params.input_shape[0].dim))
else:
raise KaffeError('Dimensions for input not specified.')
for node in nodes:
node.output_shape = tuple(input_dim)
return nodes |
Create data input nodes.
This method is for old-style inputs, where the input specification
was not treated as a first-class layer in the prototext.
Newer models use the "Input layer" type.
| make_input_nodes | python | iyah4888/SIGGRAPH18SSS | kaffe/graph.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/graph.py | MIT |
def build(self):
'''
Builds the graph from the Caffe layer definitions.
'''
# Get the layers
layers = self.params.layers or self.params.layer
# Filter out phase-excluded layers
layers = self.filter_layers(layers)
# Get any separately-specified input layers
nodes = self.make_input_nodes()
nodes += [self.make_node(layer) for layer in layers]
# Initialize the graph
graph = Graph(nodes=nodes, name=self.params.name)
# Connect the nodes
#
# A note on layers and outputs:
# In Caffe, each layer can produce multiple outputs ("tops") from a set of inputs
# ("bottoms"). The bottoms refer to other layers' tops. The top can rewrite a bottom
# (in case of in-place operations). Note that the layer's name is not used for establishing
# any connectivity. It's only used for data association. By convention, a layer with a
# single top will often use the same name (although this is not required).
#
# The current implementation only supports single-output nodes (note that a node can still
# have multiple children, since multiple child nodes can refer to the single top's name).
node_outputs = {}
for layer in layers:
node = graph.get_node(layer.name)
for input_name in layer.bottom:
assert input_name != layer.name
parent_node = node_outputs.get(input_name)
if (parent_node is None) or (parent_node == node):
parent_node = graph.get_node(input_name)
node.add_parent(parent_node)
if len(layer.top)>1:
raise KaffeError('Multiple top nodes are not supported.')
for output_name in layer.top:
if output_name == layer.name:
# Output is named the same as the node. No further action required.
continue
# There are two possibilities here:
#
# Case 1: output_name refers to another node in the graph.
# This is an "in-place operation" that overwrites an existing node.
# This would create a cycle in the graph. We'll undo the in-placing
# by substituting this node wherever the overwritten node is referenced.
#
# Case 2: output_name violates the convention layer.name == output_name.
                # Since we are working in the single-output regime, we can simply rename it to
# match the layer name.
#
                # In both cases, future references to this top are re-routed to this node.
node_outputs[output_name] = node
graph.compute_output_shapes()
return graph |
Builds the graph from the Caffe layer definitions.
| build | python | iyah4888/SIGGRAPH18SSS | kaffe/graph.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/graph.py | MIT |
def load(self, data_path, session, ignore_missing=False):
'''Load network weights.
data_path: The path to the numpy-serialized network weights
session: The current TensorFlow session
ignore_missing: If true, serialized weights for missing layers are ignored.
'''
data_dict = np.load(data_path).item()
for op_name in data_dict:
with tf.variable_scope(op_name, reuse=True):
for param_name, data in data_dict[op_name].items():
try:
var = tf.get_variable(param_name)
session.run(var.assign(data))
except ValueError:
if not ignore_missing:
raise | Load network weights.
data_path: The path to the numpy-serialized network weights
session: The current TensorFlow session
ignore_missing: If true, serialized weights for missing layers are ignored.
| load | python | iyah4888/SIGGRAPH18SSS | kaffe/tensorflow/network.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/tensorflow/network.py | MIT |
def feed(self, *args):
'''Set the input(s) for the next operation by replacing the terminal nodes.
The arguments can be either layer names or the actual layers.
'''
assert len(args) != 0
self.terminals = []
for fed_layer in args:
if isinstance(fed_layer, str):
try:
fed_layer = self.layers[fed_layer]
except KeyError:
raise KeyError('Unknown layer name fed: %s' % fed_layer)
self.terminals.append(fed_layer)
return self | Set the input(s) for the next operation by replacing the terminal nodes.
The arguments can be either layer names or the actual layers.
| feed | python | iyah4888/SIGGRAPH18SSS | kaffe/tensorflow/network.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/tensorflow/network.py | MIT |
def get_unique_name(self, prefix):
'''Returns an index-suffixed unique name for the given prefix.
This is used for auto-generating layer names based on the type-prefix.
'''
ident = sum(t.startswith(prefix) for t, _ in list(self.layers.items())) + 1
return '%s_%d' % (prefix, ident) | Returns an index-suffixed unique name for the given prefix.
This is used for auto-generating layer names based on the type-prefix.
| get_unique_name | python | iyah4888/SIGGRAPH18SSS | kaffe/tensorflow/network.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/tensorflow/network.py | MIT |
def get_padding_type(kernel_params, input_shape, output_shape):
'''Translates Caffe's numeric padding to one of ('SAME', 'VALID').
Caffe supports arbitrary padding values, while TensorFlow only
supports 'SAME' and 'VALID' modes. So, not all Caffe paddings
can be translated to TensorFlow. There are some subtleties to
how the padding edge-cases are handled. These are described here:
https://github.com/Yangqing/caffe2/blob/master/caffe2/proto/caffe2_legacy.proto
'''
k_h, k_w, s_h, s_w, p_h, p_w = kernel_params
s_o_h = np.ceil(input_shape.height / float(s_h))
s_o_w = np.ceil(input_shape.width / float(s_w))
if (output_shape.height == s_o_h) and (output_shape.width == s_o_w):
return 'SAME'
v_o_h = np.ceil((input_shape.height - k_h + 1.0) / float(s_h))
v_o_w = np.ceil((input_shape.width - k_w + 1.0) / float(s_w))
if (output_shape.height == v_o_h) and (output_shape.width == v_o_w):
return 'VALID'
return None | Translates Caffe's numeric padding to one of ('SAME', 'VALID').
Caffe supports arbitrary padding values, while TensorFlow only
supports 'SAME' and 'VALID' modes. So, not all Caffe paddings
can be translated to TensorFlow. There are some subtleties to
how the padding edge-cases are handled. These are described here:
https://github.com/Yangqing/caffe2/blob/master/caffe2/proto/caffe2_legacy.proto
| get_padding_type | python | iyah4888/SIGGRAPH18SSS | kaffe/tensorflow/transformer.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/tensorflow/transformer.py | MIT |
def run(
self,
query="What is a lagrangian?",
limit_broad_results=1_000,
limit_deduped_url_results=50,
limit_hierarchical_url_results=50,
limit_final_pagerank_results=20,
url_contains_filter=None,
):
"""Run a search query using the WebSearchEngine client"""
query_vector = self.client.get_query_vector(query)
broad_results = self.client.similarity_search(
query_vector=query_vector, limit=limit_broad_results
)
if not url_contains_filter:
url_contains_filter = []
deduped_url_results = select_top_urls(
broad_results,
max_urls=limit_deduped_url_results,
url_contains=url_contains_filter,
)
hierarchical_url_results = (
self.client.hierarchical_similarity_reranking(
query_vector=query_vector,
urls=deduped_url_results,
limit=limit_hierarchical_url_results,
)
)
pagerank_reranked_results = self.client.pagerank_reranking(
hierarchical_url_results
)[:limit_final_pagerank_results]
return pagerank_reranked_results | Run a search query using the WebSearchEngine client | run | python | SciPhi-AI/agent-search | agent_search/app/server.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/app/server.py | Apache-2.0 |
def to_string_dict(self) -> dict:
"""Returns a dictionary representation with all values as strings."""
return {
"score": str(self.score),
"url": self.url,
"title": self.title,
"dataset": self.dataset,
"metadata": self.metadata,
"text": self.text,
} | Returns a dictionary representation with all values as strings. | to_string_dict | python | SciPhi-AI/agent-search | agent_search/core/search_types.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/core/search_types.py | Apache-2.0 |
def select_top_urls(
ordered_points: List[AgentSearchResult],
max_urls: int = 10,
url_contains: Optional[List[str]] = None,
) -> List[str]:
"""A function to return the top unique URLs from the given poitns results."""
if not url_contains:
url_contains = []
top_urls = set([])
for point in ordered_points:
url = point.url
if url in top_urls:
continue
url_contains_match = False if url_contains else True
for url_contain in url_contains:
if url_contain in url:
url_contains_match = True
break
if not url_contains_match:
continue
top_urls.add(point.url)
if len(top_urls) >= max_urls:
break
    return list(top_urls) | A function to return the top unique URLs from the given points results. | select_top_urls | python | SciPhi-AI/agent-search | agent_search/core/utils.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/core/utils.py | Apache-2.0 |
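
A small usage sketch for select_top_urls, assuming the agent_search package is importable; SimpleNamespace objects stand in for AgentSearchResult instances, since only the .url attribute is read here, and the URLs are purely illustrative.

from types import SimpleNamespace
from agent_search.core.utils import select_top_urls

points = [
    SimpleNamespace(url="https://arxiv.org/abs/1706.03762"),
    SimpleNamespace(url="https://arxiv.org/abs/1706.03762"),  # duplicate URL, skipped
    SimpleNamespace(url="https://en.wikipedia.org/wiki/Lagrangian_mechanics"),
]
# the url_contains filter keeps only arxiv.org results, deduplicated, up to max_urls
print(select_top_urls(points, max_urls=5, url_contains=["arxiv.org"]))
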
def cosine_similarity(v1: np.ndarray, v2: np.ndarray) -> float:
"""Compute the cosine similarity between two vectors."""
dot_product = np.dot(v1, v2)
norm_v1 = np.linalg.norm(v1)
norm_v2 = np.linalg.norm(v2)
return dot_product / (norm_v1 * norm_v2) | Compute the cosine similarity between two vectors. | cosine_similarity | python | SciPhi-AI/agent-search | agent_search/core/utils.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/core/utils.py | Apache-2.0 |
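
A tiny worked example of the cosine similarity formula above, written self-contained so it runs without the package; the vectors are arbitrary.

import numpy as np

def cosine_similarity(v1: np.ndarray, v2: np.ndarray) -> float:
    # identical to the helper above: dot product divided by the product of the norms
    return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))

a = np.array([1.0, 0.0, 1.0])
b = np.array([1.0, 1.0, 0.0])
print(cosine_similarity(a, a))  # 1.0: identical direction
print(cosine_similarity(a, b))  # 0.5: dot = 1, each norm = sqrt(2)
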
def __init__(
self,
api_base: Optional[str] = None,
api_key: Optional[str] = None,
timeout: int = 30,
) -> None:
"""
Initializes the SciPhi client.
Args:
api_base (Optional[str]): Base URL for the SciPhi API.
api_key (Optional[str]): API key for authenticating requests.
timeout (int): Timeout for API requests in seconds.
Raises:
ValueError: If `api_key` is not provided.
"""
self.api_base = (
api_base or os.getenv("SCIPHI_API_BASE") or "https://api.sciphi.ai"
)
self.api_key = api_key or os.getenv("SCIPHI_API_KEY")
if not self.api_key:
raise ValueError(
"You must specify an explicit api_key or define `SCIPHI_API_KEY` to initialize a SciPhi client."
)
self.timeout = timeout
self.client = httpx.Client(
base_url=self.api_base,
headers=self._auth_headers(),
timeout=timeout,
) |
Initializes the SciPhi client.
Args:
api_base (Optional[str]): Base URL for the SciPhi API.
api_key (Optional[str]): API key for authenticating requests.
timeout (int): Timeout for API requests in seconds.
Raises:
ValueError: If `api_key` is not provided.
| __init__ | python | SciPhi-AI/agent-search | agent_search/providers/sciphi.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/providers/sciphi.py | Apache-2.0 |
def _handle_api_response(self, response: httpx.Response) -> Dict:
"""
Handles the HTTP response from the API.
Args:
response (httpx.Response): The response from the API request.
Returns:
Dict: JSON response content.
Raises:
Exception: If the response indicates an error.
"""
if response.is_error:
# Handle errors appropriately
raise Exception(
f"API request failed with status {response.status_code}"
)
result = response.json()
return result |
Handles the HTTP response from the API.
Args:
response (httpx.Response): The response from the API request.
Returns:
Dict: JSON response content.
Raises:
Exception: If the response indicates an error.
| _handle_api_response | python | SciPhi-AI/agent-search | agent_search/providers/sciphi.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/providers/sciphi.py | Apache-2.0 |
def _handle_search_response(self, search_results: Dict[str, str]) -> None:
"""
        Handles dictionary search responses from the API.
Args:
search_results (Dict[str, str]): The response from the API request.
Returns:
Dict: JSON response content.
Raises:
Exception: If the response indicates an error.
"""
for result in search_results:
if "score" in result:
result["score"] = float(result["score"])
if "metadata" in result:
try:
result["metadata"] = (
json.loads(result["metadata"])
if (
result["metadata"] != None
and result["metadata"] != '""'
)
else {}
)
except Exception as e:
result["metadata"] = dict() |
        Handles dictionary search responses from the API.
Args:
search_results (Dict[str, str]): The response from the API request.
Returns:
Dict: JSON response content.
Raises:
Exception: If the response indicates an error.
| _handle_search_response | python | SciPhi-AI/agent-search | agent_search/providers/sciphi.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/providers/sciphi.py | Apache-2.0 |
def _retry_api_request(
self, method: str, url: str, payload: Dict, max_retries: int = 3
):
"""
Common method for retrying API requests with exponential backoff.
Args:
method (str): The HTTP method to use ('get' or 'post').
url (str): The API endpoint.
payload (Dict): The payload for the request.
max_retries (int): Maximum number of retry attempts.
Returns:
Dict: The JSON response from the API.
Raises:
Exception: If the maximum number of retries is reached.
"""
for attempt in range(max_retries):
try:
response = getattr(self.client, method)(url, json=payload)
return self._handle_api_response(response)
except httpx.HTTPError as e:
logger.info(f"HTTP error on attempt {attempt + 1}: {e}")
if attempt < max_retries - 1:
time.sleep(0.5 * (2**attempt))
except Exception as e:
logger.error(f"Error on attempt {attempt + 1}: {e}")
if attempt < max_retries - 1:
time.sleep(0.5 * (2**attempt))
raise Exception("Failed to fetch data after maximum retries.") |
Common method for retrying API requests with exponential backoff.
Args:
method (str): The HTTP method to use ('get' or 'post').
url (str): The API endpoint.
payload (Dict): The payload for the request.
max_retries (int): Maximum number of retry attempts.
Returns:
Dict: The JSON response from the API.
Raises:
Exception: If the maximum number of retries is reached.
| _retry_api_request | python | SciPhi-AI/agent-search | agent_search/providers/sciphi.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/providers/sciphi.py | Apache-2.0 |
def search(
self, query: str, search_provider: str, max_retries: int = 3
) -> List[Dict]:
"""
Performs a search query using the SciPhi API with retry and backoff logic.
Args:
query (str): The search query string.
search_provider (str): The search provider to use.
max_retries (int): Maximum number of retry attempts.
Returns:
List[Dict]: A list of search results.
"""
url = f"/search"
payload = {"provider": search_provider, "query": query}
try:
handled_response = self._retry_api_request(
"post", url, payload, max_retries
)
self._handle_search_response(handled_response)
return [SearchResult(**ele).dict() for ele in handled_response]
except Exception as e:
logger.error(f"Search request failed: {e}")
return {"error": str(e)} |
Performs a search query using the SciPhi API with retry and backoff logic.
Args:
query (str): The search query string.
search_provider (str): The search provider to use.
max_retries (int): Maximum number of retry attempts.
Returns:
List[Dict]: A list of search results.
| search | python | SciPhi-AI/agent-search | agent_search/providers/sciphi.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/providers/sciphi.py | Apache-2.0 |
def get_search_rag_response(
self,
query: str,
search_provider: str,
llm_model: str = "SciPhi/Sensei-7B-V1",
temperature: int = 0.2,
top_p: int = 0.95,
):
"""
Retrieves a search RAG (Retrieval-Augmented Generation) response from the API.
Args:
query (str): The search query string.
search_provider (str): The search provider to use.
llm_model (str): The language model to use.
temperature (int): The temperature setting for the query.
top_p (int): The top-p setting for the query.
Returns:
Dict: A dictionary with the search response and related queries.
"""
if query == "":
raise ValueError("Blank query submitted.")
if search_provider not in ["bing", "agent-search"]:
raise ValueError(f"Unsupported provider, {search_provider}")
url = f"/search_rag"
payload = {
"query": query,
"search_provider": search_provider,
"llm_model": llm_model,
"temperature": temperature,
"top_p": top_p,
}
try:
handled_response = self._retry_api_request("post", url, payload)
# rename the other queries to `related_queries` until LLM output is re-factored.
handled_response["related_queries"] = handled_response.pop(
"other_queries"
)
self._handle_search_response(handled_response["search_results"])
# Use Pydantic model for parsing and validation
search_response = SearchRAGResponse(**handled_response)
except Exception as e:
logger.error(f"Search request failed: {e}")
return {"error": str(e)}
return search_response.dict() |
Retrieves a search RAG (Retrieval-Augmented Generation) response from the API.
Args:
query (str): The search query string.
search_provider (str): The search provider to use.
llm_model (str): The language model to use.
temperature (int): The temperature setting for the query.
top_p (int): The top-p setting for the query.
Returns:
Dict: A dictionary with the search response and related queries.
| get_search_rag_response | python | SciPhi-AI/agent-search | agent_search/providers/sciphi.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/providers/sciphi.py | Apache-2.0 |
def completion(
self,
prompt: str,
llm_model_name: str = "SciPhi/Sensei-7B-V1",
llm_max_tokens_to_sample: int = 1_024,
llm_temperature: float = 0.2,
llm_top_p: float = 0.90,
) -> SearchRAGResponse:
"""
Generates a completion for a given prompt using the SciPhi API.
Args:
prompt (str): The prompt for generating completion.
llm_model_name (str): The language model to use.
llm_max_tokens_to_sample (int): Maximum number of tokens for the sample.
llm_temperature (float): The temperature setting for the query.
llm_top_p (float): The top-p setting for the query.
Returns:
Dict: A dictionary containing the generated completion.
Raises:
ImportError: If the `sciphi-synthesizer` package is not installed.
"""
try:
import synthesizer
except ImportError as e:
raise ImportError(
"Please install run `pip install sciphi-synthesizer` before attempting to generate a completion."
)
from synthesizer.core import LLMProviderName
from synthesizer.interface import LLMInterfaceManager
from synthesizer.llm import GenerationConfig
try:
llm_interface = LLMInterfaceManager.get_interface_from_args(
LLMProviderName("sciphi"),
)
generation_config = GenerationConfig(
model_name=llm_model_name,
max_tokens_to_sample=llm_max_tokens_to_sample,
temperature=llm_temperature,
top_p=llm_top_p,
)
completion = llm_interface.get_completion(
prompt, generation_config
).replace("</s>", "")
return completion
except Exception as e:
logger.error(f"Completion generation failed: {e}")
return {"error": str(e)} |
Generates a completion for a given prompt using the SciPhi API.
Args:
prompt (str): The prompt for generating completion.
llm_model_name (str): The language model to use.
llm_max_tokens_to_sample (int): Maximum number of tokens for the sample.
llm_temperature (float): The temperature setting for the query.
llm_top_p (float): The top-p setting for the query.
Returns:
Dict: A dictionary containing the generated completion.
Raises:
ImportError: If the `sciphi-synthesizer` package is not installed.
| completion | python | SciPhi-AI/agent-search | agent_search/providers/sciphi.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/providers/sciphi.py | Apache-2.0 |
def process_rows(rows, output_queue):
"""Process the rows into qdrant point objects."""
qdrant_points = []
for row in rows:
_, url, __, text_chunks, embeddings_binary, ___, ____ = row
embeddings = np.frombuffer(
embeddings_binary, dtype=np.float32
).reshape(-1, EMBEDDING_VEC_SIZE)
text_chunks = json.loads(text_chunks)
# Prepare data for Qdrant
qdrant_points.append(
models.PointStruct(
id=str(uuid.uuid3(uuid.NAMESPACE_DNS, url)),
vector=[float(ele) for ele in embeddings[0]],
payload={"text": text_chunks[0], "url": url},
)
)
output_queue.put(qdrant_points) | Process the rows into qdrant point objects. | process_rows | python | SciPhi-AI/agent-search | agent_search/scripts/populate_qdrant_from_postgres.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/scripts/populate_qdrant_from_postgres.py | Apache-2.0 |
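
process_rows above assumes each row stores its chunk embeddings as raw float32 bytes; here is a self-contained round trip of that encoding with numpy. A 4-dimensional vector is used purely for illustration, while the real table reshapes to EMBEDDING_VEC_SIZE columns.

import numpy as np

# two chunk embeddings of illustrative dimension 4, flattened to raw float32 bytes
embeddings = np.array([[0.1, 0.2, 0.3, 0.4],
                       [0.5, 0.6, 0.7, 0.8]], dtype=np.float32)
embeddings_binary = embeddings.tobytes()

# mirrors the decode step used in process_rows (and in the hierarchical reranker)
decoded = np.frombuffer(embeddings_binary, dtype=np.float32).reshape(-1, 4)
assert np.array_equal(decoded, embeddings)
print(decoded.shape)  # (2, 4)
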
def qdrant_writer(config, qdrant_queue, delete_existing):
"""A writer that listens for output events in a separate thread."""
qclient = QdrantClient(
config["qdrant_host"],
port=config["qdrant_grpc_port"],
prefer_grpc=config["qdrant_prefer_grpc"],
)
if delete_existing:
qclient.delete_collection(config["qdrant_collection_name"])
create_collection(qclient, config["qdrant_collection_name"])
logger.info("Launching Qdrant writer")
while True:
try:
points = qdrant_queue.get()
logger.info(f"Starting Qdrant write-out...")
if points is None: # Sentinel to end the process
break
operation_result = qclient.upsert(
collection_name=config["qdrant_collection_name"],
wait=True,
points=points,
)
logger.info(
f"Finished Qdrant write-out with result {operation_result}..."
)
except Exception as e:
logger.info(f"Task failed with {e}") | A writer that listens for output events in a separate thread. | qdrant_writer | python | SciPhi-AI/agent-search | agent_search/scripts/populate_qdrant_from_postgres.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/scripts/populate_qdrant_from_postgres.py | Apache-2.0 |
def process_batches(config, start, end, batch_size, output_queue):
"""Processes the batches in steps of the given batch_size"""
# Connect to the database
conn = psycopg2.connect(
dbname=config["postgres_db"],
user=config["postgres_user"],
password=config["postgres_password"],
host=config["postgres_host"],
options="-c client_encoding=UTF8",
)
cur = conn.cursor()
# Declare a server-side cursor with offset
cur.execute(
f"DECLARE proc_cursor CURSOR FOR SELECT * FROM {config['postgres_table_name']} OFFSET {start} LIMIT {end - start}"
)
offset = start
while True:
logger.info(
f"Fetching a batch of size {batch_size} at offset {offset}"
)
# Fetch a batch of rows
cur.execute(f"FETCH {batch_size} FROM proc_cursor")
rows = cur.fetchall()
if len(rows) == 0:
break
process_rows(rows, output_queue)
offset += batch_size
# terminate
if offset + batch_size >= end:
break
cur.close()
conn.close() | Processes the batches in steps of the given batch_size | process_batches | python | SciPhi-AI/agent-search | agent_search/scripts/populate_qdrant_from_postgres.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/scripts/populate_qdrant_from_postgres.py | Apache-2.0 |
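
process_batches streams rows with an explicit DECLARE ... FETCH cursor; psycopg2 can express the same server-side streaming with a named cursor. The sketch below assumes a reachable Postgres instance, and the connection values and table name are placeholders.

import psycopg2

conn = psycopg2.connect(dbname="mydb", user="me", password="secret", host="localhost")
cur = conn.cursor(name="proc_cursor")  # a named cursor lives server-side
cur.itersize = 1024                    # rows fetched from the server per round trip
cur.execute("SELECT * FROM my_table")
for row in cur:                        # iterates in batches instead of loading everything
    pass                               # process each row here
cur.close()
conn.close()
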
def run(self, num_processes=16, batch_size=1_024, delete_existing=False):
"""Runs the population process for the qdrant database"""
qdrant_queue = multiprocessing.Queue()
qdrant_writer_thread = multiprocessing.Process(
target=qdrant_writer,
args=(
self.config,
qdrant_queue,
delete_existing,
),
)
qdrant_writer_thread.start()
conn = psycopg2.connect(
dbname=self.config["postgres_db"],
user=self.config["postgres_user"],
password=self.config["postgres_password"],
host=self.config["postgres_host"],
options="-c client_encoding=UTF8",
)
cur = conn.cursor()
# Count total number of entries
cur.execute(
f"SELECT COUNT(*) FROM {self.config['postgres_table_name']}"
)
total_count = cur.fetchone()[0]
logger.info(
f"Processing {total_count} entries in {num_processes} processes"
)
range_size = total_count // num_processes
# Create and start multiprocessing workflow
processes = []
for i in range(num_processes):
logger.info(f"Starting process {i}...")
start = i * range_size
end = start + range_size if i < num_processes - 1 else total_count
proc = multiprocessing.Process(
target=process_batches,
args=(
self.config,
start,
end,
batch_size,
qdrant_queue,
),
)
processes.append(proc)
proc.start()
# Wait for all processes to finish
for proc in processes:
proc.join()
# send termination signal
qdrant_queue.put(None)
cur.close()
conn.close() | Runs the population process for the qdrant database | run | python | SciPhi-AI/agent-search | agent_search/scripts/populate_qdrant_from_postgres.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/scripts/populate_qdrant_from_postgres.py | Apache-2.0 |
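
The run method above coordinates producers and a single writer through a multiprocessing.Queue terminated by a None sentinel; a stripped-down sketch of just that pattern, independent of Postgres and Qdrant.

import multiprocessing

def writer(queue):
    while True:
        batch = queue.get()
        if batch is None:          # sentinel: producers are done, exit the loop
            break
        print("writing batch of", len(batch), "items")

if __name__ == "__main__":
    queue = multiprocessing.Queue()
    proc = multiprocessing.Process(target=writer, args=(queue,))
    proc.start()
    queue.put([1, 2, 3])           # stands in for a list of Qdrant points
    queue.put(None)                # signal the writer to stop
    proc.join()
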
def hierarchical_similarity_reranking(
self,
query_vector: np.ndarray,
urls: List[str],
limit: int = 100,
) -> List[AgentSearchResult]:
"""Hierarchical URL search to find the most similar text chunk for the given query and URLs"""
results = self.execute_batch_query(urls)
# List to store the results along with their similarity scores
similarity_results = []
# Iterate over each result to find the most similar text chunk
for result in results:
(
url,
title,
metadata,
dataset,
text_chunks_str,
embeddings_binary,
) = result
# deserialize the embeddings and text chunks
embeddings = np.frombuffer(
embeddings_binary, dtype=np.float32
).reshape(-1, 768)
text_chunks = json.loads(text_chunks_str)
max_similarity = -1e9
most_similar_chunk = None
# Iterate over each embedding to find the one with maximum cosine similarity
for chunk, embedding in zip(text_chunks, embeddings):
similarity = cosine_similarity(
np.array(query_vector), np.array(embedding)
)
if similarity > max_similarity:
max_similarity = similarity
most_similar_chunk = chunk
# Store the most similar chunk and its similarity score
similarity_results.append(
AgentSearchResult(
score=max_similarity,
url=url,
title=title,
metadata=json.loads(metadata),
dataset=dataset,
text=most_similar_chunk,
),
)
# Sort the results based on similarity score in descending order
similarity_results.sort(key=lambda x: x.score, reverse=True)
return similarity_results[:limit] | Hierarchical URL search to find the most similar text chunk for the given query and URLs | hierarchical_similarity_reranking | python | SciPhi-AI/agent-search | agent_search/search/base.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/search/base.py | Apache-2.0 |
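
The heart of the reranker above is selecting, per document, the chunk whose embedding has the highest cosine similarity to the query; below is a self-contained version of that selection with made-up two-dimensional vectors.

import numpy as np

def best_chunk(query_vector, text_chunks, embeddings):
    # cosine similarity of the query against every chunk embedding at once
    sims = embeddings @ query_vector / (
        np.linalg.norm(embeddings, axis=1) * np.linalg.norm(query_vector))
    best = int(np.argmax(sims))
    return text_chunks[best], float(sims[best])

chunks = ["chunk about lagrangians", "chunk about cooking"]
embs = np.array([[0.9, 0.1], [0.1, 0.9]], dtype=np.float32)
print(best_chunk(np.array([1.0, 0.0]), chunks, embs))  # picks the lagrangian chunk
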
def pagerank_reranking(
self,
similarity_results: List[AgentSearchResult],
limit: int = 100,
) -> List[AgentSearchResult]:
"""Reranks the results based on the PageRank score of the domain"""
if not self.pagerank_rerank_module:
raise Exception(
"PageRank reranking module is not enabled. Please set pagerank_rerank_module=True while initializing the WebSearchEngine client."
)
# List to store the results along with their PageRank scores
pagerank_results = []
# Iterate over each result to find the PageRank score of the domain
for result in similarity_results:
pagerank_score = 0
try:
domain = result.url.split("/")[2]
pagerank_score = self.domain_to_rank_map.get(domain, 0)
except Exception as e:
logger.info(f"Error {e}: Found for URL: {result.url}")
reweighted_score = (
self.pagerank_importance * pagerank_score / 10.0
+ (1 - self.pagerank_importance) * result.score
)
pagerank_results.append(
AgentSearchResult(
score=reweighted_score,
url=result.url,
title=result.title,
metadata=result.metadata,
dataset=result.dataset,
text=result.text,
)
)
# Sort the results based on PageRank score in descending order
pagerank_results.sort(key=lambda x: x.score, reverse=True)
return pagerank_results[:limit] | Reranks the results based on the PageRank score of the domain | pagerank_reranking | python | SciPhi-AI/agent-search | agent_search/search/base.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/search/base.py | Apache-2.0 |
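
The reranking score above is a convex blend of the domain's PageRank (a 0-10 value scaled to 0-1) and the similarity score; here is a worked number assuming pagerank_importance = 0.25, which is only an illustrative weight (the real value comes from the engine's configuration).

pagerank_importance = 0.25   # assumed blend weight, set on the client in the real code
pagerank_score = 7.0         # domain rank on a 0-10 scale
similarity = 0.82            # similarity score from the previous stage

reweighted = (pagerank_importance * pagerank_score / 10.0
              + (1 - pagerank_importance) * similarity)
print(reweighted)            # 0.25 * 0.7 + 0.75 * 0.82 ≈ 0.79
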
def scrub_str(string):
"""
The purpose of this function is to scrub the weird template mark-up out of strings
that Veekun is using for their pokedex.
Example:
[]{move:dragon-tail} will effect the opponents [HP]{mechanic:hp}.
Becomes:
dragon tail will effect the opponents HP.
If you find this results in weird strings please take a stab at improving or re-writing.
"""
groups = re.findall(GROUP_RGX, string)
for group in groups:
if group[0]:
sub = group[0]
else:
sub = group[1].split(":")
if len(sub) >= 2:
sub = sub[1]
else:
sub = sub[0]
sub = sub.replace("-", " ")
string = re.sub(SUB_RGX, sub, string, 1)
return string |
The purpose of this function is to scrub the weird template mark-up out of strings
that Veekun is using for their pokedex.
Example:
[]{move:dragon-tail} will effect the opponents [HP]{mechanic:hp}.
Becomes:
dragon tail will effect the opponents HP.
If you find this results in weird strings please take a stab at improving or re-writing.
| scrub_str | python | PokeAPI/pokeapi | data/v2/build.py | https://github.com/PokeAPI/pokeapi/blob/master/data/v2/build.py | BSD-3-Clause |
def __SectionLength(this):
"""(4 bytes) Gets the length of characters the given section is"""
offset = this.__SectionDataOffset
return struct.unpack_from("<I", this.__data, offset)[0] | (4 bytes) Gets the length of characters the given section is | __SectionLength | python | PokeAPI/pokeapi | Resources/scripts/data/gen8/read_swsh.py | https://github.com/PokeAPI/pokeapi/blob/master/Resources/scripts/data/gen8/read_swsh.py | BSD-3-Clause |
def __LineOffsets(this):
"""Figures out the offset for each entry based on the data section offset"""
result = [None] * this.__LineCount
sdo = int(this.__SectionDataOffset)
for i in range(0, len(result)):
result[i] = TextLine()
result[i].offset = struct.unpack_from("<i", this.__data, (i * 8) + sdo + 4)[0] + sdo
result[i].length = struct.unpack_from("<h", this.__data, (i * 8) + sdo + 8)[0]
return result | Figures out the offset for each entry based on the data section offset | __LineOffsets | python | PokeAPI/pokeapi | Resources/scripts/data/gen8/read_swsh.py | https://github.com/PokeAPI/pokeapi/blob/master/Resources/scripts/data/gen8/read_swsh.py | BSD-3-Clause |
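
The offset table above is read with struct.unpack_from against fixed little-endian fields; below is a self-contained toy showing the same pack/unpack pattern on a simplified (offset: int32, length: int16) record layout, not the exact file layout used by the reader.

import struct

# a toy table of two little-endian (offset: int32, length: int16) records
buf = struct.pack("<ihih", 64, 10, 96, 12)

stride = struct.calcsize("<ih")          # 6 bytes per record, no padding with '<'
records = [struct.unpack_from("<ih", buf, i * stride) for i in range(2)]
print(records)                           # [(64, 10), (96, 12)]
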
def HashFNV1_64(this, word):
"""Fowler-Noll-Vo hash function; 64-bit"""
fnvPrime_64 = 0x100000001b3
offsetBasis_64 = 0xCBF29CE484222645
hash = offsetBasis_64
for c in word:
hash = hash ^ ord(c)
# Cast hash to at 64-bit value
hash = (hash * fnvPrime_64) % 2**64
return hash | Fowler-Noll-Vo hash function; 64-bit | HashFNV1_64 | python | PokeAPI/pokeapi | Resources/scripts/data/gen8/read_swsh.py | https://github.com/PokeAPI/pokeapi/blob/master/Resources/scripts/data/gen8/read_swsh.py | BSD-3-Clause |
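
A standalone version of the same 64-bit FNV hash; note that the XOR-before-multiply order used above matches the FNV-1a variant (despite the method's name), and the mod 2**64 emulates 64-bit unsigned overflow in Python.

FNV_PRIME_64 = 0x100000001b3
FNV_OFFSET_BASIS_64 = 0xCBF29CE484222645

def fnv_hash_64(word: str) -> int:
    h = FNV_OFFSET_BASIS_64
    for c in word:
        h ^= ord(c)                        # fold in the character code first...
        h = (h * FNV_PRIME_64) % 2**64     # ...then multiply, wrapping to 64 bits
    return h

print(hex(fnv_hash_64("pikachu")))         # deterministic 64-bit digest of the word
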
def __LineData(this, data):
"""Loads the file into a list to later decrypt"""
key = copy.copy(this.__KEY_BASE)
result = [None] * this.__LineCount
lines = this.__LineOffsets
for i in range(0, len(lines)):
# Make a list twice the size of the current text line size
encrypted = lines[i].length * 2
# Then copy the encrypted line starting from the given offset for however long the given list is
end = lines[i].offset + encrypted
encrypted = this.__data[lines[i].offset:end]
result[i] = this.__CryptLineData(encrypted, key)
# Cast key to a 16-bits (otherwise things break)
key = (key + this.__KEY_ADVANCE) % 2**16
return result | Loads the file into a list to later decrypt | __LineData | python | PokeAPI/pokeapi | Resources/scripts/data/gen8/read_swsh.py | https://github.com/PokeAPI/pokeapi/blob/master/Resources/scripts/data/gen8/read_swsh.py | BSD-3-Clause |
def __CryptLineData(this, data, key):
"""Decrypts the given line into a list of bytes"""
copied = copy.copy(data)
result = [None] * len(copied)
for i in range(0, len(copied), 2):
result[i] = copied[i] ^ (key % 256)
result[i + 1] = copied[i + 1] ^ ((key >> 8) % 256)
# Bit-shift and OR key, then cast to 16-bits (otherwise things break)
key = (key << 3 | key >> 13) % 2**16
return result | Decrypts the given line into a list of bytes | __CryptLineData | python | PokeAPI/pokeapi | Resources/scripts/data/gen8/read_swsh.py | https://github.com/PokeAPI/pokeapi/blob/master/Resources/scripts/data/gen8/read_swsh.py | BSD-3-Clause |
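
Because the scheme above is a plain XOR stream whose 16-bit key is rotated left by 3 bits after every byte pair, applying it twice with the same starting key restores the original data. A self-contained round trip follows; the starting key is arbitrary here, not the reader's real KEY_BASE, and the UTF-16-LE encoding simply gives the even byte count the loop expects.

def crypt(data: bytes, key: int) -> bytes:
    out = bytearray(len(data))
    for i in range(0, len(data), 2):
        out[i] = data[i] ^ (key & 0xFF)                  # low key byte against even bytes
        out[i + 1] = data[i + 1] ^ ((key >> 8) & 0xFF)   # high key byte against odd bytes
        key = ((key << 3) | (key >> 13)) & 0xFFFF        # rotate the 16-bit key left by 3
    return bytes(out)

plain = "Pikachu!".encode("utf-16-le")    # two bytes per character, even length
key = 0x7C89                              # illustrative starting key
assert crypt(crypt(plain, key), key) == plain
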