python_code | repo_name | file_path
---|---|---
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to create different kinds of systems."""
from typing import Sequence
import attr
from ferminet.utils import elements
from ferminet.utils import units as unit_conversion
import numpy as np
# Default bond lengths in angstrom for some diatomics.
# Bond lengths from either the G3 dataset:
# 1. http://www.cse.anl.gov/OldCHMwebsiteContent/compmat/comptherm.htm
# 2. L. A. Curtiss, P. C. Redfern, K. Raghavachari, and J. A. Pople,
# J. Chem. Phys, 109, 42 (1998).
# or from NIST (https://cccbdb.nist.gov/diatomicexpbondx.asp).
diatomic_bond_lengths = {
'BeH': 1.348263,
'CN': 1.134797,
'ClF': 1.659091,
'F2': 1.420604,
'H2': 0.737164,
'HCl': 1.2799799,
'Li2': 2.77306,
'LiH': 1.639999,
'N2': 1.129978,
'NH': 1.039428,
'CO': 1.150338,
'BH': 1.2324,
'PN': 1.491,
'AlH': 1.648,
'AlN': 1.786,
}
# Default spin polarisation for a few diatomics of interest.
# Otherwise default to either singlet (doublet) for even (odd) numbers of
# electrons. Units: number of unpaired electrons.
diatomic_spin_polarisation = {
'B2': 2,
'O2': 2,
'NH': 2,
'AlN': 2,
}
@attr.s
class Atom: # pytype: disable=invalid-function-definition
"""Atom information for Hamiltonians.
The nuclear charge is inferred from the symbol if not given, in which case the
symbol must be the IUPAC symbol of the desired element.
Attributes:
symbol: Element symbol.
coords: An iterable of atomic coordinates. Always a list of floats and in
bohr after initialisation. Default: place atom at origin.
charge: Nuclear charge. Default: nuclear charge (atomic number) of atom of
the given name.
atomic_number: Atomic number associated with element. Default: atomic number
of element of the given symbol. Should match charge unless fractional
nuclear charges are being used.
units: String giving units of coords. Either bohr or angstrom. Default:
bohr. If angstrom, coords are converted to be in bohr and units to the
string 'bohr'.
coords_angstrom: list of atomic coordinates in angstrom.
coords_array: Numpy array of atomic coordinates in bohr.
element: elements.Element corresponding to the symbol.
"""
symbol = attr.ib()
coords = attr.ib(
converter=lambda xs: tuple(float(x) for x in xs),
default=(0.0, 0.0, 0.0)) # type: Sequence[float]
charge = attr.ib(converter=float)
atomic_number = attr.ib(converter=int)
units = attr.ib(
default='bohr', validator=attr.validators.in_(['bohr', 'angstrom']))
@charge.default
def _set_default_charge(self):
return self.element.atomic_number
@atomic_number.default
def _set_default_atomic_number(self):
return self.element.atomic_number
def __attrs_post_init__(self):
if self.units == 'angstrom':
self.coords = [unit_conversion.angstrom2bohr(x) for x in self.coords]
self.units = 'bohr'
@property
def coords_angstrom(self):
return [unit_conversion.bohr2angstrom(x) for x in self.coords]
@property
def coords_array(self):
if not hasattr(self, '_coords_arr'):
self._coords_arr = np.array(self.coords)
return self._coords_arr
@property
def element(self):
return elements.SYMBOLS[self.symbol]
def atom(symbol, spins=None, charge=0):
"""Return configuration for a single atom.
Args:
symbol: The atomic symbol from the periodic table
spins (optional): A tuple with the number of spin-up and spin-down electrons
charge (optional): If zero (default), create a neutral atom, otherwise
create an anion if charge is negative or cation if charge is positive.
Returns:
A list with a single Atom object located at zero, and a tuple with the spin
configuration of the electrons.
"""
atomic_number = elements.SYMBOLS[symbol].atomic_number
if charge > atomic_number:
raise ValueError('Cannot have a cation with charge larger than the '
'atomic number. Charge: {}, Atomic Number: {}'.format(
charge, atomic_number))
if spins is None:
spin_polarisation = elements.ATOMIC_NUMS[atomic_number-charge].spin_config
nalpha = (atomic_number - charge + spin_polarisation) // 2
spins = (nalpha, atomic_number - charge - nalpha)
return [Atom(symbol=symbol, coords=(0.0, 0.0, 0.0))], spins
def diatomic(symbol1, symbol2, bond_length, spins=None, charge=0, units='bohr'):
"""Return configuration for a diatomic molecule."""
if spins is None:
atomic_number_1 = elements.SYMBOLS[symbol1].atomic_number
atomic_number_2 = elements.SYMBOLS[symbol2].atomic_number
total_charge = atomic_number_1 + atomic_number_2 - charge
if total_charge % 2 == 0:
spins = (total_charge // 2, total_charge // 2)
else:
spins = ((total_charge + 1)// 2, (total_charge - 1) // 2)
return [
Atom(symbol=symbol1, coords=(0.0, 0.0, bond_length/2.0), units=units),
Atom(symbol=symbol2, coords=(0.0, 0.0, -bond_length/2.0), units=units)
], spins
def molecule(symbol, bond_length=0.0, units='bohr'):
"""Hardcoded molecular geometries from the original Fermi Net paper."""
if symbol in diatomic_bond_lengths:
if symbol[-1] == '2':
symbs = [symbol[:-1], symbol[:-1]]
else: # Split a camel-case string on the second capital letter
split_idx = None
for i in range(1, len(symbol)):
if split_idx is None and symbol[i].isupper():
split_idx = i
if split_idx is None:
raise ValueError('Cannot find second atomic symbol: {}'.format(symbol))
symbs = [symbol[:split_idx], symbol[split_idx:]]
atomic_number_1 = elements.SYMBOLS[symbs[0]].atomic_number
atomic_number_2 = elements.SYMBOLS[symbs[1]].atomic_number
total_charge = atomic_number_1 + atomic_number_2
if symbol in diatomic_spin_polarisation:
spin_pol = diatomic_spin_polarisation[symbol]
spins = ((total_charge + spin_pol) // 2, (total_charge - spin_pol) // 2)
elif total_charge % 2 == 0:
spins = (total_charge // 2, total_charge // 2)
else:
spins = ((total_charge + 1)// 2, (total_charge - 1) // 2)
if bond_length == 0.0:
bond_length = diatomic_bond_lengths[symbol]
units = 'angstrom'
return diatomic(symbs[0], symbs[1],
bond_length,
units=units,
spins=spins)
if bond_length != 0.0:
raise ValueError('Bond length argument only appropriate for diatomics.')
if symbol == 'CH4':
return [
Atom(symbol='C', coords=(0.0, 0.0, 0.0), units='bohr'),
Atom(symbol='H', coords=(1.18886, 1.18886, 1.18886), units='bohr'),
Atom(symbol='H', coords=(-1.18886, -1.18886, 1.18886), units='bohr'),
Atom(symbol='H', coords=(1.18886, -1.18886, -1.18886), units='bohr'),
Atom(symbol='H', coords=(-1.18886, 1.18886, -1.18886), units='bohr'),
], (5, 5)
if symbol == 'NH3':
return [
Atom(symbol='N', coords=(0.0, 0.0, 0.22013), units='bohr'),
Atom(symbol='H', coords=(0.0, 1.77583, -0.51364), units='bohr'),
Atom(symbol='H', coords=(1.53791, -0.88791, -0.51364), units='bohr'),
Atom(symbol='H', coords=(-1.53791, -0.88791, -0.51364), units='bohr'),
], (5, 5)
if symbol in ('C2H4', 'ethene', 'ethylene'):
return [
Atom(symbol='C', coords=(0.0, 0.0, 1.26135), units='bohr'),
Atom(symbol='C', coords=(0.0, 0.0, -1.26135), units='bohr'),
Atom(symbol='H', coords=(0.0, 1.74390, 2.33889), units='bohr'),
Atom(symbol='H', coords=(0.0, -1.74390, 2.33889), units='bohr'),
Atom(symbol='H', coords=(0.0, 1.74390, -2.33889), units='bohr'),
Atom(symbol='H', coords=(0.0, -1.74390, -2.33889), units='bohr'),
], (8, 8)
if symbol in ('C4H6', 'bicyclobutane'):
return [
Atom(symbol='C', coords=(0.0, 2.13792, 0.58661), units='bohr'),
Atom(symbol='C', coords=(0.0, -2.13792, 0.58661), units='bohr'),
Atom(symbol='C', coords=(1.41342, 0.0, -0.58924), units='bohr'),
Atom(symbol='C', coords=(-1.41342, 0.0, -0.58924), units='bohr'),
Atom(symbol='H', coords=(0.0, 2.33765, 2.64110), units='bohr'),
Atom(symbol='H', coords=(0.0, 3.92566, -0.43023), units='bohr'),
Atom(symbol='H', coords=(0.0, -2.33765, 2.64110), units='bohr'),
Atom(symbol='H', coords=(0.0, -3.92566, -0.43023), units='bohr'),
Atom(symbol='H', coords=(2.67285, 0.0, -2.19514), units='bohr'),
Atom(symbol='H', coords=(-2.67285, 0.0, -2.19514), units='bohr'),
], (15, 15)
raise ValueError('Not a recognized molecule: {}'.format(symbol))
def hn(n, r, charge=0, units='bohr'):
"""Return a hydrogen chain with n atoms and separation r."""
m = n - charge # number of electrons
if m % 2 == 0:
spins = (m//2, m//2)
else:
spins = ((m+1)//2, (m-1)//2)
lim = r * (n-1) / 2.0
return [Atom(symbol='H', coords=(0.0, 0.0, z), units=units)
for z in np.linspace(-lim, lim, n)], spins
def h4_circle(r, theta, units='bohr'):
"""Return 4 hydrogen atoms arranged in a circle, a failure case of CCSD(T)."""
return [
Atom(symbol='H',
coords=(r*np.cos(theta), r*np.sin(theta), 0.0),
units=units),
Atom(symbol='H',
coords=(-r*np.cos(theta), r*np.sin(theta), 0.0),
units=units),
Atom(symbol='H',
coords=(r*np.cos(theta), -r*np.sin(theta), 0.0),
units=units),
Atom(symbol='H',
coords=(-r*np.cos(theta), -r*np.sin(theta), 0.0),
units=units)
], (2, 2)
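# Illustrative usage sketch (editor's addition, not part of the original
# module): shows how the factory functions above fit together. The printed
# spin tuples follow from the ground-state configurations in
# ferminet.utils.elements; treat this as a sketch, not library API.
if __name__ == '__main__':
  mol, spins = atom('Li')  # single neutral Li atom at the origin -> spins (2, 1)
  print(mol, spins)
  mol, spins = molecule('LiH')  # LiH at its default bond length (angstrom)
  print(mol, spins)
  mol, spins = hn(4, 2.0)  # hydrogen chain: 4 atoms spaced 2 bohr apart
  print(mol, spins)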
| ferminet-master | ferminet/utils/system.py |
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic definition of units and converters useful for chemistry."""
from typing import Union
import numpy as np
# 1 Bohr = 0.52917721067 (12) x 10^{-10} m
# https://physics.nist.gov/cgi-bin/cuu/Value?bohrrada0
# Note: pyscf uses a slightly older definition of 0.52917721092 angstrom.
ANGSTROM_BOHR = 0.52917721067
BOHR_ANGSTROM = 1. / ANGSTROM_BOHR
# 1 Hartree = 627.509474 kcal/mol
# https://en.wikipedia.org/wiki/Hartree
KCAL_HARTREE = 627.509474
HARTREE_KCAL = 1. / KCAL_HARTREE
def bohr2angstrom(x_b: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
return x_b * ANGSTROM_BOHR
def angstrom2bohr(x_a: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
return x_a * BOHR_ANGSTROM
def hartree2kcal(x_b: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
return x_b * KCAL_HARTREE
def kcal2hartree(x_a: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
return x_a * HARTREE_KCAL
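# Illustrative usage sketch (editor's addition, not part of the original
# module): quick numerical check of the converters above (values rounded).
if __name__ == '__main__':
  print(angstrom2bohr(0.74))  # H2 bond length, ~1.3984 bohr
  print(bohr2angstrom(1.0))   # ~0.5292 angstrom
  print(hartree2kcal(1.0))    # ~627.509 kcal/mol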
| ferminet-master | ferminet/utils/units.py |
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic data on chemical elements."""
import collections
from typing import Optional
import attr
@attr.s
class Element(object):
"""Chemical element.
Attributes:
symbol: official symbol of element.
atomic_number: atomic number of element.
period: period to which the element belongs.
spin: overrides default ground-state spin-configuration based on the
element's group (main groups only).
"""
symbol: str = attr.ib()
atomic_number: int = attr.ib()
period: int = attr.ib()
_spin: Optional[int] = attr.ib(default=None, repr=False)
@property
def group(self) -> int:
"""Group to which element belongs. Set to -1 for actines and lanthanides."""
is_lanthanide = (58 <= self.atomic_number <= 71)
is_actinide = (90 <= self.atomic_number <= 103)
if is_lanthanide or is_actinide:
return -1
if self.symbol == 'He':
# n=1 shell only has s orbital -> He is a noble gas.
return 18
period_starts = (1, 3, 11, 19, 37, 55, 87)
period_start = period_starts[self.period - 1]
group_ = self.atomic_number - period_start + 1
# Adjust for absence of d block in periods 2 and 3.
if self.period < 4 and group_ > 2:
group_ += 10
# Adjust for Lanthanides and Actinides in periods 6 and 7.
if self.period >= 6 and group_ > 3:
group_ -= 14
return group_
@property
def spin_config(self) -> int:
"""Canonical spin configuration (via Hund's rules) of neutral atom.
Returns:
Number of unpaired electrons (as required by PySCF) in the neutral atom's
ground state.
Raises:
NotImplementedError: if element is a transition metal and the spin
configuration is not set at initialization.
"""
if self._spin is not None:
return self._spin
unpaired = {1: 1, 2: 0, 13: 1, 14: 2, 15: 3, 16: 2, 17: 1, 18: 0}
if self.group in unpaired:
return unpaired[self.group]
else:
raise NotImplementedError(
'Spin configuration for transition metals not set.')
@property
def nalpha(self) -> int:
"""Returns the number of alpha electrons of the ground state neutral atom.
Without loss of generality, the number of alpha electrons is taken to be
equal to or greater than the number of beta electrons.
"""
electrons = self.atomic_number
unpaired = self.spin_config
return (electrons + unpaired) // 2
@property
def nbeta(self) -> int:
"""Returns the number of beta electrons of the ground state neutral atom.
Without loss of generality, the number of alpha electrons is taken to be
equal to or greater than the number of beta electrons.
"""
electrons = self.atomic_number
unpaired = self.spin_config
return (electrons - unpaired) // 2
# Atomic symbols for all known elements
# Generated using
# def _element(symbol, atomic_number):
# # period_start[n] = atomic number of group 1 element in (n+1)-th period.
# period_start = (1, 3, 11, 19, 37, 55, 87)
# for p, group1_no in enumerate(period_start):
# if atomic_number < group1_no:
# # In previous period but n is 0-based.
# period = p
# break
# else:
# period = p + 1
# return Element(symbol=symbol, atomic_number=atomic_number, period=period)
# [_element(s, n+1) for n, s in enumerate(symbols)]
# where symbols is the list of chemical symbols of all elements.
_ELEMENTS = (
Element(symbol='H', atomic_number=1, period=1),
Element(symbol='He', atomic_number=2, period=1),
Element(symbol='Li', atomic_number=3, period=2),
Element(symbol='Be', atomic_number=4, period=2),
Element(symbol='B', atomic_number=5, period=2),
Element(symbol='C', atomic_number=6, period=2),
Element(symbol='N', atomic_number=7, period=2),
Element(symbol='O', atomic_number=8, period=2),
Element(symbol='F', atomic_number=9, period=2),
Element(symbol='Ne', atomic_number=10, period=2),
Element(symbol='Na', atomic_number=11, period=3),
Element(symbol='Mg', atomic_number=12, period=3),
Element(symbol='Al', atomic_number=13, period=3),
Element(symbol='Si', atomic_number=14, period=3),
Element(symbol='P', atomic_number=15, period=3),
Element(symbol='S', atomic_number=16, period=3),
Element(symbol='Cl', atomic_number=17, period=3),
Element(symbol='Ar', atomic_number=18, period=3),
Element(symbol='K', atomic_number=19, period=4),
Element(symbol='Ca', atomic_number=20, period=4),
Element(symbol='Sc', atomic_number=21, period=4, spin=1),
Element(symbol='Ti', atomic_number=22, period=4, spin=2),
Element(symbol='V', atomic_number=23, period=4, spin=3),
Element(symbol='Cr', atomic_number=24, period=4, spin=6),
Element(symbol='Mn', atomic_number=25, period=4, spin=5),
Element(symbol='Fe', atomic_number=26, period=4, spin=4),
Element(symbol='Co', atomic_number=27, period=4, spin=3),
Element(symbol='Ni', atomic_number=28, period=4, spin=2),
Element(symbol='Cu', atomic_number=29, period=4, spin=1),
Element(symbol='Zn', atomic_number=30, period=4, spin=0),
Element(symbol='Ga', atomic_number=31, period=4),
Element(symbol='Ge', atomic_number=32, period=4),
Element(symbol='As', atomic_number=33, period=4),
Element(symbol='Se', atomic_number=34, period=4),
Element(symbol='Br', atomic_number=35, period=4),
Element(symbol='Kr', atomic_number=36, period=4),
Element(symbol='Rb', atomic_number=37, period=5),
Element(symbol='Sr', atomic_number=38, period=5),
Element(symbol='Y', atomic_number=39, period=5, spin=1),
Element(symbol='Zr', atomic_number=40, period=5, spin=2),
Element(symbol='Nb', atomic_number=41, period=5, spin=5),
Element(symbol='Mo', atomic_number=42, period=5, spin=6),
Element(symbol='Tc', atomic_number=43, period=5, spin=5),
Element(symbol='Ru', atomic_number=44, period=5, spin=4),
Element(symbol='Rh', atomic_number=45, period=5, spin=3),
Element(symbol='Pd', atomic_number=46, period=5, spin=0),
Element(symbol='Ag', atomic_number=47, period=5, spin=1),
Element(symbol='Cd', atomic_number=48, period=5, spin=0),
Element(symbol='In', atomic_number=49, period=5),
Element(symbol='Sn', atomic_number=50, period=5),
Element(symbol='Sb', atomic_number=51, period=5),
Element(symbol='Te', atomic_number=52, period=5),
Element(symbol='I', atomic_number=53, period=5),
Element(symbol='Xe', atomic_number=54, period=5),
Element(symbol='Cs', atomic_number=55, period=6),
Element(symbol='Ba', atomic_number=56, period=6),
Element(symbol='La', atomic_number=57, period=6),
Element(symbol='Ce', atomic_number=58, period=6),
Element(symbol='Pr', atomic_number=59, period=6),
Element(symbol='Nd', atomic_number=60, period=6),
Element(symbol='Pm', atomic_number=61, period=6),
Element(symbol='Sm', atomic_number=62, period=6),
Element(symbol='Eu', atomic_number=63, period=6),
Element(symbol='Gd', atomic_number=64, period=6),
Element(symbol='Tb', atomic_number=65, period=6),
Element(symbol='Dy', atomic_number=66, period=6),
Element(symbol='Ho', atomic_number=67, period=6),
Element(symbol='Er', atomic_number=68, period=6),
Element(symbol='Tm', atomic_number=69, period=6),
Element(symbol='Yb', atomic_number=70, period=6),
Element(symbol='Lu', atomic_number=71, period=6),
Element(symbol='Hf', atomic_number=72, period=6),
Element(symbol='Ta', atomic_number=73, period=6),
Element(symbol='W', atomic_number=74, period=6),
Element(symbol='Re', atomic_number=75, period=6),
Element(symbol='Os', atomic_number=76, period=6),
Element(symbol='Ir', atomic_number=77, period=6),
Element(symbol='Pt', atomic_number=78, period=6),
Element(symbol='Au', atomic_number=79, period=6),
Element(symbol='Hg', atomic_number=80, period=6),
Element(symbol='Tl', atomic_number=81, period=6),
Element(symbol='Pb', atomic_number=82, period=6),
Element(symbol='Bi', atomic_number=83, period=6),
Element(symbol='Po', atomic_number=84, period=6),
Element(symbol='At', atomic_number=85, period=6),
Element(symbol='Rn', atomic_number=86, period=6),
Element(symbol='Fr', atomic_number=87, period=7),
Element(symbol='Ra', atomic_number=88, period=7),
Element(symbol='Ac', atomic_number=89, period=7),
Element(symbol='Th', atomic_number=90, period=7),
Element(symbol='Pa', atomic_number=91, period=7),
Element(symbol='U', atomic_number=92, period=7),
Element(symbol='Np', atomic_number=93, period=7),
Element(symbol='Pu', atomic_number=94, period=7),
Element(symbol='Am', atomic_number=95, period=7),
Element(symbol='Cm', atomic_number=96, period=7),
Element(symbol='Bk', atomic_number=97, period=7),
Element(symbol='Cf', atomic_number=98, period=7),
Element(symbol='Es', atomic_number=99, period=7),
Element(symbol='Fm', atomic_number=100, period=7),
Element(symbol='Md', atomic_number=101, period=7),
Element(symbol='No', atomic_number=102, period=7),
Element(symbol='Lr', atomic_number=103, period=7),
Element(symbol='Rf', atomic_number=104, period=7),
Element(symbol='Db', atomic_number=105, period=7),
Element(symbol='Sg', atomic_number=106, period=7),
Element(symbol='Bh', atomic_number=107, period=7),
Element(symbol='Hs', atomic_number=108, period=7),
Element(symbol='Mt', atomic_number=109, period=7),
Element(symbol='Ds', atomic_number=110, period=7),
Element(symbol='Rg', atomic_number=111, period=7),
Element(symbol='Cn', atomic_number=112, period=7),
Element(symbol='Nh', atomic_number=113, period=7),
Element(symbol='Fl', atomic_number=114, period=7),
Element(symbol='Mc', atomic_number=115, period=7),
Element(symbol='Lv', atomic_number=116, period=7),
Element(symbol='Ts', atomic_number=117, period=7),
Element(symbol='Og', atomic_number=118, period=7),
)
ATOMIC_NUMS = {element.atomic_number: element for element in _ELEMENTS}
# Lookup by symbol instead of atomic number.
SYMBOLS = {element.symbol: element for element in _ELEMENTS}
# Lookup by period.
PERIODS = collections.defaultdict(list)
for element in _ELEMENTS:
PERIODS[element.period].append(element)
PERIODS = {period: tuple(elements) for period, elements in PERIODS.items()}
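# Illustrative usage sketch (editor's addition, not part of the original
# module): look up an element by symbol or atomic number and query its
# derived properties.
if __name__ == '__main__':
  nitrogen = SYMBOLS['N']
  print(nitrogen.atomic_number, nitrogen.period, nitrogen.group)  # 7 2 15
  print(nitrogen.spin_config, nitrogen.nalpha, nitrogen.nbeta)    # 3 5 2
  print(ATOMIC_NUMS[26].symbol)  # Fe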
| ferminet-master | ferminet/utils/elements.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| ferminet-master | ferminet/utils/__init__.py |
# Lint as: python3
# Copyright 2018 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Writer utility classes."""
import contextlib
import os
from typing import Mapping, Optional, Sequence
from absl import logging
import tables
class Writer(contextlib.AbstractContextManager):
"""Write data to CSV, as well as logging data to stdout if desired."""
def __init__(self,
name: str,
schema: Sequence[str],
directory: str = 'logs/',
iteration_key: Optional[str] = 't',
log: bool = True):
"""Initialise Writer.
Args:
name: file name for CSV.
schema: sequence of keys, corresponding to each data item.
directory: directory path to write file to.
iteration_key: if not None and not an empty string, also include the
iteration index as the first column in the CSV output with the given key.
log: Also log each entry to stdout.
"""
self._schema = schema
if not os.path.isdir(directory):
os.mkdir(directory)
self._filename = os.path.join(directory, name + '.csv')
self._iteration_key = iteration_key
self._log = log
def __enter__(self):
self._file = open(self._filename, 'w')
# write top row of csv
if self._iteration_key:
self._file.write(f'{self._iteration_key},')
self._file.write(','.join(self._schema) + '\n')
return self
def write(self, t: int, **data):
"""Writes to file and stdout.
Args:
t: iteration index.
**data: data items with keys as given in schema.
"""
row = [str(data.get(key, '')) for key in self._schema]
if self._iteration_key:
row.insert(0, str(t))
for key in data:
if key not in self._schema:
raise ValueError('Not a recognized key for writer: %s' % key)
# write the data to csv
self._file.write(','.join(row) + '\n')
# write the data to abseil logs
if self._log:
logging.info('Iteration %s: %s', t, data)
def __exit__(self, exc_type, exc_val, exc_tb):
self._file.close()
class H5Writer(contextlib.AbstractContextManager):
"""Write data to HDF5 files."""
def __init__(self,
name: str,
schema: Mapping[str, Sequence[int]],
directory: str = '',
index_key: str = 't',
compression_level: int = 5):
"""Initialise H5Writer.
Args:
name: file name for the HDF5 file.
schema: dict of keys mapping to the shape of each data item. All data is
assumed to be 32-bit floats.
directory: directory path to write file to.
index_key: name of (integer) key used to index each entry.
compression_level: compression level (0-9) used to compress HDF5 file.
"""
self._path = os.path.join(directory, name)
self._schema = schema
self._index_key = index_key
self._description = {}
self._file = None
self._complevel = compression_level
def __enter__(self):
if not self._schema:
return self
pos = 1
self._description[self._index_key] = tables.Int32Col(pos=pos)
for key, shape in self._schema.items():
pos += 1
self._description[key] = tables.Float32Col(pos=pos, shape=shape)
if not os.path.isdir(os.path.dirname(self._path)):
os.mkdir(os.path.dirname(self._path))
self._file = tables.open_file(
self._path,
mode='w',
title='Fermi Net Data',
filters=tables.Filters(complevel=self._complevel))
self._table = self._file.create_table(
where=self._file.root, name='data', description=self._description)
return self
def write(self, index: int, data):
"""Write data to HDF5 file.
Args:
index: iteration index.
data: dict of arrays to write to file. Only elements with keys in the
schema are written.
"""
if self._file:
h5_data = (index,)
for key in self._description:
if key != self._index_key:
h5_data += (data[key],)
self._table.append([h5_data])
self._table.flush()
def __exit__(self, exc_type, exc_val, exc_tb):
if self._file:
self._file.close()
self._file = None
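# Illustrative usage sketch (editor's addition, not part of the original
# module): minimal use of the CSV Writer above; the directory, file name and
# schema keys are arbitrary. H5Writer follows the same context-manager
# pattern but takes a schema mapping keys to array shapes.
if __name__ == '__main__':
  with Writer('example', schema=['loss', 'accuracy'],
              directory='/tmp/logs') as writer:
    for step in range(3):
      writer.write(step, loss=1.0 / (step + 1), accuracy=0.5 + 0.1 * step)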
| ferminet-master | ferminet/utils/writers.py |
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interaction with Hartree-Fock solver in pyscf."""
# Abbreviations used:
# SCF: self-consistent field (method). Another name for Hartree-Fock
# HF: Hartree-Fock method.
# RHF: restricted Hartree-Fock. Requires the molecular orbitals for the i-th
# alpha-spin and i-th beta-spin electrons to have the same spatial component.
# ROHF: restricted open-shell Hartree-Fock. Same as RHF except allows the number
# of alpha and beta electrons to differ.
# UHF: unrestricted Hartree-Fock. Permits breaking of spin symmetry and hence
# alpha and beta electrons to have different spatial components.
# AO: Atomic orbital. Underlying basis set (typically Gaussian-type orbitals and
# built into pyscf).
# MO: molecular orbitals/Hartree-Fock orbitals. Single-particle orbitals which
# are solutions to the Hartree-Fock equations.
from typing import Sequence, Tuple, Optional
from absl import logging
from ferminet.utils import system
import numpy as np
import pyscf
class Scf:
"""Helper class for running Hartree-Fock (self-consistent field) with pyscf.
Attributes:
molecule: list of system.Atom objects giving the atoms in the
molecule and their positions.
nelectrons: Tuple with number of alpha electrons and beta
electrons.
basis: Basis set to use, best specified with the relevant string
for a built-in basis set in pyscf. A user-defined basis set can be used
(advanced). See https://sunqm.github.io/pyscf/gto.html#input-basis for
more details.
pyscf_mol: the PySCF 'Molecule'. If this is passed to the init,
the molecule, nelectrons, and basis will not be used, and the
calculations will be performed on the existing pyscf_mol.
restricted: If true, use the restricted Hartree-Fock method, otherwise use
the unrestricted Hartree-Fock method.
"""
def __init__(self,
molecule: Optional[Sequence[system.Atom]] = None,
nelectrons: Optional[Tuple[int, int]] = None,
basis: Optional[str] = 'cc-pVTZ',
pyscf_mol: Optional[pyscf.gto.Mole] = None,
restricted: bool = True):
if pyscf_mol:
self._mol = pyscf_mol
else:
self.molecule = molecule
self.nelectrons = nelectrons
self.basis = basis
self._spin = nelectrons[0] - nelectrons[1]
self._mol = None
self.restricted = restricted
self._mean_field = None
pyscf.lib.param.TMPDIR = None
def run(self):
"""Runs the Hartree-Fock calculation.
Returns:
A pyscf scf object (i.e. pyscf.scf.rhf.RHF, pyscf.scf.uhf.UHF or
pyscf.scf.rohf.ROHF depending on the spin and restricted settings).
Raises:
RuntimeError: If the number of electrons in the PySCF molecule is not
consistent with self.nelectrons.
"""
# If not passed a pyscf molecule, create one
if not self._mol:
if any(atom.atomic_number - atom.charge > 1.e-8
for atom in self.molecule):
logging.info(
'Fractional nuclear charge detected. '
'Running SCF on atoms with integer charge.'
)
nuclear_charge = sum(atom.atomic_number for atom in self.molecule)
charge = nuclear_charge - sum(self.nelectrons)
self._mol = pyscf.gto.Mole(
atom=[[atom.symbol, atom.coords] for atom in self.molecule],
unit='bohr')
self._mol.basis = self.basis
self._mol.spin = self._spin
self._mol.charge = charge
self._mol.build()
if self._mol.nelectron != sum(self.nelectrons):
raise RuntimeError('PySCF molecule not consistent with QMC molecule.')
if self.restricted:
self._mean_field = pyscf.scf.RHF(self._mol)
else:
self._mean_field = pyscf.scf.UHF(self._mol)
self._mean_field.init_guess = 'atom'
self._mean_field.kernel()
return self._mean_field
def eval_mos(self, positions: np.ndarray,
deriv: bool = False) -> Tuple[np.ndarray, np.ndarray]:
"""Evaluates the Hartree-Fock single-particle orbitals at a set of points.
Args:
positions: numpy array of shape (N, 3) of the positions in space at which
to evaluate the Hartree-Fock orbitals.
deriv: If True, also calculate the first derivatives of the
single-particle orbitals.
Returns:
Pair of numpy float64 arrays of shape (N, M) (deriv=False) or (4, N, M)
(deriv=True), where 2M is the number of Hartree-Fock orbitals. The (i-th,
j-th) element in the first (second) array gives the value of the j-th
alpha (beta) Hartree-Fock orbital at the i-th electron position in
positions. For restricted (RHF, ROHF) calculations, the two arrays will be
identical.
If deriv=True, the first index contains [value, x derivative, y
derivative, z derivative].
Raises:
RuntimeError: If Hartree-Fock calculation has not been performed using
`run`.
NotImplementedError: If Hartree-Fock calculation used Cartesian
Gaussian-type orbitals as the underlying basis set.
"""
if self._mean_field is None:
raise RuntimeError('Mean-field calculation has not been run.')
if self.restricted:
coeffs = (self._mean_field.mo_coeff,)
else:
coeffs = self._mean_field.mo_coeff
# Assumes self._mol.cart (use of Cartesian Gaussian-type orbitals and
# integrals) is False (default behaviour of pyscf).
if self._mol.cart:
raise NotImplementedError(
'Evaluation of molecular orbitals using cartesian GTOs.')
# Note sph refers to the use of spherical GTO basis sets rather than
# Cartesian GTO basis sets. The coordinate system used for the electron
# positions is Cartesian in both cases.
gto_op = 'GTOval_sph_deriv1' if deriv else 'GTOval_sph'
ao_values = self._mol.eval_gto(gto_op, positions)
mo_values = tuple(np.matmul(ao_values, coeff) for coeff in coeffs)
if self.restricted:
# duplicate for beta electrons.
mo_values *= 2
return mo_values
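# Illustrative usage sketch (editor's addition, not part of the original
# module): rough example of driving the Scf wrapper above on LiH. Assumes
# pyscf is installed; the bond length and basis are illustrative choices.
if __name__ == '__main__':
  lih = [system.Atom(symbol='Li', coords=(0.0, 0.0, 0.0)),
         system.Atom(symbol='H', coords=(0.0, 0.0, 3.015))]  # ~1.6 angstrom
  hf = Scf(molecule=lih, nelectrons=(2, 2), basis='sto-3g')
  hf.run()
  # Evaluate the alpha/beta molecular orbitals at a few random points.
  points = np.random.randn(5, 3)
  alpha_mos, beta_mos = hf.eval_mos(points)
  print(alpha_mos.shape, beta_mos.shape)  # (5, n_orbitals) each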
| ferminet-master | ferminet/utils/scf.py |
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ferminet.utils.scf."""
from typing import List, Tuple
from absl.testing import absltest
from absl.testing import parameterized
from ferminet.utils import scf
from ferminet.utils import system
import numpy as np
import pyscf
class ScfTest(parameterized.TestCase):
def setUp(self):
super(ScfTest, self).setUp()
# disable use of temp directory in pyscf.
# Test calculations are small enough to fit in RAM and we don't need
# checkpoint files.
pyscf.lib.param.TMPDIR = None
@parameterized.parameters(
{
'molecule': [system.Atom('He', (0, 0, 0))],
'nelectrons': (1, 1)
},
{
'molecule': [system.Atom('N', (0, 0, 0))],
'nelectrons': (5, 2)
},
{
'molecule': [system.Atom('N', (0, 0, 0))],
'nelectrons': (5, 3)
},
{
'molecule': [system.Atom('N', (0, 0, 0))],
'nelectrons': (4, 2)
},
{
'molecule': [system.Atom('O', (0, 0, 0))],
'nelectrons': (5, 3),
'restricted': False,
},
{
'molecule': [
system.Atom('N', (0, 0, 0)),
system.Atom('N', (0, 0, 1.4))
],
'nelectrons': (7, 7)
},
{
'molecule': [
system.Atom('O', (0, 0, 0)),
system.Atom('O', (0, 0, 1.4))
],
'nelectrons': (9, 7),
'restricted': False,
},
)
def test_scf_interface(self,
molecule: List[system.Atom],
nelectrons: Tuple[int, int],
restricted: bool = True):
"""Tests SCF interface to a pyscf calculation.
pyscf has its own tests so only check that we can run calculations over
atoms and simple diatomics using the interface in ferminet.scf.
Args:
molecule: List of system.Atom objects giving atoms in the molecule.
nelectrons: Tuple containing number of alpha and beta electrons.
restricted: If true, run a restricted Hartree-Fock calculation, otherwise
run an unrestricted Hartree-Fock calculation.
"""
npts = 100
xs = np.random.randn(npts, 3)
hf = scf.Scf(molecule=molecule,
nelectrons=nelectrons,
restricted=restricted)
hf.run()
mo_vals = hf.eval_mos(xs)
self.assertLen(mo_vals, 2) # alpha-spin orbitals and beta-spin orbitals.
for spin_mo_vals in mo_vals:
# Evaluate npts points on M orbitals/functions - (npts, M) array.
self.assertEqual(spin_mo_vals.shape, (npts, hf._mol.nao_nr()))
if __name__ == '__main__':
absltest.main()
| ferminet-master | ferminet/utils/tests/base_scf_test.py |
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ferminet.utils.elements."""
from absl.testing import absltest
from absl.testing import parameterized
from ferminet.utils import elements
class ElementsTest(parameterized.TestCase):
def test_elements(self):
for n, element in elements.ATOMIC_NUMS.items():
self.assertEqual(n, element.atomic_number)
self.assertEqual(elements.SYMBOLS[element.symbol], element)
if element.symbol in ['Li', 'Na', 'K', 'Rb', 'Cs', 'Fr']:
self.assertEqual(element.period, elements.ATOMIC_NUMS[n - 1].period + 1)
elif element.symbol != 'H':
self.assertEqual(element.period, elements.ATOMIC_NUMS[n - 1].period)
self.assertCountEqual(
(element.symbol for element in elements.ATOMIC_NUMS.values()),
elements.SYMBOLS.keys())
self.assertCountEqual(
(element.atomic_number for element in elements.SYMBOLS.values()),
elements.ATOMIC_NUMS.keys())
@parameterized.parameters(
(elements.SYMBOLS['H'], 1, 1, 1, 1, 0),
(elements.SYMBOLS['He'], 1, 18, 0, 1, 1),
(elements.SYMBOLS['Li'], 2, 1, 1, 2, 1),
(elements.SYMBOLS['Be'], 2, 2, 0, 2, 2),
(elements.SYMBOLS['C'], 2, 14, 2, 4, 2),
(elements.SYMBOLS['N'], 2, 15, 3, 5, 2),
(elements.SYMBOLS['Al'], 3, 13, 1, 7, 6),
(elements.SYMBOLS['Zn'], 4, 12, 0, 15, 15),
(elements.SYMBOLS['Ga'], 4, 13, 1, 16, 15),
(elements.SYMBOLS['Kr'], 4, 18, 0, 18, 18),
(elements.SYMBOLS['Ce'], 6, -1, -1, None, None),
(elements.SYMBOLS['Ac'], 7, 3, -1, None, None),
)
def test_element_group_period(self, element, period, group, spin_config,
nalpha, nbeta):
# Validate subset of elements. See below for more thorough tests using
# properties of the periodic table.
with self.subTest('Verify period'):
self.assertEqual(element.period, period)
with self.subTest('Verify group'):
self.assertEqual(element.group, group)
with self.subTest('Verify spin configuration'):
if (element.period > 5 and
(element.group == -1 or 3 <= element.group <= 12)):
with self.assertRaises(NotImplementedError):
_ = element.spin_config
else:
self.assertEqual(element.spin_config, spin_config)
with self.subTest('Verify electrons per spin'):
if (element.period > 5 and
(element.group == -1 or 3 <= element.group <= 12)):
with self.assertRaises(NotImplementedError):
_ = element.nalpha
with self.assertRaises(NotImplementedError):
_ = element.nbeta
else:
self.assertEqual(element.nalpha, nalpha)
self.assertEqual(element.nbeta, nbeta)
def test_periods(self):
self.assertLen(elements.ATOMIC_NUMS,
sum(len(period) for period in elements.PERIODS.values()))
period_length = {1: 2, 2: 8, 3: 8, 4: 18, 5: 18, 6: 32, 7: 32}
for p, es in elements.PERIODS.items():
self.assertLen(es, period_length[p])
def test_groups(self):
# Atomic numbers of first element in each period.
period_starts = sorted([
period_elements[0].atomic_number
for period_elements in elements.PERIODS.values()
])
# Iterate over all elements in order of atomic number. Group should
# increment monotonically (except for accommodating absence of d block and
# presence of f block) and reset to 1 on the first element in each period.
for i in range(1, len(elements.ATOMIC_NUMS)+1):
element = elements.ATOMIC_NUMS[i]
if element.atomic_number in period_starts:
prev_group = 0
fblock = 0
if element.symbol == 'He':
# Full shell, not just full s subshell.
self.assertEqual(element.group, 18)
elif element.group == -1:
# Found a lanthanide (period 6) or actinide (period 7).
self.assertIn(element.period, [6, 7])
fblock += 1
elif element.atomic_number == 5 or element.atomic_number == 13:
# No d block (10 elements, groups 3-12) in periods 2 and 3.
self.assertEqual(element.group, prev_group + 11)
else:
# Group should increment monotonically.
self.assertEqual(element.group, prev_group + 1)
if element.group != -1:
prev_group = element.group
self.assertGreaterEqual(prev_group, 1)
self.assertLessEqual(prev_group, 18)
if element.group == 4 and element.period > 6:
# Should have seen 14 lanthanides (period 6) or 14 actinides (period 7).
self.assertEqual(fblock, 14)
# The periodic table (up to element 118) contains 7 periods.
# Hydrogen and Helium are placed in groups 1 and 18 respectively.
# Groups 1-2 (s-block) and 13-18 (p-block) are present in the second
# period onwards, groups 3-12 (d-block) the fourth period onwards.
# Check each group contains the expected number of elements.
nelements_in_group = [0]*18
for element in elements.ATOMIC_NUMS.values():
if element.group != -1:
nelements_in_group[element.group-1] += 1
self.assertListEqual(nelements_in_group, [7, 6] + [4]*10 + [6]*5 + [7])
if __name__ == '__main__':
absltest.main()
| ferminet-master | ferminet/utils/tests/elements_test.py |
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ferminet.utils.system."""
from absl.testing import absltest
from absl.testing import parameterized
from ferminet.utils import system
from ferminet.utils import units
import numpy as np
class SystemAtomCoordsTest(absltest.TestCase):
def test_atom_coords(self):
xs = np.random.uniform(size=3)
atom = system.Atom(symbol='H', coords=xs, units='angstrom')
np.testing.assert_allclose(atom.coords / xs, [units.BOHR_ANGSTROM]*3)
np.testing.assert_allclose(atom.coords_angstrom, xs)
def test_atom_units(self):
system.Atom(symbol='H', coords=[1, 2, 3], units='bohr')
system.Atom(symbol='H', coords=[1, 2, 3], units='angstrom')
with self.assertRaises(ValueError):
system.Atom(symbol='H', coords=[1, 2, 3], units='dummy')
class SystemCreationsTest(parameterized.TestCase):
@parameterized.parameters(
{'symbol': 'He', 'charge': 0},
{'symbol': 'C', 'charge': 0},
{'symbol': 'Ne', 'charge': 0},
{'symbol': 'Ne', 'charge': 1},
{'symbol': 'Ne', 'charge': -1},
)
def test_create_atom(self, symbol, charge):
mol, spins = system.atom(symbol, charge=charge)
self.assertLen(mol, 1)
self.assertEqual(mol[0].symbol, symbol)
self.assertEqual(sum(spins), mol[0].atomic_number - charge)
np.testing.assert_allclose(np.asarray(mol[0].coords), np.zeros(3))
@parameterized.parameters(
{'symbol': 'LiH'},
{'symbol': 'Li2'},
{'symbol': 'N2'},
{'symbol': 'CO'},
{'symbol': 'CH4'},
{'symbol': 'NH3'},
{'symbol': 'C2H4'},
{'symbol': 'C4H6'},
)
def test_create_molecule(self, symbol):
_, _ = system.molecule(symbol)
@parameterized.parameters(
{'n': 10, 'r': 1.0},
{'n': 11, 'r': 1.0},
{'n': 20, 'r': 2.0},
)
def test_create_hydrogen_chain(self, n, r):
mol, spins = system.hn(n, r)
self.assertLen(mol, n)
for atom in mol:
self.assertAlmostEqual(atom.coords[0], 0)
self.assertAlmostEqual(atom.coords[1], 0)
for atom1, atom2 in zip(mol[:-1], mol[1:]):
self.assertAlmostEqual(atom2.coords[2] - atom1.coords[2], r)
self.assertEqual(spins, (n - n // 2, n // 2))
@parameterized.parameters(
{'r': 1.0, 'angle': np.pi/4.0},
{'r': 1.0, 'angle': np.pi/6.0},
{'r': 2.0, 'angle': np.pi/4.0},
)
def test_create_hydrogen_circle(self, r, angle):
mol, spins = system.h4_circle(r, angle)
self.assertEqual(spins, (2, 2))
self.assertLen(mol, 4)
for atom in mol:
self.assertAlmostEqual(atom.coords[2], 0)
theta = np.abs(np.arctan(atom.coords[1] / atom.coords[0]))
self.assertAlmostEqual(theta, angle)
if __name__ == '__main__':
absltest.main()
| ferminet-master | ferminet/utils/tests/system_test.py |
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ferminet.utils.units."""
from absl.testing import absltest
from ferminet.utils import units
import numpy as np
class UnitsTest(absltest.TestCase):
def test_angstrom2bohr(self):
self.assertAlmostEqual(units.angstrom2bohr(2), 3.77945225091, places=10)
def test_angstrom2bohr_numpy(self):
x = np.random.uniform(size=(3,))
x1 = units.angstrom2bohr(x)
x2 = np.array([units.angstrom2bohr(v) for v in x])
np.testing.assert_allclose(x1, x2)
def test_bohr2angstrom(self):
self.assertAlmostEqual(units.bohr2angstrom(2), 1.05835442134, places=10)
def test_bohr2angstrom_numpy(self):
x = np.random.uniform(size=(3,))
x1 = units.bohr2angstrom(x)
x2 = np.array([units.bohr2angstrom(v) for v in x])
np.testing.assert_allclose(x1, x2)
def test_angstrom_bohr_idempotent(self):
x = np.random.uniform()
x1 = units.bohr2angstrom(units.angstrom2bohr(x))
self.assertAlmostEqual(x, x1, places=10)
def test_bohr_angstrom_idempotent(self):
x = np.random.uniform()
x1 = units.angstrom2bohr(units.bohr2angstrom(x))
self.assertAlmostEqual(x, x1, places=10)
def test_hartree2kcal(self):
self.assertAlmostEqual(units.hartree2kcal(2), 1255.018948, places=10)
def test_hartree2kcal_numpy(self):
x = np.random.uniform(size=(3,))
x1 = units.hartree2kcal(x)
x2 = np.array([units.hartree2kcal(v) for v in x])
np.testing.assert_allclose(x1, x2)
def test_kcal2hartree(self):
self.assertAlmostEqual(units.kcal2hartree(2), 0.00318720287, places=10)
def test_kcal2hartree_numpy(self):
x = np.random.uniform(size=(3,))
x1 = units.kcal2hartree(x)
x2 = np.array([units.kcal2hartree(v) for v in x])
np.testing.assert_allclose(x1, x2)
def test_hartree_kcal_idempotent(self):
x = np.random.uniform()
x1 = units.kcal2hartree(units.hartree2kcal(x))
self.assertAlmostEqual(x, x1, places=10)
def test_kcal_hartree_idempotent(self):
x = np.random.uniform()
x1 = units.hartree2kcal(units.kcal2hartree(x))
self.assertAlmostEqual(x, x1, places=10)
if __name__ == '__main__':
absltest.main()
| ferminet-master | ferminet/utils/tests/units_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for all models."""
import pathlib
from typing import List
from absl.testing import absltest
from absl.testing import parameterized
import mujoco
# Internal import.
_ROOT_DIR = pathlib.Path(__file__).parent.parent
_MODEL_DIRS = [f for f in _ROOT_DIR.iterdir() if f.is_dir()]
_MODEL_XMLS: List[pathlib.Path] = []
for model_dir in _MODEL_DIRS:
xmls = model_dir.glob('scene*.xml')
for xml in xmls:
_MODEL_XMLS.append(xml)
# Total simulation duration, in seconds.
_MAX_SIM_TIME = 1.0
# Scale for the pseudorandom control noise.
_NOISE_SCALE = 1.0
def _pseudorandom_ctrlnoise(
model: mujoco.MjModel,
data: mujoco.MjData,
i: int,
noise: float,
) -> None:
for j in range(model.nu):
ctrlrange = model.actuator_ctrlrange[j]
if model.actuator_ctrllimited[j]:
center = 0.5 * (ctrlrange[1] + ctrlrange[0])
radius = 0.5 * (ctrlrange[1] - ctrlrange[0])
else:
center = 0.0
radius = 1.0
data.ctrl[j] = center + radius * noise * (2*mujoco.mju_Halton(i, j+2) - 1)
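# Worked example (editor's note, not part of the original test): for a
# ctrl-limited actuator with ctrlrange = [-1, 2] the mapping above gives
# center = 0.5 and radius = 1.5, so with noise = 1.0 the control is sampled
# from [-1, 2); mju_Halton(i, j + 2) lies in [0, 1) and 2*h - 1 rescales it
# to [-1, 1) before scaling by the radius.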
class ModelsTest(parameterized.TestCase):
@parameterized.parameters(_MODEL_XMLS)
def test_compiles_and_steps(self, xml_path: pathlib.Path) -> None:
model = mujoco.MjModel.from_xml_path(str(xml_path))
data = mujoco.MjData(model)
i = 0
while data.time < _MAX_SIM_TIME:
_pseudorandom_ctrlnoise(model, data, i, _NOISE_SCALE)
mujoco.mj_step(model, data)
i += 1
# Check no warnings were triggered during the simulation.
if not all(data.warning.number == 0):
warning_info = '\n'.join([
f'{mujoco.mjtWarning(enum_value).name}: count={count}'
for enum_value, count in enumerate(data.warning.number) if count
])
self.fail(f'MuJoCo warning(s) encountered:\n{warning_info}')
if __name__ == '__main__':
absltest.main()
| mujoco_menagerie-main | test/model_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Smooth conformal training with prediction and calibration."""
import functools
from typing import Tuple, Dict, Any, Callable, Union
import haiku as hk
import jax
import jax.numpy as jnp
import ml_collections as collections
import evaluation as cpeval
import smooth_conformal_prediction as scp
import train_coverage as cpcoverage
import train_utils as cputils
SmoothCalibrateFn = Callable[
[Union[Any, jnp.ndarray], Union[Any, jnp.ndarray], Union[Any, jnp.ndarray]],
Union[Any, jnp.ndarray]]
class TrainConformal(cpcoverage.TrainCoverage):
"""Conformal training takes into account calibration and prediction."""
def __init__(self, config, data, optimizer):
"""Initialize conformal training.
Args:
config: training configuration
data: datasets and information
optimizer: optimizer to use
"""
super(TrainConformal, self).__init__(config, data, optimizer)
self.smooth_predict_fn = None
"""(callable) Smooth prediction function to get confidence sets."""
# We need separate calibration functions for training and testing
# to allow different batch sizes.
self.train_smooth_calibrate_fn = None
"""(callable) Training smooth conformal calibration function."""
self.test_smooth_calibrate_fn = None
"""(callable) Test smooth conformal calibration function."""
def compute_loss_and_error_with_calibration(
self,
trainable_params: cputils.FlatMapping,
fixed_params: cputils.FlatMapping,
inputs: jnp.ndarray,
labels: jnp.ndarray,
model_state: cputils.FlatMapping,
training: bool,
rng: jnp.ndarray,
# The calibration function needs to be passed as argument because
# we need to make two copies of compute_loss_and_error: one for
# training and one for testing. This is because smooth_calibrate_fn
# depends on the batch size, which we allow to change between training
# and test set for datasets with very few examples.
smooth_calibrate_fn: SmoothCalibrateFn,
) -> Tuple[jnp.ndarray, Tuple[cputils.FlatMapping, Dict[str, Any]]]:
"""Compute conformal loss with prediction and calibration on split batch.
Calibrates the conformal predictor on the first half of the batch and
computes coverage and size loss on the second half of the batch.
Args:
trainable_params: trainable model parameters
fixed_params: model parameters fixed for fine-tuning
inputs: input examples
labels: ground truth examples
model_state: model state
training: training mode
rng: random key
smooth_calibrate_fn: smooth calibration function
Returns:
Tuple consisting of loss and another tuple of new model state and a
dictionary with additional information
"""
params = hk.data_structures.merge(trainable_params, fixed_params)
logits, new_model_state = self.model.apply(
params, model_state, rng, inputs, training=training)
val_split = int(self.config.conformal.fraction * logits.shape[0])
val_logits = logits[:val_split]
val_labels = labels[:val_split]
test_logits = logits[val_split:]
test_labels = labels[val_split:]
val_tau = smooth_calibrate_fn(val_logits, val_labels, rng)
test_confidence_sets = self.smooth_predict_fn(test_logits, val_tau, rng)
coverage_loss = self.coverage_loss_fn(test_confidence_sets, test_labels)
size_loss = self.size_loss_fn(
test_confidence_sets, test_logits, test_labels)
size_loss *= self.config.conformal.size_weight
weight_decay_loss = cputils.compute_weight_decay(params)
weight_decay_loss *= self.config.weight_decay
cross_entropy_loss = cputils.compute_cross_entropy_loss(logits, labels)
cross_entropy_loss *= self.config.conformal.cross_entropy_weight
loss = self.loss_transform_fn(coverage_loss + size_loss + 1e-8)
loss += cross_entropy_loss
loss += weight_decay_loss
test_confidence_sets = jnp.greater(
test_confidence_sets, jnp.ones_like(test_confidence_sets) * 0.5)
error = 1 - cpeval.compute_accuracy(logits, labels)
coverage = cpeval.compute_coverage(test_confidence_sets, test_labels)
size, _ = cpeval.compute_size(test_confidence_sets)
return loss, (new_model_state, {
'coverage_loss': coverage_loss,
'size_loss': size_loss,
'cross_entropy_loss': cross_entropy_loss,
'weight_decay': weight_decay_loss,
'error': error,
'coverage': coverage,
'size': size,
})
def compute_loss_and_error(
self,
trainable_params: cputils.FlatMapping,
fixed_params: cputils.FlatMapping,
inputs: jnp.ndarray,
labels: jnp.ndarray,
model_state: cputils.FlatMapping,
training: bool,
rng: jnp.ndarray,
) -> Tuple[jnp.ndarray, Tuple[cputils.FlatMapping, Dict[str, Any]]]:
"""To be safe, override as not implemented."""
raise NotImplementedError
def select_smooth_calibrate(
self,
config: collections.ConfigDict
) -> Tuple[SmoothCalibrateFn, SmoothCalibrateFn]:
"""Select smooth confidence set prediction and calibration functions.
See smooth_conformal_prediction for options.
Args:
config: sub-configuration for selecting prediction/calibration function
Returns:
Tuple of smooth calibration functions for the training and test batch sizes
"""
train_calibration_examples = int(
self.config.conformal.fraction * self.config.batch_size)
test_calibration_examples = int(
self.config.conformal.fraction * self.config.test_batch_size)
def get_smooth_quantile_fn(calibration_examples):
"""Helper to create smooth quantile function for given #examples."""
return functools.partial(
scp.smooth_conformal_quantile,
sos=self.get_sos(calibration_examples),
dispersion=config.dispersion)
get_right_smooth_quantile_fn = get_smooth_quantile_fn
if config.method == 'threshold':
def smooth_calibrate_fn(logits, labels, unused_rng, quantile_fn):
return scp.smooth_calibrate_threshold(
logits, labels, alpha=config.alpha,
smooth_quantile_fn=quantile_fn)
elif config.method == 'threshold_p':
def smooth_calibrate_fn(logits, labels, unused_rng, quantile_fn):
probabilities = jax.nn.softmax(logits, axis=1)
return scp.smooth_calibrate_threshold(
probabilities, labels, alpha=config.alpha,
smooth_quantile_fn=quantile_fn)
elif config.method == 'threshold_logp':
def smooth_calibrate_fn(logits, labels, unused_rng, quantile_fn):
log_probabilities = jax.nn.log_softmax(logits, axis=1)
return scp.smooth_calibrate_threshold(
log_probabilities, labels, alpha=config.alpha,
smooth_quantile_fn=quantile_fn)
elif config.method == 'aps':
sos = self.get_sos(self.data['classes'])
def smooth_calibrate_fn(logits, labels, rng, quantile_fn):
probabilities = jax.nn.softmax(logits, axis=1)
return scp.smooth_calibrate_aps(
probabilities, labels,
alpha=config.alpha,
sos=sos, rng=rng if config.rng else None,
dispersion=config.dispersion,
smooth_quantile_fn=quantile_fn)
else:
raise ValueError('Invalid smooth calibration method.')
train_smooth_calibrate_fn = functools.partial(
smooth_calibrate_fn,
quantile_fn=get_right_smooth_quantile_fn(train_calibration_examples))
test_smooth_calibrate_fn = functools.partial(
smooth_calibrate_fn,
quantile_fn=get_right_smooth_quantile_fn(test_calibration_examples))
return train_smooth_calibrate_fn, test_smooth_calibrate_fn
def get_conformal_config(self):
"""Overridable helper to select the right config.
Returns:
Configuration for conformal training
"""
return self.config.conformal
def get_train_fns(
self) -> Tuple[cputils.LossFn, functools.partial]:
"""For conformal training, we use separate training and test loss fn."""
conformal_config = self.get_conformal_config()
smooth_calibrate_fns = self.select_smooth_calibrate(conformal_config)
self.train_smooth_calibrate_fn = smooth_calibrate_fns[0]
self.test_smooth_calibrate_fn = smooth_calibrate_fns[1]
self.smooth_predict_fn = self.select_smooth_predict(conformal_config)
self.coverage_loss_fn = self.select_coverage_loss(conformal_config)
self.size_loss_fn = self.select_size_loss(conformal_config)
self.loss_transform_fn = self.select_loss_transform(conformal_config)
train_loss_fn = functools.partial(
self.compute_loss_and_error_with_calibration,
smooth_calibrate_fn=self.train_smooth_calibrate_fn)
test_loss_fn = functools.partial(
self.compute_loss_and_error_with_calibration,
smooth_calibrate_fn=self.test_smooth_calibrate_fn)
# The training loss is only used within the update function.
update_fn = functools.partial(
cputils.update, loss_fn=train_loss_fn, optimizer=self.optimizer)
if self.config.jit:
test_loss_fn = jax.jit(test_loss_fn, static_argnames='training')
update_fn = jax.jit(update_fn, static_argnames='training')
return test_loss_fn, update_fn
# Need to override this again from TrainCoverage as we do not need
# separate calibration when fine-tuning.
def run(self, rng: hk.PRNGSequence):
"""Main training procedure.
Args:
rng: random key sequence
"""
trainable_params, fixed_params, model_state = self.setup(rng)
params, model_state = self.train(
trainable_params, fixed_params, model_state, rng)
self.test(params, model_state)
| conformal_training-main | train_conformal.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AutoAugment policy for CIFAR10."""
import inspect
import math
import tensorflow.compat.v2 as tf
import tensorflow_addons.image as contrib_image
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.
def policy_cifar10():
"""CIFAR10 AutoAugment from https://arxiv.org/pdf/1805.09501.pdf Tab 7."""
policy = [
[('Invert', 0.1, 7), ('Contrast', 0.2, 6)], # 0
[('Rotate', 0.7, 2), ('TranslateX', 0.3, 9)], # 1
[('Sharpness', 0.8, 1), ('Sharpness', 0.9, 3)], # 2
[('ShearY', 0.5, 8), ('TranslateY', 0.7, 9)], # 3
[('AutoContrast', 0.5, 8), ('Equalize', 0.9, 2)], # 4
[('ShearY', 0.2, 7), ('Posterize', 0.3, 7)], # 5
[('Color', 0.4, 3), ('Brightness', 0.6, 7)], # 6
[('Sharpness', 0.3, 9), ('Brightness', 0.7, 9)], # 7
[('Equalize', 0.6, 5), ('Equalize', 0.5, 1)], # 8
[('Contrast', 0.6, 7), ('Sharpness', 0.6, 5)], # 9
[('Color', 0.7, 7), ('TranslateX', 0.5, 8)], # 10
[('Equalize', 0.3, 7), ('AutoContrast', 0.4, 8)], # 11
[('TranslateY', 0.4, 3), ('Sharpness', 0.2, 6)], # 12
[('Brightness', 0.9, 6), ('Color', 0.2, 8)], # 13
[('Solarize', 0.5, 2), ('Invert', 0., 3)], # 14
[('Equalize', 0.2, 0), ('AutoContrast', 0.6, 0)], # 15
[('Equalize', 0.2, 8), ('Equalize', 0.6, 4)], # 16
[('Color', 0.9, 9), ('Equalize', 0.6, 6)], # 17
[('AutoContrast', 0.8, 4), ('Solarize', 0.2, 8)], # 18
[('Brightness', 0.1, 3), ('Color', 0.7, 0)], # 19
[('Solarize', 0.4, 5), ('AutoContrast', 0.9, 3)], # 20
[('TranslateY', 0.9, 9), ('TranslateY', 0.7, 9)], # 21
[('AutoContrast', 0.9, 2), ('Solarize', 0.8, 3)], # 22
[('Equalize', 0.8, 8), ('Invert', 0.1, 3)], # 23
[('TranslateY', 0.7, 9), ('AutoContrast', 0.9, 1)], # 24
]
return policy
def blend(image1, image2, factor):
"""Blend image1 and image2 using 'factor'.
Factor can be above 0.0. A value of 0.0 means only image1 is used.
A value of 1.0 means only image2 is used. A value between 0.0 and
1.0 means we linearly interpolate the pixel values between the two
images. A value greater than 1.0 "extrapolates" the difference
between the two pixel values, and we clip the results to values
between 0 and 255.
Args:
image1: An image Tensor of type uint8.
image2: An image Tensor of type uint8.
factor: A floating point value above 0.0.
Returns:
A blended image Tensor of type uint8.
"""
if factor == 0.0:
return tf.convert_to_tensor(image1)
if factor == 1.0:
return tf.convert_to_tensor(image2)
image1 = tf.cast(image1, tf.float32)
image2 = tf.cast(image2, tf.float32)
difference = image2 - image1
scaled = factor * difference
# Do addition in float.
temp = tf.cast(image1, tf.float32) + scaled
# Interpolate
if factor > 0.0 and factor < 1.0:
# Interpolation means we always stay within 0 and 255.
return tf.cast(temp, tf.uint8)
# Extrapolate:
#
# We need to clip and then cast.
return tf.cast(tf.clip_by_value(temp, 0.0, 255.0), tf.uint8)
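# Illustrative usage sketch (hypothetical helper, not part of the original
# module): blending an image towards a zero image with factor 0.5 roughly
# halves its pixel values, which is exactly how `brightness` below uses
# `blend`.
def _example_blend_usage():
  gray = tf.zeros([32, 32, 3], dtype=tf.uint8) + 200
  # Pixel values end up around 100 = 0 + 0.5 * (200 - 0).
  return blend(tf.zeros_like(gray), gray, factor=0.5)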
def cutout(image, pad_size, replace=0):
"""Apply cutout (https://arxiv.org/abs/1708.04552) to image.
This operation applies a (2*pad_size x 2*pad_size) mask of zeros to
  a random location within `image`. The pixel values filled in will be of the
  value `replace`. The location where the mask will be applied is randomly
chosen uniformly over the whole image.
Args:
image: An image Tensor of type uint8.
    pad_size: Specifies how big the zero mask that is applied to the image
      will be. The mask will be of size (2*pad_size x 2*pad_size).
replace: What pixel value to fill in the image in the area that has
the cutout mask applied to it.
Returns:
An image Tensor that is of type uint8.
"""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# Sample the center location in the image where the zero mask will be applied.
cutout_center_height = tf.random.uniform(
shape=[], minval=0, maxval=image_height,
dtype=tf.int32)
cutout_center_width = tf.random.uniform(
shape=[], minval=0, maxval=image_width,
dtype=tf.int32)
lower_pad = tf.maximum(0, cutout_center_height - pad_size)
upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_size)
left_pad = tf.maximum(0, cutout_center_width - pad_size)
right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size)
cutout_shape = [image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad)]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=image.dtype),
padding_dims, constant_values=1)
mask = tf.expand_dims(mask, -1)
mask = tf.tile(mask, [1, 1, 3])
image = tf.where(
tf.equal(mask, 0),
tf.ones_like(image, dtype=image.dtype) * replace,
image)
return image
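# Illustrative usage sketch (hypothetical helper, values chosen to match the
# CIFAR-10 defaults below): cut an 8x8 patch filled with value 121 out of a
# random 32x32 image.
def _example_cutout_usage():
  image = tf.cast(
      tf.random.uniform([32, 32, 3], maxval=256, dtype=tf.int32), tf.uint8)
  return cutout(image, pad_size=4, replace=121)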
def solarize(image, threshold=128):
# For each pixel in the image, select the pixel
# if the value is less than the threshold.
# Otherwise, subtract 255 from the pixel.
return tf.where(image < threshold, image, 255 - image)
def solarize_increasing(image, threshold=128):
# Version of solarize where the magnitude of the transformation
# increases with M.
image = tf.cast(image, dtype=tf.float32)
image = tf.where(image < (256 - threshold), image, 255 - image)
return tf.cast(image, tf.uint8)
def solarize_add(image, addition=0, threshold=128):
# For each pixel in the image less than threshold
# we add 'addition' amount to it and then clip the
# pixel value to be between 0 and 255. The value
# of 'addition' is between -128 and 128.
added_image = tf.cast(image, tf.int64) + addition
added_image = tf.cast(tf.clip_by_value(added_image, 0, 255), tf.uint8)
return tf.where(image < threshold, added_image, image)
def color(image, factor):
"""Equivalent of PIL Color."""
degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
return blend(degenerate, image, factor)
def contrast(image, factor):
"""Equivalent of PIL Contrast."""
degenerate = tf.image.rgb_to_grayscale(image)
# Cast before calling tf.histogram.
degenerate = tf.cast(degenerate, tf.int32)
# Compute the grayscale histogram, then compute the mean pixel value,
# and create a constant image size of that value. Use that as the
# blending degenerate target of the original image.
hist = tf.histogram_fixed_width(degenerate, [0, 255], nbins=256)
mean = tf.reduce_sum(tf.cast(hist, tf.float32)) / 256.0
degenerate = tf.ones_like(degenerate, dtype=tf.float32) * mean
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.image.grayscale_to_rgb(tf.cast(degenerate, tf.uint8))
return blend(degenerate, image, factor)
def brightness(image, factor):
"""Equivalent of PIL Brightness."""
degenerate = tf.zeros_like(image)
return blend(degenerate, image, factor)
def posterize(image, bits):
"""Equivalent of PIL Posterize."""
shift = 8 - bits
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift)
def rotate(image, degrees, replace):
"""Rotates the image by degrees either clockwise or counterclockwise.
Args:
image: An image Tensor of type uint8.
degrees: Float, a scalar angle in degrees to rotate all images by. If
degrees is positive the image will be rotated clockwise otherwise it will
be rotated counterclockwise.
replace: A one or three value 1D tensor to fill empty pixels caused by
the rotate operation.
Returns:
The rotated version of image.
"""
# Convert from degrees to radians.
degrees_to_radians = math.pi / 180.0
radians = degrees * degrees_to_radians
# In practice, we should randomize the rotation degrees by flipping
# it negatively half the time, but that's done on 'degrees' outside
# of the function.
image = contrib_image.rotate(wrap(image), radians)
return unwrap(image, replace)
def translate_x(image, pixels, replace):
"""Equivalent of PIL Translate in X dimension."""
image = contrib_image.translate(wrap(image), [-pixels, 0])
return unwrap(image, replace)
def translate_x_rel(image, pct, replace):
"""Equivalent of PIL Translate in X dimension."""
max_x = tf.shape(image)[1]
pixels = tf.cast(max_x, tf.float32) * pct
image = contrib_image.translate(wrap(image), [-pixels, 0])
return unwrap(image, replace)
def translate_y(image, pixels, replace):
"""Equivalent of PIL Translate in Y dimension."""
image = contrib_image.translate(wrap(image), [0, -pixels])
return unwrap(image, replace)
def translate_y_rel(image, pct, replace):
"""Equivalent of PIL Translate in Y dimension."""
max_y = tf.shape(image)[0]
pixels = tf.cast(max_y, tf.float32) * pct
image = contrib_image.translate(wrap(image), [0, -pixels])
return unwrap(image, replace)
def shear_x(image, level, replace):
"""Equivalent of PIL Shearing in X dimension."""
# Shear parallel to x axis is a projective transform
# with a matrix form of:
# [1 level
# 0 1].
image = contrib_image.transform(
wrap(image), [1., level, 0., 0., 1., 0., 0., 0.])
return unwrap(image, replace)
def shear_y(image, level, replace):
"""Equivalent of PIL Shearing in Y dimension."""
# Shear parallel to y axis is a projective transform
# with a matrix form of:
# [1 0
# level 1].
image = contrib_image.transform(
wrap(image), [1., 0., 0., level, 1., 0., 0., 0.])
return unwrap(image, replace)
def autocontrast(image):
"""Implements Autocontrast function from PIL using TF ops.
Args:
image: A 3D uint8 tensor.
Returns:
The image after it has had autocontrast applied to it and will be of type
uint8.
"""
def scale_channel(image):
"""Scale the 2D image using the autocontrast rule."""
# A possibly cheaper version can be done using cumsum/unique_with_counts
    # over the histogram values, rather than iterating over the entire image
    # to compute mins and maxes.
lo = tf.cast(tf.reduce_min(image), tf.float32)
hi = tf.cast(tf.reduce_max(image), tf.float32)
# Scale the image, making the lowest value 0 and the highest value 255.
def scale_values(im):
scale = 255.0 / (hi - lo)
offset = -lo * scale
im = tf.cast(im, tf.float32) * scale + offset
im = tf.clip_by_value(im, 0.0, 255.0)
return tf.cast(im, tf.uint8)
result = tf.cond(hi > lo, lambda: scale_values(image), lambda: image)
return result
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image[:, :, 0])
s2 = scale_channel(image[:, :, 1])
s3 = scale_channel(image[:, :, 2])
image = tf.stack([s1, s2, s3], 2)
return image
def sharpness(image, factor):
"""Implements Sharpness function from PIL using TF ops."""
orig_image = image
image = tf.cast(image, tf.float32)
# Make image 4D for conv operation.
image = tf.expand_dims(image, 0)
# SMOOTH PIL Kernel.
kernel = tf.constant(
[[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.
# Tile across channel dimension.
kernel = tf.tile(kernel, [1, 1, 3, 1])
strides = [1, 1, 1, 1]
degenerate = tf.nn.depthwise_conv2d(
image, kernel, strides, padding='VALID', dilations=[1, 1])
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.squeeze(tf.cast(degenerate, tf.uint8), [0])
# For the borders of the resulting image, fill in the values of the
# original image.
mask = tf.ones_like(degenerate)
padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])
padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]])
result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image)
# Blend the final result.
return blend(result, orig_image, factor)
def equalize(image):
"""Implements Equalize function from PIL using TF ops."""
def scale_channel(im, c):
"""Scale the data in the channel to implement equalize."""
im = tf.cast(im[:, :, c], tf.int32)
# Compute the histogram of the image channel.
histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
# For the purposes of computing the step, filter out the nonzeros.
nonzero = tf.where(tf.not_equal(histo, 0))
nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
def build_lut(histo, step):
# Compute the cumulative sum, shifting by step // 2
# and then normalization by step.
lut = (tf.cumsum(histo) + (step // 2)) // step
# Shift lut, prepending with 0.
lut = tf.concat([[0], lut[:-1]], 0)
# Clip the counts to be in range. This is done
# in the C code for image.point.
return tf.clip_by_value(lut, 0, 255)
# If step is zero, return the original image. Otherwise, build
# lut from the full histogram and step and then index from it.
result = tf.cond(tf.equal(step, 0),
lambda: im,
lambda: tf.gather(build_lut(histo, step), im))
return tf.cast(result, tf.uint8)
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image, 0)
s2 = scale_channel(image, 1)
s3 = scale_channel(image, 2)
image = tf.stack([s1, s2, s3], 2)
return image
def invert(image):
"""Inverts the image pixels."""
image = tf.convert_to_tensor(image)
return 255 - image
def wrap(image):
"""Returns 'image' with an extra channel set to all 1s."""
shape = tf.shape(image)
extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype)
extended = tf.concat([image, extended_channel], 2)
return extended
def unwrap(image, replace):
"""Unwraps an image produced by wrap.
Where there is a 0 in the last channel for every spatial position,
the rest of the three channels in that spatial dimension are grayed
(set to 128). Operations like translate and shear on a wrapped
Tensor will leave 0s in empty locations. Some transformations look
at the intensity of values to do preprocessing, and we want these
empty pixels to assume the 'average' value, rather than pure black.
Args:
image: A 3D Image Tensor with 4 channels.
replace: A one or three value 1D tensor to fill empty pixels.
Returns:
image: A 3D image Tensor with 3 channels.
"""
image_shape = tf.shape(image)
# Flatten the spatial dimensions.
flattened_image = tf.reshape(image, [-1, image_shape[2]])
# Find all pixels where the last channel is zero.
alpha_channel = flattened_image[:, 3]
replace = tf.constant([replace] * 3 + [1], image.dtype)
# Where they are zero, fill them in with 'replace'.
flattened_image = tf.where(
tf.expand_dims(tf.equal(alpha_channel, 0), 1),
tf.ones_like(flattened_image, dtype=image.dtype) * replace,
flattened_image)
image = tf.reshape(flattened_image, image_shape)
image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], 3])
return image
NAME_TO_FUNC = {
'AutoContrast': autocontrast,
'Equalize': equalize,
'Invert': invert,
'Rotate': rotate,
'Posterize': posterize,
'PosterizeIncreasing': posterize,
'Solarize': solarize,
'SolarizeIncreasing': solarize_increasing,
'SolarizeAdd': solarize_add,
'Color': color,
'ColorIncreasing': color,
'Contrast': contrast,
'ContrastIncreasing': contrast,
'Brightness': brightness,
'BrightnessIncreasing': brightness,
'Sharpness': sharpness,
'SharpnessIncreasing': sharpness,
'ShearX': shear_x,
'ShearY': shear_y,
'TranslateX': translate_x,
'TranslateXRel': translate_x_rel,
'TranslateY': translate_y,
'TranslateYRel': translate_y_rel,
'Cutout': cutout,
}
def _randomly_negate_tensor(tensor):
"""With 50% prob turn the tensor negative."""
should_flip = tf.cast(tf.floor(tf.random.uniform([]) + 0.5), tf.bool)
final_tensor = tf.cond(should_flip, lambda: tensor, lambda: -tensor)
return final_tensor
def _rotate_level_to_arg(level):
level = (level/_MAX_LEVEL) * 30.
level = _randomly_negate_tensor(level)
return (level,)
def _shrink_level_to_arg(level):
"""Converts level to ratio by which we shrink the image content."""
if level == 0:
return (1.0,) # if level is zero, do not shrink the image
# Maximum shrinking ratio is 2.9.
level = 2. / (_MAX_LEVEL / level) + 0.9
return (level,)
def _enhance_level_to_arg(level):
return ((level/_MAX_LEVEL) * 1.8 + 0.1,)
def _enhance_increasing_level_to_arg(level):
# Such that the magnitude of the transformation
# increases with M and to have two-sided transformations.
level = (level/_MAX_LEVEL) * 0.9
level = 1. + _randomly_negate_tensor(level)
return (level,)
def _shear_level_to_arg(level):
level = (level/_MAX_LEVEL) * 0.3
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _translate_level_to_arg(level, translate_const):
level = (level/_MAX_LEVEL) * float(translate_const)
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _translate_rel_level_to_arg(level):
level = (level/_MAX_LEVEL) * 0.45
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def level_to_arg(hparams):
return {
'AutoContrast': lambda level: (),
'Equalize': lambda level: (),
'Invert': lambda level: (),
'Rotate': _rotate_level_to_arg,
'Posterize': lambda level: (int((level/_MAX_LEVEL) * 4),),
'PosterizeIncreasing': lambda level: (4 - int((level/_MAX_LEVEL) * 4),),
'Solarize': lambda level: (int((level/_MAX_LEVEL) * 256),),
'SolarizeIncreasing': lambda level: (int((level/_MAX_LEVEL) * 256),),
'SolarizeAdd': lambda level: (int((level/_MAX_LEVEL) * 110),),
'Color': _enhance_level_to_arg,
'ColorIncreasing': _enhance_increasing_level_to_arg,
'Contrast': _enhance_level_to_arg,
'ContrastIncreasing': _enhance_increasing_level_to_arg,
'Brightness': _enhance_level_to_arg,
'BrightnessIncreasing': _enhance_increasing_level_to_arg,
'Sharpness': _enhance_level_to_arg,
'SharpnessIncreasing': _enhance_increasing_level_to_arg,
'ShearX': _shear_level_to_arg,
'ShearY': _shear_level_to_arg,
# pylint:disable=g-long-lambda
'Cutout': lambda level: (
int((level/_MAX_LEVEL) * hparams['cutout_const']),),
'TranslateX': lambda level: _translate_level_to_arg(
level, hparams['translate_const']),
'TranslateXRel': _translate_rel_level_to_arg,
'TranslateY': lambda level: _translate_level_to_arg(
level, hparams['translate_const']),
'TranslateYRel': _translate_rel_level_to_arg,
# pylint:enable=g-long-lambda
}
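# Worked sketch (hypothetical helper): with _MAX_LEVEL = 10, a 'Rotate' level
# of 5 maps to an angle of 15 degrees that is randomly negated, while
# parameter-free operations such as 'Equalize' ignore the level entirely.
def _example_level_to_arg():
  hparams = {'cutout_const': 8, 'translate_const': 32}
  return level_to_arg(hparams)['Rotate'](5.)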
def _parse_policy_info(name, prob, level, replace_value, augmentation_hparams):
"""Return the function that corresponds to `name` and update `level` param."""
func = NAME_TO_FUNC[name]
args = level_to_arg(augmentation_hparams)[name](level)
# Check to see if prob is passed into function. This is used for operations
# where we alter bboxes independently.
# pytype:disable=wrong-arg-types
if 'prob' in inspect.getfullargspec(func)[0]:
args = tuple([prob] + list(args))
# pytype:enable=wrong-arg-types
# Add in replace arg if it is required for the function that is being called.
# pytype:disable=wrong-arg-types
if 'replace' in inspect.getfullargspec(func)[0]:
# Make sure replace is the final argument
assert 'replace' == inspect.getfullargspec(func)[0][-1]
args = tuple(list(args) + [replace_value])
# pytype:enable=wrong-arg-types
return (func, prob, args)
def _apply_func_with_prob(func, image, args, prob):
"""Apply `func` to image w/ `args` as input with probability `prob`."""
assert isinstance(args, tuple)
# If prob is a function argument, then this randomness is being handled
# inside the function, so make sure it is always called.
# pytype:disable=wrong-arg-types
if 'prob' in inspect.getfullargspec(func)[0]:
prob = 1.0
# pytype:enable=wrong-arg-types
# Apply the function with probability `prob`.
should_apply_op = tf.cast(
tf.floor(tf.random.uniform([], dtype=tf.float32) + prob), tf.bool)
augmented_image = tf.cond(
should_apply_op,
lambda: func(image, *args),
lambda: image)
return augmented_image
def select_and_apply_random_policy(policies, image):
"""Select a random policy from `policies` and apply it to `image`."""
policy_to_select = tf.random.uniform([], maxval=len(policies), dtype=tf.int32)
# Note that using tf.case instead of tf.conds would result in significantly
# larger graphs and would even break export for some larger policies.
for (i, policy) in enumerate(policies):
image = tf.cond(
tf.equal(i, policy_to_select),
lambda selected_policy=policy: selected_policy(image),
lambda: image)
return image
def build_and_apply_nas_policy(policies, image, augmentation_hparams):
"""Build a policy from the given policies passed in and apply to image.
Args:
policies: list of lists of tuples in the form `(func, prob, level)`, `func`
is a string name of the augmentation function, `prob` is the probability
of applying the `func` operation, `level` is the input argument for
`func`.
image: tf.Tensor that the resulting policy will be applied to.
augmentation_hparams: Hparams associated with the NAS learned policy.
Returns:
A version of image that now has data augmentation applied to it based on
    the `policies` passed into the function.
"""
replace_value = augmentation_hparams['replace_const']
# func is the string name of the augmentation function, prob is the
# probability of applying the operation and level is the parameter associated
# with the tf op.
# tf_policies are functions that take in an image and return an augmented
# image.
tf_policies = []
for policy in policies:
tf_policy = []
# Link string name to the correct python function and make sure the correct
# argument is passed into that function.
for policy_info in policy:
policy_info = list(policy_info) + [replace_value, augmentation_hparams]
tf_policy.append(_parse_policy_info(*policy_info))
    # Now build the tf policy that will apply the augmentation procedure
# on image.
def make_final_policy(tf_policy_):
def final_policy(image_):
for func, prob, args in tf_policy_:
image_ = _apply_func_with_prob(
func, image_, args, prob)
return image_
return final_policy
tf_policies.append(make_final_policy(tf_policy))
augmented_image = select_and_apply_random_policy(
tf_policies, image)
return augmented_image
def distort_image_with_autoaugment(
image, augmentation_name='v0',
cutout_const=None, translate_const=None, replace_const=None):
"""Applies the AutoAugment policy to `image`.
AutoAugment is from the paper: https://arxiv.org/abs/1805.09501.
Args:
image: `Tensor` of shape [height, width, 3] representing an image.
    augmentation_name: The name of the AutoAugment policy to use. Currently
      only the `cifar10` policy (Table 7 of the AutoAugment paper) is
      available; any other name raises a ValueError.
cutout_const: The cutout patch size is 2 * cutout_const; defaults to
8 on CIFAR-10 and 100 on ImageNet.
translate_const: The maximum number of pixels the image is translated by;
defaults to 32 on CIFAR-10 and 250 on ImageNet.
replace_const: The constant value to fill empty pixels with;
defaults to 121 on CIFAR-10 and 128 on ImageNet.
Returns:
    The augmented version of `image`.
"""
if cutout_const is not None and cutout_const < 0:
raise ValueError('Invalid cutout size')
if translate_const is not None and translate_const < 0:
raise ValueError('Invalid translation constant')
if replace_const is not None and (replace_const < 0 or replace_const > 255):
raise ValueError('Invalid constant to fill/replace pixels with')
available_policies = {
'cifar10': policy_cifar10,
}
if augmentation_name not in available_policies:
raise ValueError('Invalid augmentation_name: {}'.format(augmentation_name))
if augmentation_name == 'cifar10':
cutout_const = cutout_const or 8
translate_const = translate_const or 32
replace_const = replace_const or 121
else:
# Defaults to ImageNet augmentation hyper-parameters.
cutout_const = cutout_const or 100
translate_const = translate_const or 250
replace_const = replace_const or 128
image_dtype = image.dtype
image = tf.image.convert_image_dtype(image, dtype=tf.uint8)
policy = available_policies[augmentation_name]()
augmentation_hparams = {
'cutout_const': cutout_const,
'translate_const': translate_const,
'replace_const': replace_const,
}
image = build_and_apply_nas_policy(policy, image, augmentation_hparams)
image = tf.image.convert_image_dtype(image, dtype=image_dtype)
return image
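# Illustrative usage sketch (hypothetical helper): apply the CIFAR-10 policy
# with its default constants to a single random uint8 image.
def _example_autoaugment_usage():
  image = tf.cast(
      tf.random.uniform([32, 32, 3], maxval=256, dtype=tf.int32), tf.uint8)
  return distort_image_with_autoaugment(image, augmentation_name='cifar10')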
| conformal_training-main | auto_augment.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training loop for normal training."""
import functools
from typing import Callable, Tuple, Dict, Any
from absl import logging
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
import data as cpdata
import evaluation as cpeval
import open_source_utils as cpstaging
import train_utils as cputils
ShiftFn = Callable[[jnp.ndarray, jnp.ndarray], Tuple[jnp.ndarray, jnp.ndarray]]
class TrainNormal:
"""Normal training routine."""
def __init__(self, config, data, optimizer):
"""Initialize normal training.
Args:
config: training configuration
data: datasets and information
optimizer: optimizer to use
"""
self.config = config
""" (collections.ConfigDict) Training configuration. """
self.data = data
""" (Dict[str, any]) Datasets and information."""
self.model = None
""" (hk.TransformedWithState) Model to train. """
self.optimizer = optimizer
""" (optax.GradientTransformation) Optimizer for training. """
# Mainly for conformal training and backwards compatibility, we use
# the same batch size for training and testing by default.
if self.config.test_batch_size is None:
self.config.test_batch_size = self.config.batch_size
def compute_loss_and_error(
self,
trainable_params: cputils.FlatMapping,
fixed_params: cputils.FlatMapping,
inputs: jnp.ndarray,
labels: jnp.ndarray,
model_state: cputils.FlatMapping,
training: bool,
rng: jnp.ndarray,
) -> Tuple[jnp.ndarray, Tuple[cputils.FlatMapping, Dict[str, Any]]]:
"""Compute cross-entropy loss with weight decay and error rate.
Args:
trainable_params: trainable model parameters
fixed_params: model parameters fixed for fine-tuning
inputs: input examples
      labels: ground truth labels
model_state: model state
training: training mode
rng: random key
Returns:
Tuple consisting of loss and another tuple of new model state and a
dictionary with additional information
"""
params = hk.data_structures.merge(trainable_params, fixed_params)
logits, new_model_state = self.model.apply(
params, model_state, rng, inputs, training=training)
cross_entropy_loss = cputils.compute_cross_entropy_loss(logits, labels)
weight_decay_loss = cputils.compute_weight_decay(params)
weight_decay_loss *= self.config.weight_decay
error = 1 - cpeval.compute_accuracy(logits, labels)
loss = cross_entropy_loss + weight_decay_loss
return loss, (new_model_state, {
'cross_entropy': cross_entropy_loss,
'weight_decay': weight_decay_loss,
'error': error,
})
def get_train_fns(self) -> Tuple[cputils.LossFn, functools.partial]:
"""Get training loss and update function.
Returns:
Loss and update function
"""
loss_fn = self.compute_loss_and_error
update_fn = functools.partial(
cputils.update, loss_fn=loss_fn, optimizer=self.optimizer)
if self.config.jit:
loss_fn = jax.jit(loss_fn, static_argnames='training')
update_fn = jax.jit(update_fn, static_argnames='training')
return loss_fn, update_fn
def setup(
self, rng: hk.PRNGSequence
) -> Tuple[cputils.FlatMapping, cputils.FlatMapping, cputils.FlatMapping]:
"""Set up model.
Args:
rng: random key sequence
Returns:
Trainable parameters, fixed parameters and model state
"""
def update_flatmapping(base_mapping, mapping, excluded_layers):
"""Helper to update params and model state with loaded ones."""
mapping = hk.data_structures.to_mutable_dict(mapping)
for key in base_mapping.keys():
include = True
for excluded_layer in excluded_layers:
if key.find(excluded_layer) >= 0:
include = False
if include:
mapping[key] = base_mapping[key]
return hk.data_structures.to_haiku_dict(mapping)
def partition_params(module_name, unused_name, unused_value):
"""Helper to partition parameters into trainable and fixed."""
return (self.config.finetune.layers is None
or module_name in include_layers)
def log_params(params):
"""Helper to log a set of parameters."""
for module_name, name, _ in hk.data_structures.traverse(params):
logging.info('%s.%s', module_name, name)
if self.config.finetune.enabled:
# Layers to be fine-tuned:
include_layers = self.config.finetune.layers or ''
include_layers = include_layers.split(',')
path, self.model, base_params, base_model_state = cputils.load_model(
self.config, self.data)
logging.info('Loaded pre-trained model from %s.', path)
# We re-initialize the whole model and set the loaded parameters
# only for those layers that are not supposed to be fine-tuned.
if self.config.finetune.reinitialize:
params, model_state = cputils.init_model(
self.data, self.model, rng)
params = update_flatmapping(base_params, params, include_layers)
model_state = update_flatmapping(
base_model_state, model_state, include_layers)
else:
params = base_params
model_state = base_model_state
trainable_params, fixed_params = hk.data_structures.partition(
partition_params, params)
# For training from scratch we just set all parameters as trainable.
else:
self.model = cputils.create_model(self.config, self.data)
trainable_params, model_state = cputils.init_model(
self.data, self.model, rng)
fixed_params = {}
logging.info('Created model %s.', self.config.architecture)
logging.info('Trainable parameters:')
log_params(trainable_params)
    logging.info('Fixed parameters:')
log_params(fixed_params)
return trainable_params, fixed_params, model_state
def train(
self, trainable_params: cputils.FlatMapping,
fixed_params: cputils.FlatMapping,
model_state: cputils.FlatMapping, rng: hk.PRNGSequence
) -> Tuple[cputils.FlatMapping, cputils.FlatMapping]:
"""Normal training loop.
Args:
trainable_params: parameters to train
fixed_params: fixed parameters in the case of fine-tuning
model_state: model state
rng: random key sequence
Returns:
Parameters and model state
"""
optimizer_state = self.optimizer.init(trainable_params)
logging.info('Initialized optimizer for training.')
loss_fn, update_fn = self.get_train_fns()
checkpoint = cpstaging.create_checkpoint(self.config)
cputils.update_checkpoint(
checkpoint, trainable_params, fixed_params,
model_state, optimizer_state, 0)
checkpoint.restore_or_save()
while checkpoint.state.epoch < self.config.epochs:
logging.info('Epoch %d:', checkpoint.state.epoch)
for b, (inputs, labels) in enumerate(
cpdata.load_batches(self.data['train'])):
loss, trainable_params, new_model_state, optimizer_state, mixed = update_fn(
trainable_params, fixed_params, inputs, labels, model_state,
True, optimizer_state, next(rng))
if not self.config.finetune.enabled or self.config.finetune.model_state:
model_state = new_model_state
log_mixed = ' '.join(['%s=%g' % (k, v) for (k, v) in mixed.items()])
logging.info('Epoch %d, batch %d: loss=%g %s',
checkpoint.state.epoch, b, loss, log_mixed)
count = 0
values = {}
for inputs, labels in cpdata.load_batches(self.data['test']):
loss_b, (_, mixed) = loss_fn(
trainable_params, fixed_params,
inputs, labels, model_state, False, next(rng))
mixed['loss'] = loss_b
values = {k: values.get(k, 0) + v for (k, v) in mixed.items()}
count += 1
# Compute averages for each logged value.
values = {k: v/count for (k, v) in values.items()}
log_mixed = ' '.join(['%s=%g' % (k, v) for (k, v) in values.items()])
logging.info('Epoch %d, test: %s', checkpoint.state.epoch, log_mixed)
cputils.update_checkpoint(
checkpoint, trainable_params, fixed_params,
model_state, optimizer_state, checkpoint.state.epoch + 1)
if checkpoint.state.epoch % self.config.checkpoint_frequency == 0:
checkpoint.save()
params = hk.data_structures.merge(trainable_params, fixed_params)
return params, model_state
def _test_dataset(
self, params: cputils.FlatMapping, model_state: cputils.FlatMapping,
dataset: tf.data.Dataset, name: str, epochs: int, shift_fn: ShiftFn):
"""Helper to evaluate model on given dataset.
Args:
params: trained parameters of the model
model_state: model state
dataset: dataset to evaluate
name: identifier for dataset
epochs: number of epochs to run on dataset
shift_fn: shift function to apply distribution shift to images
"""
rng = hk.PRNGSequence(0)
writer = cpstaging.create_writer(self.config, 'eval_%s' % name)
for epoch in range(epochs):
logits = []
labels = []
for inputs_b, labels_b in cpdata.load_batches(dataset):
inputs_b, _ = shift_fn(inputs_b, next(rng))
logits_b, _ = self.model.apply(
params, model_state, None, inputs_b, training=False)
logits.append(logits_b)
labels.append(labels_b)
logits = jnp.concatenate(logits, axis=0)
labels = jnp.concatenate(labels, axis=0)
error = 1 - cpeval.compute_accuracy(
jax.nn.softmax(logits, axis=1), labels)
logging.info('Evaluation, %s: %d examples [epoch=%d], error=%g',
name, logits.shape[0], epoch, error)
writer.write({
'logits': np.array(logits, np.float32),
'labels': np.array(labels, np.float32),
})
def test(self, params: cputils.FlatMapping, model_state: cputils.FlatMapping):
"""Test trained model on training, validation and test sets.
Args:
params: trained parameters of the model
model_state: model state
"""
no_shift_fn = lambda inputs, rng: (inputs, None)
num_epochs_per_dataset = {
'val': (self.data['val'], 1, no_shift_fn),
'test': (self.data['test'], 1, no_shift_fn),
'train_clean': (self.data['train_clean'], 1, no_shift_fn),
# Without data augmentation we might not need to do multiple passes.
'train_ordered': (self.data['train_ordered'], -1, no_shift_fn),
}
for name, (dataset, num_epochs, shift_fn) in num_epochs_per_dataset.items():
# Check for None in case we train without validation examples.
if dataset is not None:
self._test_dataset(
params, model_state, dataset, name, num_epochs, shift_fn)
def run(self, rng: hk.PRNGSequence):
"""Main training procedure.
Args:
rng: random key sequence
"""
trainable_params, fixed_params, model_state = self.setup(rng)
params, model_state = self.train(
trainable_params, fixed_params, model_state, rng)
self.test(params, model_state)
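# Minimal usage sketch (assumes `config`, `data` and `optimizer` objects built
# by the surrounding training utilities; shown as comments only):
#   trainer = TrainNormal(config, data, optimizer)
#   trainer.run(hk.PRNGSequence(config.seed))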
| conformal_training-main | train_normal.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to avoid redundancy in tests."""
from typing import Optional
import jax.numpy as jnp
import numpy as np
import scipy.special
def get_labels(num_examples: int, num_classes: int) -> jnp.ndarray:
"""Get random labels.
Args:
num_examples: number of examples
num_classes: number of classes
Returns:
Labels
"""
return jnp.array(np.random.randint(0, num_classes, (num_examples)))
def get_probabilities(
labels: jnp.ndarray, dominance: float,
log: Optional[bool] = False) -> jnp.ndarray:
"""Get random probabilities where the logit of the true label dominates.
Args:
labels: labels to generate probabilities for
dominance: float value added to the logit of the true label before
applying softmax; determines whether probability of true class is the
largest
log: return log-probabilities
Returns:
Probabilities
"""
probabilities = np.random.random((labels.shape[0], np.max(labels) + 1))
probabilities[np.arange(probabilities.shape[0]), labels] += dominance
probabilities = scipy.special.softmax(probabilities, axis=1)
if log:
probabilities = np.log(probabilities)
return jnp.array(probabilities)
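# Illustrative usage sketch (hypothetical helper): draw random labels and
# probabilities whose argmax matches the labels because of the large dominance
# value.
def _example_test_utils_usage():
  labels = get_labels(num_examples=4, num_classes=3)
  probabilities = get_probabilities(labels, dominance=10.)
  return labels, probabilities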
| conformal_training-main | test_utils.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Launches experiments."""
import copy
import os
from typing import Tuple, Dict, Any
from absl import flags
from absl import logging
import ml_collections as collections
from absl import app
from config import get_config
# pylint: disable=unused-import
from experiments.run_cifar10 import get_parameters as get_cifar10_parameters
from experiments.run_cifar100 import get_parameters as get_cifar100_parameters
from experiments.run_emnist_byclass import get_parameters as get_emnist_byclass_parameters
from experiments.run_fashion_mnist import get_parameters as get_fashion_mnist_parameters
from experiments.run_mnist import get_parameters as get_mnist_parameters
from experiments.run_wine_quality import get_parameters as get_wine_quality_parameters
from train import train
FLAGS = flags.FLAGS
flags.DEFINE_string('experiment_dataset', 'cifar10', 'dataset to use')
flags.DEFINE_string('experiment_experiment', '',
'experiments to run on dataset')
flags.DEFINE_integer('experiment_seeds', 10,
'number of seed to run per experiment')
flags.DEFINE_boolean('experiment_debug', False,
'debug experiment sweep and seeds')
flags.DEFINE_string('experiment_path', './', 'base path for experiments')
def get_parameters(
dataset: str,
experiment: str,
) -> Tuple[collections.ConfigDict, Dict[str, Any]]:
"""Get parameters for given dataset.
Args:
dataset: dataset to experiment on
experiment: experiment to run
Returns:
Configuration arguments and hyper-parameter sweep.
"""
config = get_config()
config.architecture = 'mlp'
config.cnn.channels = 32
config.cnn.layers = 3
config.cnn.kernels = 3
config.cnn.activation = 'relu'
config.mlp.units = 64
config.mlp.layers = 2
config.mlp.activation = 'relu'
config.resnet.version = 18
config.resnet.channels = 32
config.resnet.resnet_v2 = True
config.resnet.init_logits = True
config.optimizer = 'sgd'
config.adam.b1 = 0.9
config.adam.b2 = 0.999
config.adam.eps = 1e-8
config.sgd.momentum = 0.9
config.sgd.nesterov = True
config.learning_rate = 0.01
config.learning_rate_schedule = 'step'
config.step.learning_rate_decay = 0.1
config.exponential.learning_rate_decay = 0.95
config.mode = 'normal'
config.coverage.method = 'threshold_p'
config.coverage.alpha = 0.01
config.coverage.target_alpha = 0.01
config.coverage.temperature = 1.
config.coverage.dispersion = 0.1
config.coverage.size_weight = 0.05
config.coverage.tau = 1
config.coverage.coverage_loss = 'classification'
config.coverage.loss_matrix = ()
config.coverage.cross_entropy_weight = 0.
config.coverage.size_loss = 'valid'
config.coverage.size_transform = 'identity'
config.coverage.size_bound = 3.
config.coverage.size_bound_weight = 0.9
config.coverage.loss_transform = 'log'
config.coverage.size_weights = ()
config.coverage.rng = False
config.coverage.calibration_batches = 10
config.conformal.method = 'threshold_p'
config.conformal.alpha = 0.01
config.conformal.target_alpha = 0.01
config.conformal.temperature = 1.
config.conformal.dispersion = 0.1
config.conformal.size_weight = 0.1
config.conformal.coverage_loss = 'classification'
config.conformal.loss_matrix = ()
config.conformal.cross_entropy_weight = 0.
config.conformal.size_loss = 'valid'
config.conformal.size_transform = 'identity'
config.conformal.size_bound = 3.
config.conformal.size_bound_weight = 0.9
config.conformal.loss_transform = 'log'
config.conformal.size_weights = ()
config.conformal.fraction = 0.5
config.conformal.rng = False
config.weight_decay = 0.0005
config.batch_size = 500
config.test_batch_size = 100
config.epochs = 150
config.finetune.enabled = False
config.finetune.model_state = False
config.finetune.experiment_id = None
config.finetune.work_unit_id = None
config.finetune.layers = None
config.finetune.reinitialize = False
config.dataset = dataset
config.seed = 0
config.checkpoint_frequency = 10
config.resampling = 0
config.whitening = True
config.cifar_augmentation = 'standard+autoaugment+cutout'
config.val_examples = 5000
config.checkpoint_dtl = 155
config.jit = True
experiment = experiment.split('.')
sub_experiment = experiment[1] if len(experiment) > 1 else None
experiment = experiment[0]
get_parameters_key = 'get_%s_parameters' % dataset
if get_parameters_key not in globals().keys():
raise ValueError('Experiment definitions could not be loaded.')
config, parameter_sweep = globals()[get_parameters_key](
experiment, sub_experiment, config)
return config, parameter_sweep
def main(argv):
del argv
supported_datasets = (
'wine_quality',
'mnist',
'emnist_byclass',
'fashion_mnist',
'cifar10',
'cifar100',
)
if FLAGS.experiment_dataset not in supported_datasets:
raise ValueError('Invalid dataset selected.')
if FLAGS.experiment_seeds <= 0:
raise ValueError('Invalid number of seeds.')
logging.info(
'starting dataset=%s experiment=%s seeds=%d',
FLAGS.experiment_dataset, FLAGS.experiment_experiment,
FLAGS.experiment_seeds)
config, parameter_sweep = get_parameters(
FLAGS.experiment_dataset, FLAGS.experiment_experiment)
config.path = os.path.join(
FLAGS.experiment_path,
'%s_%s' % (FLAGS.experiment_dataset, FLAGS.experiment_experiment))
config.finetune.path = os.path.join(
FLAGS.experiment_path, config.finetune.path)
if FLAGS.experiment_seeds > 1:
config.resampling = 5
logging.info('resampling=%d', config.resampling)
def update_config(config, key, value):
"""Helper to easily update a config value by dot-separated key."""
if key.count('.') > 1:
raise ValueError(f'Key {key} not supported.')
elif key.count('.') == 1:
key, sub_key = key.split('.')
config[key][sub_key] = value
else:
config[key] = value
for seed in range(FLAGS.experiment_seeds):
# A sweep in one parameter is supported, e.g., the loss matrix or weights.
if parameter_sweep is not None:
sweep_key = parameter_sweep['key']
for i, sweep_value in enumerate(parameter_sweep['values']):
sweep_config = copy.deepcopy(config)
update_config(sweep_config, sweep_key, sweep_value)
update_config(sweep_config, 'seed', seed)
path = config.path + '_value%d_seed%d/' % (i, seed)
update_config(sweep_config, 'path', path)
logging.info(
'running %s=%r seed=%d path=%s', sweep_key, sweep_value, seed, path)
if not FLAGS.experiment_debug:
train(sweep_config)
# Only update config regarding seed and path, no other values are changed.
else:
seed_config = copy.deepcopy(config)
update_config(seed_config, 'seed', seed)
path = config.path + '_seed%d/' % seed
update_config(seed_config, 'path', path)
logging.info('running seed=%d path=%s', seed, path)
if not FLAGS.experiment_debug:
train(seed_config)
if __name__ == '__main__':
app.run(main)
| conformal_training-main | run.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests sorting nets."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
import jax
import jax.numpy as jnp
import sorting_nets
import variational_sorting_net
class SortingNetsTest(parameterized.TestCase):
@parameterized.parameters([
[4],
])
def test_create_comms(self, length):
comms = sorting_nets.comm_pattern_bitonic(2)
chex.assert_equal(comms["alg"], "bitonic")
comms = sorting_nets.comm_pattern_batcher(length, make_parallel=True)
chex.assert_equal(comms["alg"], "batcher-bitonic")
comms = sorting_nets.comm_pattern_batcher(length, make_parallel=False)
chex.assert_equal(comms["alg"], "batcher-bitonic")
@parameterized.parameters([
[[[[0, 1]], [[1, 2]], [[0, 2]]], 3]
])
def test_comm_pattern_from_list(self, snet_list, num_stages):
comms = sorting_nets.comm_pattern_from_list(snet_list)
chex.assert_equal(comms["alg"], "fixed")
chex.assert_equal(comms["num_stages"], num_stages)
@parameterized.parameters([
[[[[0, 1]], [[2, 3]], [[0, 2]]], 2]
])
def test_parallelize(self, snet_list, final_len):
snet_par = sorting_nets.parallelize(snet_list)
chex.assert_equal(len(snet_par), final_len)
comms = sorting_nets.comm_pattern_from_list(snet_par)
chex.assert_equal(comms["alg"], "fixed")
chex.assert_equal(comms["num_wires"], 4)
chex.assert_equal(comms["num_stages"], 2)
chex.assert_equal(comms["num_comparators"], 3)
def test_prune(self):
snet_list = sorting_nets.SNET_10
snet_pruned = sorting_nets.prune(snet_list, keep=[9])
comms = sorting_nets.comm_pattern_from_list(snet_pruned, make_parallel=True)
chex.assert_equal(comms["alg"], "fixed")
chex.assert_equal(comms["num_wires"], 10)
chex.assert_equal(comms["num_stages"], 4)
chex.assert_equal(comms["num_comparators"], 9)
k_top = 2
length = comms["num_wires"]
keep = list(range(length-1, length -1 - k_top - 1, -1))
pruned_list = sorting_nets.prune(snet_list, keep=keep)
comms = sorting_nets.comm_pattern_from_list(pruned_list, make_parallel=True)
bs = variational_sorting_net.VariationalSortingNet(comms)
prng_key = jax.random.PRNGKey(1)
x = jax.random.uniform(prng_key, [length])
xh, _ = bs.sort_tester(x, dispersion=0.1)
x_sort = jnp.sort(x)
chex.assert_equal(xh[-1], x_sort[-1])
chex.assert_equal(xh[-2], x_sort[-2])
if __name__ == "__main__":
absltest.main()
| conformal_training-main | sorting_nets_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for models."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
import jax
import jax.numpy as jnp
import numpy as np
import models as cpmodels
class ModelsTest(parameterized.TestCase):
def _test_model(self, classes, model, jit=False):
def forward(params, model_state, rng, inputs, training):
outputs, _ = model.apply(params, model_state, rng, inputs, training)
return outputs
if jit:
forward = jax.jit(forward, static_argnums=4)
batch_size = 10
inputs = np.random.rand(batch_size, 32, 32, 3).astype(jnp.float32)
rng = jax.random.PRNGKey(0)
params, model_state = model.init(rng, inputs, training=True)
outputs = forward(params, model_state, rng, inputs, training=True)
self.assertEqual(outputs.shape, (batch_size, classes))
outputs = forward(params, model_state, rng, inputs, training=False)
self.assertEqual(outputs.shape, (batch_size, classes))
@parameterized.parameters([
dict(classes=10, activation='relu', units=[16, 16]),
dict(classes=10, activation='relu', units=[16, 16], jit=True),
dict(classes=100, activation='relu', units=[16, 16]),
dict(classes=10, activation='tanh', units=[16, 16]),
dict(classes=10, activation='relu', units=[16]),
])
def test_mlp_classes(self, classes, activation, units, jit=False):
model = cpmodels.create_mlp(classes, activation=activation, units=units)
self._test_model(classes, model, jit=jit)
@parameterized.parameters([
dict(classes=10, activation='a', units=[128]),
dict(classes=0, activation='relu', units=[128]),
])
def test_mlp_errors(self, classes, activation, units):
with self.assertRaises(ValueError):
cpmodels.create_mlp(classes, activation, units)
@parameterized.parameters([
dict(classes=10, activation='relu',
channels=[8, 16, 32], kernels=[3, 3, 3]),
dict(classes=10, activation='relu',
channels=[8, 16, 32], kernels=[3, 3, 3], jit=True),
dict(classes=100, activation='relu',
channels=[8, 16, 32], kernels=[3, 3, 3]),
dict(classes=10, activation='tanh',
channels=[8, 16, 32], kernels=[3, 3, 3]),
dict(classes=10, activation='relu',
channels=[8, 16], kernels=[3, 3]),
dict(classes=10, activation='relu',
channels=[8, 16, 32], kernels=[5, 5, 5]),
])
def test_cnn_classes(self, classes, activation, channels, kernels, jit=False):
model = cpmodels.create_cnn(
classes, activation=activation, channels=channels, kernels=kernels)
self._test_model(classes, model, jit=jit)
@parameterized.parameters([
dict(classes=10, activation='relu', channels=[], kernels=[3, 3, 3]),
dict(classes=10, activation='relu', channels=[64, 128, 256], kernels=[]),
dict(classes=10, activation='a', channels=[64, 128], kernels=[3, 3]),
dict(classes=10, activation='relu', channels=[64, 128],
kernels=[3, 3, 3]),
dict(classes=0, activation='relu', channels=[64, 128, 256],
kernels=[3, 3, 3]),
])
def test_cnn_errors(self, classes, activation, channels, kernels):
with self.assertRaises(ValueError):
cpmodels.create_cnn(classes, activation, channels, kernels)
@parameterized.parameters([
dict(classes=10, version=18),
dict(classes=10, version=18, jit=True),
dict(classes=100, version=18),
dict(classes=10, version=18, channels=32),
])
def test_resnet_classes(
self, classes, version, resnet_v2=False, channels=64, jit=True):
model = cpmodels.create_resnet(
classes, version, channels, resnet_v2)
self._test_model(classes, model, jit=jit)
@parameterized.parameters([
dict(classes=10, version=17, channels=64),
dict(classes=10, version=18, channels=0),
dict(classes=0, version=18, channels=64),
])
def test_resnet_errors(self, classes, version, channels):
with self.assertRaises(ValueError):
cpmodels.create_resnet(classes, version, channels)
def test_resnet_initial_conv_params(self):
batch_size = 100
model = cpmodels.create_resnet(10, 18, 64, False)
inputs = np.random.rand(batch_size, 32, 32, 3).astype(jnp.float32)
params, _ = model.init(
jax.random.PRNGKey(0), inputs, training=True)
chex.assert_shape(params['res_net/~/initial_conv_1']['w'], (3, 3, 3, 64))
if __name__ == '__main__':
absltest.main()
| conformal_training-main | models_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training utilities common across different training schemes."""
from typing import Dict, Any, Tuple, List, Callable, Union
import haiku as hk
import jax
import jax.numpy as jnp
import ml_collections as collections
import optax
import data as cpdata
import models as cpmodels
import open_source_utils as cpstaging
FlatMapping = Union[hk.Params, hk.State]
LossFn = Callable[
[FlatMapping, FlatMapping, jnp.ndarray,
jnp.ndarray, FlatMapping, bool, jnp.ndarray],
Tuple[jnp.ndarray, Tuple[FlatMapping, Dict[str, Any]]]
]
def create_model(
config: collections.ConfigDict,
data: Dict[str, Any]) -> hk.TransformedWithState:
"""Helper to get model based on configuration and data.
Args:
config: training configuration
data: data from get_data
Returns:
Created model.
"""
model_config = config[config.architecture]
whitening = [data['means'], data['stds']] if config.whitening else None
if config.architecture == 'mlp':
mlp_units = [model_config.units]*model_config.layers
model = cpmodels.create_mlp(
data['classes'], activation=model_config.activation, units=mlp_units,
whitening=whitening)
elif config.architecture == 'cnn':
cnn_channels = [
model_config.channels*2**i for i in range(model_config.layers)]
cnn_kernels = [model_config.kernels for _ in range(model_config.layers)]
model = cpmodels.create_cnn(
data['classes'], activation=model_config.activation,
channels=cnn_channels, kernels=cnn_kernels,
whitening=whitening)
elif config.architecture == 'resnet':
logit_w_init = None if model_config.init_logits else jnp.zeros
model = cpmodels.create_resnet(
data['classes'], version=model_config.version,
channels=model_config.channels, resnet_v2=model_config.resnet_v2,
whitening=whitening, logit_w_init=logit_w_init)
else:
raise ValueError('Invalid architecture selected.')
return model
def load_model(
config: collections.ConfigDict, data: Dict[str, Any]
) -> Tuple[str, hk.TransformedWithState, FlatMapping, FlatMapping]:
"""Load a model based on the finetune settings in config.
Args:
config: training configuration
data: data from get_data
Returns:
    Checkpoint path, created model, loaded parameters and model state
"""
checkpoint, path = cpstaging.load_checkpoint(config.finetune)
model = create_model(config, data)
params = checkpoint.state.params
model_state = checkpoint.state.model_state
return path, model, params, model_state
def init_model(
data: Dict[str, Any], model: hk.TransformedWithState, rng: hk.PRNGSequence
) -> Tuple[FlatMapping, FlatMapping]:
"""Initialize model and optimizer.
Args:
data: data as from get_data
model: model to initialize
rng: random key sequence
Returns:
Tuple of model parameters and state
"""
params, model_state = model.init(
next(rng), next(cpdata.load_batches(data['train']))[0], training=True)
return params, model_state
def update_checkpoint(
checkpoint: cpstaging.Checkpoint,
trainable_params: FlatMapping, fixed_params: FlatMapping,
model_state: FlatMapping,
optimizer_state: List[optax.TraceState], epoch: int):
"""Update checkpoint.
Args:
checkpoint: checkpoint to update
trainable_params: model parameters that are being trained
fixed_params: model parameters that have been fixed
model_state: model state
optimizer_state: optimizer state
epoch: current epoch
"""
params = hk.data_structures.merge(trainable_params, fixed_params)
checkpoint.state.params = params
checkpoint.state.model_state = model_state
checkpoint.state.optimizer_state = optimizer_state
checkpoint.state.epoch = epoch
class LRScheduler:
"""Base class of simple scheduler, allowing to track current learning rate."""
def __init__(
self, learning_rate: float, learning_rate_decay: float,
num_examples: int, batch_size: int, epochs: int) -> None:
"""Constructs a learning rate scheduler.
Args:
learning_rate: base learning rate to start with
learning_rate_decay: learning rate decay to be applied
num_examples: number of examples per epoch
batch_size: batch size used for training
epochs: total number of epochs
"""
self.base_learning_rate = learning_rate
self.current_learning_rate = learning_rate
self.learning_rate_decay = learning_rate_decay
self.batch_size = batch_size
self.num_examples = num_examples
self.epochs = epochs
def __call__(self, step: int) -> float:
"""Applies learning rate schedule to compute current learning rate.
Args:
step: training step to compute learning rate for.
Returns:
Updated learning rate.
"""
raise NotImplementedError
class ExponentialLRScheduler(LRScheduler):
"""Exponential learning rate schedule."""
def __call__(self, step: int) -> float:
steps_per_epoch = jnp.ceil(self.num_examples / self.batch_size)
self.current_learning_rate = self.base_learning_rate * (
self.learning_rate_decay**(step // steps_per_epoch))
return self.current_learning_rate
class MultIStepLRScheduler(LRScheduler):
"""Multi-step learning rate schedule."""
def __call__(self, step: int) -> float:
steps_per_epoch = jnp.ceil(self.num_examples / self.batch_size)
epoch = step // steps_per_epoch
epochs_per_step = self.epochs//5
learning_rate_step = jnp.maximum(epoch//epochs_per_step - 1, 0)
self.current_learning_rate = self.base_learning_rate * (
self.learning_rate_decay**learning_rate_step)
return self.current_learning_rate
def get_sgd_optimizer(
momentum: float, nesterov: bool,
lr_scheduler: LRScheduler) -> optax.GradientTransformation:
"""SGD with momentum and lr schedule.
Args:
momentum: momentum parameter
nesterov: whether to use nesterov updates
lr_scheduler: learning rate schedule to use
Returns:
Optimizer
"""
return optax.chain(
(optax.trace(decay=momentum, nesterov=nesterov)
if momentum is not None else optax.identity()),
optax.scale_by_schedule(lambda step: -lr_scheduler(step))
)
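# Illustrative usage sketch (hypothetical helper, made-up hyper-parameters):
# builds an SGD optimizer whose learning rate halves after every epoch by
# combining ExponentialLRScheduler with get_sgd_optimizer above.
def _example_sgd_with_schedule() -> optax.GradientTransformation:
  """Example SGD optimizer with an exponential learning rate schedule."""
  lr_scheduler = ExponentialLRScheduler(
      learning_rate=0.01, learning_rate_decay=0.5,
      num_examples=50000, batch_size=500, epochs=10)
  return get_sgd_optimizer(
      momentum=0.9, nesterov=True, lr_scheduler=lr_scheduler)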
def get_adam_optimizer(
b1: float, b2: float, eps: float,
lr_scheduler: LRScheduler) -> optax.GradientTransformation:
"""SGD with momentum and lr schedule.
Args:
b1: decay rate for first moment
b2: decay rate for second moment
    eps: small constant applied to denominator (see optax docs)
lr_scheduler: learning rate schedule to use
Returns:
Optimizer
"""
return optax.chain(
optax.scale_by_adam(b1=b1, b2=b2, eps=eps, eps_root=0),
optax.scale_by_schedule(lambda step: -lr_scheduler(step))
)
def compute_weight_decay(params: FlatMapping) -> float:
"""Weight decay computation.
Args:
params: model parameters
Returns:
Weight decay
"""
return sum(
jnp.sum(jnp.square(param)) for param in jax.tree_leaves(params))
def compute_cross_entropy_loss(
logits: jnp.ndarray, labels: jnp.ndarray) -> jnp.ndarray:
"""Compute cross entropy loss.
Args:
logits: logits predicted by model
labels: ground truth labels
Returns:
Mean cross entropy loss
"""
one_hot_labels = jax.nn.one_hot(labels, logits.shape[1])
return jnp.mean(optax.softmax_cross_entropy(
logits, one_hot_labels))
def compute_hinge_size_loss(
confidence_sets: jnp.ndarray, target_size: int,
transform: Callable[[jnp.ndarray], jnp.ndarray],
weights: jnp.ndarray) -> jnp.ndarray:
"""Compute hinge size loss.
Args:
confidence_sets: predicted confidence sets
target_size: target confidence set size
transform: transform to apply on per example computed size
weights: per-example weights to apply
Returns:
Size loss
"""
return jnp.mean(transform(
weights * jnp.maximum(jnp.sum(confidence_sets, axis=1) - target_size, 0)))
def compute_hinge_bounded_size_loss(
confidence_sets: jnp.ndarray, target_size: int,
bound_size: int, bound_weight: float,
transform: Callable[[jnp.ndarray], jnp.ndarray],
weights: jnp.ndarray) -> jnp.ndarray:
"""Compute bounded hinge loss.
Compared to compute_hinge_size_loss, this loss enforces a higher loss
when size exceeds bound_size.
Args:
confidence_sets: predicted confidence sets
target_size: target confidence set size
bound_size: confidence set size at which bound loss starts
bound_weight: weight of bound loss in (0, 1)
transform: transform to apply on per example computed size
weights: per-example weights to apply
Returns:
Bounded size loss
"""
sizes = jnp.sum(confidence_sets, axis=1)
target_loss = jnp.maximum(sizes - target_size, 0)
bound_loss = jnp.maximum(sizes - bound_size, 0)
size_loss = jnp.mean(transform(
weights * ((1 - bound_weight) * target_loss + bound_weight * bound_loss)))
return size_loss
def compute_probabilistic_size_loss(
confidence_sets: jnp.ndarray,
weights: jnp.ndarray) -> jnp.ndarray:
"""Compute probabilistic size loss.
This size loss is motivated by interpreting the confidence set predictions
as Bernoulli probabilities of a specific label being part of it.
The sum of these Bernoulli variables is distributed according to a
Poisson binomial distribution. This loss is the negative likelihood
of this distribution for a size of 1.
Args:
confidence_sets: predicted sets
weights: per-example weights to apply
Returns:
Size loss
"""
classes = confidence_sets.shape[1]
one_hot_labels = jnp.expand_dims(jnp.identity(classes), axis=0)
repeated_confidence_sets = jnp.repeat(
jnp.expand_dims(confidence_sets, axis=2), classes, axis=2)
loss = one_hot_labels * repeated_confidence_sets + (
1 - one_hot_labels) * (1 - repeated_confidence_sets)
loss = jnp.prod(loss, axis=1)
loss = jnp.sum(loss, axis=1)
return jnp.mean(weights * loss)
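# Illustrative sketch (hypothetical helper, toy numbers): for a single example
# with per-class membership probabilities [0.9, 0.2, 0.1], the reduction above
# yields the Poisson binomial probability that exactly one label is in the set,
# i.e. 0.9*0.8*0.9 + 0.1*0.2*0.9 + 0.1*0.8*0.1 = 0.674.
def _example_probabilistic_size_loss() -> jnp.ndarray:
  confidence_sets = jnp.array([[0.9, 0.2, 0.1]])
  weights = jnp.ones((1,))
  return compute_probabilistic_size_loss(confidence_sets, weights)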
def compute_coverage_loss(
confidence_sets: jnp.ndarray,
labels: jnp.ndarray, alpha: float,
transform: Callable[[jnp.ndarray], jnp.ndarray] = jnp.square
) -> jnp.ndarray:
"""Compute squared coverage loss.
Computes empirical coverage on batch and a squared loss between empirical
coverage and target coverage defined as 1 - alpha.
Args:
confidence_sets: predicted confidence sets
labels: ground truth labels
alpha: confidence level
transform: transform to apply on error, e.g., square
Returns:
Squared coverage loss
"""
one_hot_labels = jax.nn.one_hot(labels, confidence_sets.shape[1])
return transform(jnp.mean(jnp.sum(
confidence_sets * one_hot_labels, axis=1)) - (1 - alpha))
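# Illustrative sketch (hypothetical helper, toy numbers): both true labels are
# covered, so the empirical coverage is 1 and, for alpha = 0.1, the loss is
# (1 - 0.9)**2 = 0.01.
def _example_coverage_loss() -> jnp.ndarray:
  confidence_sets = jnp.array([[1., 0.], [1., 1.]])
  labels = jnp.array([0, 1])
  return compute_coverage_loss(confidence_sets, labels, alpha=0.1)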
def compute_general_classification_loss(
confidence_sets: jnp.ndarray, labels: jnp.ndarray,
loss_matrix: jnp.ndarray) -> jnp.ndarray:
"""Compute general classification loss on confidence sets.
Besides enforcing that the true label is contained in the confidence set,
this loss also penalizes any other label in the set according to the
loss_matrix.
Args:
confidence_sets: predicted confidence sets
labels: ground truth labels
loss_matrix: symmetric loss matrix
Returns:
Classification loss
"""
one_hot_labels = jax.nn.one_hot(labels, confidence_sets.shape[1])
l1 = (1 - confidence_sets) * one_hot_labels * loss_matrix[labels]
l2 = confidence_sets * (1 - one_hot_labels) * loss_matrix[labels]
loss = jnp.sum(jnp.maximum(l1 + l2, jnp.zeros_like(l1)), axis=1)
return jnp.mean(loss)
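# Illustrative sketch (hypothetical helper, toy numbers): with an identity
# loss_matrix the loss reduces to 1 - confidence_sets[label], i.e. it only
# penalizes the true label missing from the (soft) set; here 1 - 0.8 = 0.2.
def _example_general_classification_loss() -> jnp.ndarray:
  confidence_sets = jnp.array([[0.8, 0.3]])
  labels = jnp.array([0])
  loss_matrix = jnp.eye(2)
  return compute_general_classification_loss(
      confidence_sets, labels, loss_matrix)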
def compute_general_binary_cross_entropy_loss(
confidence_sets: jnp.ndarray, labels: jnp.ndarray,
loss_matrix: jnp.ndarray) -> jnp.ndarray:
"""Compute general binary cross-entropy loss.
Args:
confidence_sets: predicted confidence sets
labels: ground truth labels
loss_matrix: symmetric loss matrix
Returns:
Binary cross-entropy loss
"""
one_hot_labels = jax.nn.one_hot(labels, confidence_sets.shape[1])
l1 = loss_matrix[labels] * one_hot_labels * jnp.log(confidence_sets + 1e-8)
l2 = loss_matrix[labels] * (1 - one_hot_labels) * jnp.log(
1 - confidence_sets + 1e-8)
loss = jnp.sum(jnp.minimum(l1 + l2, jnp.zeros_like(l1)), axis=1)
return jnp.mean(- loss)
def update(
trainable_params: FlatMapping, fixed_params: FlatMapping,
inputs: jnp.ndarray, labels: jnp.ndarray,
model_state: FlatMapping, training: bool,
optimizer_state: List[optax.TraceState],
rng: jnp.ndarray,
loss_fn: LossFn,
optimizer: optax.GradientTransformation
) -> Tuple[jnp.ndarray, FlatMapping, FlatMapping,
List[optax.TraceState], Dict[str, Any]]:
"""Update parameters using the given loss function.
The loss function is supposed to return the loss, followed by a tuple
consisting of the new model state and a dictionary that may contain additional
information or can be empty.
Args:
trainable_params: model parameters to update
fixed_params: model parameters not to update, i.e., fixed
inputs: input examples
    labels: ground truth labels
model_state: model state
training: training mode
optimizer_state: optimizer state
rng: random key
loss_fn: loss function to use
optimizer: optax optimizer
Returns:
Tuple consisting of loss, new parameters, new model state, new optimizer
state and a dictionary with additional information from the loss function
"""
(loss, (new_model_state, mixed)), grad = jax.value_and_grad(
loss_fn, has_aux=True)(trainable_params, fixed_params, inputs, labels,
model_state, training, rng)
updates, new_optimizer_state = optimizer.update(grad, optimizer_state)
new_params = optax.apply_updates(trainable_params, updates)
return loss, new_params, new_model_state, new_optimizer_state, mixed
| conformal_training-main | train_utils.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configuration for training."""
import ml_collections as collections
def get_conformal_config() -> collections.ConfigDict:
"""Default configuration for coverage and conformal training."""
config = collections.ConfigDict()
config.method = 'threshold_logp'
# Defines groups of classes for reducing mis-coverage using the
# classification loss:
# (see experiments/run_mnist.py for examples)
config.class_groups = ()
# Confidence level to use during training:
config.alpha = 0.01
# The target alpha to enforce using the coverage loss, mostly
# relevant for coverage training:
config.target_alpha = 0.01
# Temperature for soft thresholding:
config.temperature = 1.
# Dispersion for smooth/differentiable sorting:
config.dispersion = 0.1
# Weight of the inefficiency loss:
config.size_weight = 1.
# Which coverage loss to use, see train_coverage.py for options.
config.coverage_loss = 'none'
# Loss matrix to use in the classification loss:
# (see experiments/run_fashion_mnist.py for examples)
config.loss_matrix = ()
# Optional cross-entropy loss in addition to inefficiency/classification
# loss:
config.cross_entropy_weight = 0.
# Which size loss to use, mainly valid or normal:
config.size_loss = 'valid'
# Loss transform, usually identity or log:
config.size_transform = 'identity'
config.size_bound = 3.
config.size_bound_weight = 0.9
config.size_weights = ()
config.loss_transform = 'log'
config.rng = False
return config
def get_config() -> collections.ConfigDict:
"""Default configuration for training.
Returns:
Configuration as ConfigDict.
"""
config = collections.ConfigDict()
# Architecture: mlp, cnn or resnet.
config.architecture = 'mlp'
config.cnn = collections.ConfigDict()
config.cnn.channels = 32
config.cnn.layers = 3
config.cnn.kernels = 3
config.cnn.activation = 'relu'
config.mlp = collections.ConfigDict()
config.mlp.units = 32
config.mlp.layers = 0
config.mlp.activation = 'relu'
config.resnet = collections.ConfigDict()
config.resnet.version = 34
config.resnet.channels = 4
config.resnet.resnet_v2 = True
config.resnet.init_logits = True
# Optimizer: sgd or adam.
config.optimizer = 'sgd'
config.adam = collections.ConfigDict()
config.adam.b1 = 0.9
config.adam.b2 = 0.999
config.adam.eps = 1e-8
config.sgd = collections.ConfigDict()
config.sgd.momentum = 0.9
config.sgd.nesterov = True
# Learning rate schedules:
config.learning_rate_schedule = 'exponential'
config.step = collections.ConfigDict()
config.step.learning_rate_decay = 0.1
config.exponential = collections.ConfigDict()
config.exponential.learning_rate_decay = 0.5
# Training mode: normal, coverage or conformal:
config.mode = 'normal'
config.coverage = get_conformal_config()
# Fixed threshold for coverage training:
config.coverage.tau = 1.
# When fine-tuning a model, fix threshold tau based on that many
# batches of the training set:
config.coverage.calibration_batches = 10
config.conformal = get_conformal_config()
# Fraction of each batch to use for (smooth) calibration.
config.conformal.fraction = 0.5
# General learning hyper-parameters:
config.learning_rate = 0.01
config.momentum = 0.9
config.weight_decay = 0.0005
config.batch_size = 500
config.test_batch_size = 100
config.epochs = 10
config.finetune = collections.ConfigDict()
config.finetune.enabled = False
# Also update/fine-tune model state:
config.finetune.model_state = True
# Which layers to fine-tune:
config.finetune.layers = 'batch_norm_1,linear_2'
# Whether to re-initialize selected layers or not:
config.finetune.reinitialize = True
# This is the path from which the model-to-be-fine-tuned will be loaded:
config.finetune.path = './test/'
# Path to save checkpoints and final predictions to:
config.path = './test/'
config.seed = 0
config.checkpoint_frequency = 5
config.resampling = 0
config.whitening = True
config.cifar_augmentation = 'standard'
  # How many validation examples to take from the training examples:
config.val_examples = 5000
config.dataset = 'mnist'
config.jit = False
return config
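# Illustrative sketch (hypothetical helper, made-up values): obtain the default
# configuration and override a few entries, e.g. for conformal training of a
# CNN on CIFAR-10.
def get_example_cifar_config() -> collections.ConfigDict:
  config = get_config()
  config.architecture = 'cnn'
  config.dataset = 'cifar10'
  config.mode = 'conformal'
  config.conformal.size_weight = 0.05
  return config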
| conformal_training-main | config.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Variational sorting networks."""
import functools
import jax
import jax.numpy as jnp
def _swap_prob_hard(x1, x2):
return jnp.array(jnp.greater(x1, x2), dtype=jnp.float32)
_DELTA_THRESHOLD_EXPECTED = 0.001
_DELTA_THRESHOLD_SAMPLE = 0.001
_EPS = 1e-9
def _swap_prob_entropy_reg(x1, x2, dispersion=1.0):
"""Swapping probability, entropy regularization."""
d = 2 * jax.nn.relu((x2-x1))/dispersion
d2 = 2*jax.nn.relu((x1-x2))/dispersion
return jnp.exp(d2 - jnp.logaddexp(d, d2))
def _swap_prob_entropy_reg_l2(x1, x2, dispersion=1.0):
"""Swapping probability, entropy regularization."""
d = 2*jnp.square(jax.nn.relu(x2-x1))/dispersion
d2 = 2*jnp.square(jax.nn.relu(x1-x2))/dispersion
return jnp.exp(d2 - jnp.logaddexp(d, d2))
def _swap_prob_entropy_reg_lp(x1, x2, dispersion=1.0, norm_p=1.0):
"""Swapping probability, entropy regularization."""
d = 2*jnp.power(jax.nn.relu(x2-x1), norm_p)/dispersion
d2 = 2*jnp.power(jax.nn.relu(x1-x2), norm_p)/dispersion
return jnp.exp(d2 - jnp.logaddexp(d, d2))
def butterfly(lam, x1, x2):
return lam*x2+(1-lam)*x1, lam*x1+(1-lam)*x2
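# Illustrative sketch (hypothetical helper, toy numbers): with a hard swap
# probability, butterfly() orders the pair ascendingly, e.g. (3., 1.) becomes
# (1., 3.); with a soft lam in (0, 1) it returns a convex mix of both orderings.
def _example_butterfly():
  x1, x2 = jnp.array(3.), jnp.array(1.)
  return butterfly(_swap_prob_hard(x1, x2), x1, x2)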
def forward_step(
x,
stage_idx,
comms,
dispersion=1.0,
swap_prob_fun=_swap_prob_entropy_reg,
hard_swap_prob_fun=_swap_prob_hard,
key=None):
"""Computes swapping probabilities at stage_idx of the sorting network."""
idx1 = comms["edge_list"][stage_idx][:, 0]
idx2 = comms["edge_list"][stage_idx][:, 1]
x1, x2 = butterfly(hard_swap_prob_fun(x[idx1], x[idx2]), x[idx1], x[idx2])
if key is None:
lam = swap_prob_fun(x[idx1], x[idx2], dispersion)
else:
subkey = jax.random.split(key, comms["edge_list"][stage_idx].shape[0])
lam = swap_prob_fun(subkey, x[idx1], x[idx2], dispersion)
x = x.at[idx1].set(x1, indices_are_sorted=True)
x = x.at[idx2].set(x2, indices_are_sorted=True)
return x, lam
def backward_step(u, stage_idx, comms, lam):
"""Executes in parallel stage_idx of the sorting network."""
idx1 = comms["edge_list"][stage_idx][:, 0]
idx2 = comms["edge_list"][stage_idx][:, 1]
if len(u.shape) > 1:
u1, u2 = butterfly(jnp.reshape(lam, (lam.shape[0], 1)),
u[idx1, :], u[idx2, :])
u = u.at[idx1, :].set(u1, indices_are_sorted=True)
u = u.at[idx2, :].set(u2, indices_are_sorted=True)
else:
u1, u2 = butterfly(lam, u[idx1], u[idx2])
u = u.at[idx1].set(u1, indices_are_sorted=True)
u = u.at[idx2].set(u2, indices_are_sorted=True)
return u
def forward_only_step(
x, v,
stage_idx,
comms,
dispersion=1.0,
swap_prob_fun=_swap_prob_entropy_reg,
hard_swap_prob_fun=_swap_prob_hard,
key=None):
"""Executes in parallel stage_idx of the sorting network."""
idx1 = comms["edge_list"][stage_idx][:, 0]
idx2 = comms["edge_list"][stage_idx][:, 1]
x1, x2 = butterfly(hard_swap_prob_fun(x[idx1], x[idx2]), x[idx1], x[idx2])
if key is None:
lam = swap_prob_fun(x[idx1], x[idx2], dispersion)
else:
subkey = jax.random.split(key, comms["edge_list"][stage_idx].shape[0])
lam = swap_prob_fun(subkey, x[idx1], x[idx2], dispersion)
x = x.at[idx1].set(x1, indices_are_sorted=True)
x = x.at[idx2].set(x2, indices_are_sorted=True)
if len(v.shape) > 1:
v1, v2 = butterfly(jnp.reshape(lam, (lam.shape[0], 1)),
v[idx1, :], v[idx2, :])
v = v.at[idx1, :].set(v1, indices_are_sorted=True)
v = v.at[idx2, :].set(v2, indices_are_sorted=True)
else:
v1, v2 = butterfly(lam, v[idx1], v[idx2])
v = v.at[idx1].set(v1, indices_are_sorted=True)
v = v.at[idx2].set(v2, indices_are_sorted=True)
return x, v, lam
def costfun(target_vec, initial_vec, norm_p=None):
"""Computes pairwise p-norm between entries of a vector.
Given two vectors y, x, this function computes
Args:
target_vec: y vector (corresponds to the columns)
initial_vec: x vector (corresponds to the rows)
    norm_p: norm parameter (default=1; 2 gives the squared Euclidean distance)
Returns:
costmat: a matrix C with entries C_ij = |y_i - x_j|^p
"""
dist = (jnp.reshape(target_vec, (target_vec.shape[0], 1))
- jnp.reshape(initial_vec, (1, initial_vec.shape[0])))
if norm_p is None or norm_p == 1:
return jnp.abs(dist)
elif norm_p == 2:
return jnp.square(dist)
else:
return jnp.power(jnp.abs(dist), norm_p)
def permutation_entropy(perm):
"""Entropy of a soft permutation matrix.
Args:
perm : Soft permutation with marginals equal to the ones vector.
Returns:
entropy: H_n[P] = -sum_{ij} P_{ij} log P_{ij} + n log(n)
"""
length = perm.shape[0]
neg_entr = jnp.where(jnp.greater(perm, _EPS) * jnp.less(perm, 1.0-_EPS),
perm*jnp.log(perm), 0.0)
entropy = -jnp.sum(neg_entr) + length*jnp.log(length)
return entropy
def permutation_elbo(perm, x, dispersion, norm_p=None, target_vec=None):
if target_vec is None:
target_vec = jnp.sort(x)
cost_matrix = costfun(target_vec, x, norm_p=norm_p) / dispersion
fidelity = - jnp.trace(cost_matrix.T.dot(perm))
entropy = permutation_entropy(perm)
elbo = fidelity + entropy
return elbo, fidelity, entropy
class VariationalSortingNet(object):
"""Class for efficient and differentiable order statistics."""
def __init__(
self, comms,
smoothing_strategy="entropy_reg",
sorting_strategy="hard",
sorting_dispersion=0.001,
norm_p=1):
"""Generate a sorting network that sort the input vector and values.
Args:
comms: Communication pattern (obtained via sorting_nets.comms* functions)
smoothing_strategy: How to sort the values.
(default="entropy_reg")
sorting_strategy: How to sort the keys. {hard, entropy_reg}
(default="hard")
sorting_dispersion: Dispersion parameter to sort the input vector x.
(default=0.001)
Only used when sorting_strategy is not hard
norm_p: norm to use for the cost function (default=1)
"""
assert smoothing_strategy in ["entropy_reg"]
assert sorting_strategy in ["hard", "entropy_reg"]
assert norm_p > 0
if norm_p == 1 or norm_p is None:
norm_choice = 1
elif norm_p == 2:
norm_choice = 2
else:
norm_choice = 0
self.comms = comms
if smoothing_strategy == "entropy_reg":
funcs = [functools.partial(_swap_prob_entropy_reg_lp, norm_p=norm_p),
_swap_prob_entropy_reg,
_swap_prob_entropy_reg_l2]
swap_prob_fun = funcs[norm_choice]
self._is_sampler = False
if sorting_strategy == "hard":
hard_swap_prob_fun = _swap_prob_hard
elif sorting_strategy == "entropy_reg":
hard_swap_prob_fun = functools.partial(
_swap_prob_entropy_reg, dispersion=sorting_dispersion)
if self._is_sampler:
self.stage_fwd_only = functools.partial(
forward_only_step, swap_prob_fun=swap_prob_fun,
hard_swap_prob_fun=hard_swap_prob_fun)
self.stage_fwd = functools.partial(
forward_step, swap_prob_fun=swap_prob_fun,
hard_swap_prob_fun=hard_swap_prob_fun)
else:
self.stage_fwd_only = functools.partial(
forward_only_step, swap_prob_fun=swap_prob_fun,
hard_swap_prob_fun=hard_swap_prob_fun, key=None)
self.stage_fwd = functools.partial(
forward_step, swap_prob_fun=swap_prob_fun,
hard_swap_prob_fun=hard_swap_prob_fun, key=None)
def forward_only(
self, x, v, u=None, dispersion=1.,
lower=0, upper=None, key=None):
r"""Evaluate order statistics u^\top P(x) v by forward only propagation.
This function should be preferred over forward_backward when implementing
cost functions for large models.
Args:
x : Input vector that determines the soft permutation P that approximately
        brings x into sorted order
v : Values to be smoothly sorted
u : (Optional) test vector, default = identity
dispersion : Smoothing parameter
lower : Index of the first stage of the sorting network to start the sort
upper : Final stage to finish the sort
key: (optional) Random seed to use for the forward sampling algorithm
Returns:
x_sorted : hard sorted vectors
orderstats : Result of u^\top P(x) v
"""
assert self.comms["num_wires"] == x.shape[0]
if upper is None:
upper = self.comms["num_stages"]
if not self._is_sampler:
for i in range(lower, upper):
x, v, _ = self.stage_fwd_only(x, v, i,
self.comms, dispersion=dispersion)
else:
subkey = jax.random.split(key, upper-lower)
for i in range(lower, upper):
x, v, _ = self.stage_fwd_only(x, v, i,
self.comms,
dispersion=dispersion,
key=subkey[i])
if u is None:
return x, v
else:
return x, u.T.dot(v)
def forward_backward(
self, x, u,
v=None, dispersion=1.,
lower=0, upper=None, key=None):
r"""Evaluate order statistics u^\top P(x) v by forward-backward.
    This function should be avoided when implementing cost functions for
    large models, as it stores all swapping probabilities; prefer forward_only
    in those cases.
Args:
x : Input vector that determines the soft permutation P that approximately
brings x into sorted order
u : Test vector to be transformed by transpose(P(x))
v : (Optional) Values to be sorted, default = identity
dispersion : Smoothing parameter
lower : Index of the first stage of the sorting network to start the sort
upper : Final stage to finish the sort
key: (optional) Random seed to use for the forward sampling algorithm
Returns:
x_sorted : hard sorted vectors
orderstats : Result of u^\top P(x) v
lambdas : Structure containing the swap probabilities
"""
assert self.comms["num_wires"] == x.shape[0]
if upper is None:
upper = self.comms["num_stages"]
# forward pass
lambdas = []
if not self._is_sampler:
for i in range(lower, upper):
x, lam = self.stage_fwd(x, i, self.comms, dispersion=dispersion)
lambdas.append(lam)
else:
subkey = jax.random.split(key, upper-lower)
for i in range(lower, upper):
x, lam = self.stage_fwd(x, i,
self.comms,
dispersion=dispersion,
key=subkey[i])
lambdas.append(lam)
# Backward pass.
for i in reversed(range(lower, upper)):
u = backward_step(u, i, self.comms, lambdas[i-lower])
if v is None:
return x, u.T, lambdas
else:
return x, u.T.dot(v), lambdas
def sort(self, x, dispersion, key=None):
"""Smooth sort."""
_, x_ss = self.forward_only(x, x, dispersion=dispersion, key=key)
return x_ss
def sort_tester(self, x, dispersion, key=None):
"""Smooth sort."""
xh, x_ss = self.forward_only(x, x, dispersion=dispersion, key=key)
return xh, x_ss
def ismax(self, x, dispersion, key=None):
r"""Probabilities that maximum of x is x[i] for i=0..len(x)-1."""
length = self.comms["num_wires"]
u = jax.nn.one_hot(length-1, length)
_, res, _ = self.forward_backward(x, u=u, dispersion=dispersion, key=key)
return res
def max(self, x, dispersion, key=None):
length = self.comms["num_wires"]
u = jax.nn.one_hot(length-1, length)
_, x_ss = self.forward_only(x, x, u=u, dispersion=dispersion, key=key)
return x_ss
def ismin(self, x, dispersion, key=None):
r"""Probabilities that minimum of x is x[i] for i=0..len(x)-1."""
length = self.comms["num_wires"]
u = jax.nn.one_hot(0, length)
_, res, _ = self.forward_backward(x, u=u, dispersion=dispersion, key=key)
return res
def min(self, x, dispersion, key=None):
length = self.comms["num_wires"]
u = jax.nn.one_hot(0, length)
_, x_ss = self.forward_only(x, x, u=u, dispersion=dispersion, key=key)
return x_ss
def isquantile(self, x, dispersion, alpha=0.5, tau=0.5, key=None):
r"""Probabilities that the alpha quantile of x is x[i] for i=0..len(x)-1."""
length = self.comms["num_wires"]
idx1 = jnp.floor(alpha * (length-1))
idx2 = jnp.ceil(alpha * (length-1))
u = tau * jax.nn.one_hot(idx2, length)
u += (1 - tau) * jax.nn.one_hot(idx1, length)
_, res, _ = self.forward_backward(x, u=u, dispersion=dispersion, key=key)
return res
def quantile(self, x, dispersion, alpha=0.5, tau=0.5, key=None):
"""Retrieves the smoothed alpha quantile."""
length = self.comms["num_wires"]
idx1 = jnp.floor(alpha * (length-1))
idx2 = jnp.ceil(alpha * (length-1))
u = tau * jax.nn.one_hot(idx2, length)
u += (1 - tau) * jax.nn.one_hot(idx1, length)
_, x_ss = self.forward_only(x, x, u=u, dispersion=dispersion, key=key)
return x_ss
def ismedian(self, x, dispersion, tau=0.5, key=None):
r"""Probabilities that the median of x is x[i] for i=0..len(x)-1.
Args:
x : jnp.array to be sorted
dispersion: Smoothing parameter
tau: an arbitrary parameter in [0, 1] for resolving ties
key: seed (used only if self.is_sampler is true)
Returns:
      result: probabilities that the median of x is x[i] for i=0..len(x)-1
"""
return self.isquantile(x, dispersion=dispersion, alpha=0.5,
tau=tau, key=key)
def median(self, x, dispersion, tau=0.5, key=None):
"""Retrieves the smoothed median."""
return self.quantile(x, dispersion, alpha=0.5, tau=tau, key=key)
def istopk(self, x, dispersion, topk, key=None):
"""Smooth discrete distribution with a mode highest k entries."""
length = self.comms["num_wires"]
u = jnp.sum(jax.nn.one_hot(range(length-1, length-topk-1, -1), length),
axis=0)
_, res, _ = self.forward_backward(x, u=u, dispersion=dispersion, key=key)
return res
def sortperm(self, x, dispersion, key=None, full_output=False):
"""Smoothed sorting permutation of x."""
length = self.comms["num_wires"]
u = jnp.eye(length)
if full_output:
xh, res, lambdas = self.forward_backward(x, u=u, dispersion=dispersion,
key=key)
return xh, res, lambdas
else:
_, res, _ = self.forward_backward(x, u=u, dispersion=dispersion, key=key)
return res
def subperm(self, x, dispersion, idx, from_top=False, key=None):
"""Retrieves a subset of the sorting permutation.
Args:
x : jnp.array to be sorted
dispersion: Smoothing parameter
idx: Indices of columns in an arbitrary order
from_top: Flag to interpret idx (default=False).
When from_top == True, the maximum is retrieved with idx = [0]
When from_top == False, the maximum is retrieved with idx = [length-1]
key: seed (used only if self.is_sampler is true)
Returns:
res: Result of running the order statistics.
"""
length = self.comms["num_wires"]
if from_top:
u = jnp.flipud(jnp.eye(length)[:, idx])
else:
u = jnp.eye(length)[:, idx]
_, res, _ = self.forward_backward(x, u=u, dispersion=dispersion, key=key)
return res
def log_likelihood_max(self, x, v, dispersion, output_log_scale=True):
if output_log_scale:
return jnp.log(_EPS + self.ismax(x, dispersion).dot(v))
else:
return self.ismax(x, dispersion).dot(v)
def log_likelihood_order(self, x, order, dispersion):
target_perm = jax.nn.one_hot(order, len(order), dtype=jnp.float32).T
inner = jnp.diag(self.sortperm(x, dispersion).dot(target_perm))
return jnp.sum(jnp.log(_EPS + inner))
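# Illustrative usage sketch (hypothetical helper, made-up values): smooth
# sorting of a small vector; it assumes the companion sorting_nets module (see
# variational_sorting_net_test.py) providing comm_pattern_bitonic.
def _example_smooth_sort():
  import sorting_nets  # local import to keep the sketch self-contained
  snet = sorting_nets.comm_pattern_bitonic(3)  # network over 2**3 = 8 wires
  sorter = VariationalSortingNet(snet)
  x = jnp.array([5., 1., 4., 7., 0., 3., 6., 2.])
  return sorter.sort(x, dispersion=0.1)  # approximately jnp.sort(x)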
| conformal_training-main | variational_sorting_net.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various simple architectures."""
from typing import List, Optional, Tuple, Sequence, Mapping, Any
import haiku as hk
import jax
from jax import numpy as jnp
def _check_create(
classes: int = 10, activation: Optional[str] = None,
whitening: Optional[Tuple[jnp.ndarray, jnp.ndarray]] = None):
"""Helper to check arguments for creating models.
Args:
classes: number of output classes
activation: activation function to use or None
whitening: None or tuple of means and stds to use for whitening
Raises:
ValueError: invalid arguments for architecture creation
"""
if classes < 1:
raise ValueError('Expecting at least 1 class.')
if activation is not None:
if activation not in ['relu', 'tanh']:
raise ValueError('Unsupported activation.')
if whitening is not None:
if len(whitening) != 2:
raise ValueError(
'Expecting whitening to be tuple containing means and std.')
def _apply_whitening(
inputs: jnp.ndarray,
whitening: Optional[Tuple[jnp.ndarray, jnp.ndarray]]) -> jnp.ndarray:
"""Apply data whitening.
Args:
inputs: inputs
whitening: mean and std for whitening as tuple
Returns:
Whitened inputs
"""
if whitening is not None:
inputs = (inputs - whitening[0].reshape((1, 1, 1, -1)))
inputs = inputs / whitening[1].reshape((1, 1, 1, -1))
return inputs
def create_mlp(
classes: int = 10,
activation: str = 'relu',
units: Optional[List[int]] = None,
whitening: Optional[Tuple[jnp.ndarray, jnp.ndarray]] = None
)-> hk.TransformedWithState:
"""Simple MLP architecture.
Create an MLP with the given output classes and hidden layers.
Defaults to a linear model.
Args:
classes: number of output classes
activation: activation function to use
units: list of hidden units per hidden layer
whitening: None or tuple of means and stds to use for whitening
Returns:
Created jax function representing the MLP.
Raises:
ValueError: invalid architecture arguments
"""
if units is None:
units = []
_check_create(classes, activation=activation, whitening=whitening)
def forward(inputs, training):
inputs = _apply_whitening(inputs, whitening)
inputs = jnp.reshape(inputs, [inputs.shape[0], -1])
for unit in units:
inputs = hk.Linear(unit)(inputs)
inputs = hk.BatchNorm(True, True, 0.9)(inputs, training)
inputs = getattr(jax.nn, activation)(inputs)
inputs = hk.Linear(classes)(inputs)
return inputs
return hk.transform_with_state(forward)
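# Illustrative usage sketch (hypothetical helper, made-up shapes): initialize
# and apply the MLP; transform_with_state is needed because of batch norm.
def _example_mlp_forward() -> jnp.ndarray:
  model = create_mlp(classes=10, units=[32])
  rng = jax.random.PRNGKey(0)
  inputs = jnp.zeros((8, 28, 28, 1))
  params, state = model.init(rng, inputs, training=True)
  logits, _ = model.apply(params, state, rng, inputs, training=False)
  return logits  # shape (8, 10)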
def create_cnn(
classes: int = 10, activation: str = 'relu',
channels: Optional[List[int]] = None,
kernels: Optional[List[int]] = None,
whitening: Optional[Tuple[jnp.ndarray, jnp.ndarray]] = None
) -> hk.TransformedWithState:
"""Simple CNN architecture.
Create a CNN with several convolutional layers, followed by
batch normalization, ReLU and max pooling and a final fully connected layer.
Args:
classes: number of output classes
activation: activation function to use
channels: convolutional channels of each convolutional stage
kernels: kernel size for each convolutional layer
whitening: None or tuple of means and stds to use for whitening
Returns:
Created jax function representing the CNN
Raises:
ValueError: invalid architecture arguments
"""
if channels is None:
channels = [32, 64, 128]
if kernels is None:
kernels = [3, 3, 3]
if not channels:
    raise ValueError('Expecting at least one convolutional channel.')
if len(channels) != len(kernels):
raise ValueError('Expecting same number of channels and kernels.')
_check_create(classes, activation=activation, whitening=whitening)
def forward(inputs, training):
inputs = _apply_whitening(inputs, whitening)
for l in range(len(channels)):
c = channels[l]
k = kernels[l]
inputs = hk.Conv2D(output_channels=c, kernel_shape=[k, k])(inputs)
inputs = hk.BatchNorm(True, True, 0.9)(inputs, training)
inputs = getattr(jax.nn, activation)(inputs)
      # window_shape and strides need to be tuples to avoid a deprecation
      # warning.
inputs = hk.MaxPool(
window_shape=(2, 2, 1), strides=(2, 2, 1), padding='SAME')(inputs)
inputs = jnp.reshape(inputs, [inputs.shape[0], -1])
inputs = hk.Linear(classes)(inputs)
return inputs
# transform_with_state necessary because of batch norm.
return hk.transform_with_state(forward)
class ResNet(hk.nets.ResNet):
"""Overwrite Haiku's ResNet model for Cifar10."""
def __init__(
self,
blocks_per_group: Sequence[int],
num_classes: int,
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
bottleneck: bool = True,
channels_per_group: Sequence[int] = (256, 512, 1024, 2048),
use_projection: Sequence[bool] = (True, True, True, True),
logits_config: Optional[Mapping[str, Any]] = None,
name: Optional[str] = None,
):
"""Constructs a ResNet model.
In contrast to Haiku's original implementation, the first convolutional
layer uses 3x3 convolutional kernel with stride 1.
Args:
blocks_per_group: A sequence of length 4 that indicates the number of
blocks created in each group.
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers. By default the
``decay_rate`` is ``0.9`` and ``eps`` is ``1e-5``.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to
``False``.
bottleneck: Whether the block should bottleneck or not. Defaults to
``True``.
channels_per_group: A sequence of length 4 that indicates the number
of channels used for each block in each group.
use_projection: A sequence of length 4 that indicates whether each
residual block should use projection.
logits_config: A dictionary of keyword arguments for the logits layer.
name: Name of the module.
"""
super(ResNet, self).__init__(
blocks_per_group, num_classes, bn_config, resnet_v2, bottleneck,
channels_per_group, use_projection, logits_config, name)
self.initial_conv = hk.Conv2D(
output_channels=64, kernel_shape=3, stride=1,
with_bias=False, padding='SAME', name='initial_conv')
def _check_create_resnet(version: int, channels: int):
"""Helper to check arguments for creating resnets.
Args:
version: resnet version
channels: resnet channels to start with
Raises:
ValueError: invalid arguments for architecture creation
"""
if version not in [18, 34, 50, 101, 152, 200]:
raise ValueError('Only ResNet-[18, 34, 50, 101, 152, 200] supported.')
if channels < 1:
raise ValueError('Expecting at least one channel to start with.')
def create_resnet(
classes: int = 10,
version: Optional[int] = 18,
channels: Optional[int] = None,
resnet_v2: Optional[bool] = False,
whitening: Optional[Tuple[jnp.ndarray, jnp.ndarray]] = None,
logit_w_init: Optional[hk.initializers.Initializer] = jnp.zeros
) -> hk.TransformedWithState:
"""Simple wrapper for Haiku's ResNet implementation.
  Creates a ResNet-version with the given channels in the first block
and whitening if desired. See Haiku doc for details on structure and
resnet_v2.
Args:
classes: number of output classes
version: version, i.e., depth of ResNet
channels: number of channels in first block
resnet_v2: whether to use ResNet v2
whitening: None or tuple of means and stds to use for whitening
logit_w_init: logit weights initializer
Returns:
Created jax function representing the ResNet
Raises:
ValueError: invalid architecture arguments
"""
_check_create_resnet(version, channels)
_check_create(classes, activation=None, whitening=whitening)
resnet_config = ResNet.CONFIGS[version]
# channels defines the number of channels for first block; the remaining
# blocks' channels are derived by doubling.
resnet_config['channels_per_group'] = tuple([
channels*2**i for i in range(len(resnet_config['blocks_per_group']))
])
  # The very first convolution in Haiku ResNets is hard-coded to 64 channels.
# So if channels is not 64, we need to add a projection.
if channels != 64:
resnet_config['use_projection'] = tuple([True]*4)
def forward(inputs, training):
inputs = _apply_whitening(inputs, whitening)
net = ResNet(
num_classes=classes, resnet_v2=resnet_v2,
logits_config={'w_init': logit_w_init}, **resnet_config)
return net(inputs, training)
return hk.transform_with_state(forward)
| conformal_training-main | models.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for conformal prediction calibration and predictions."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import chex
import jax
import jax.numpy as jnp
import numpy as np
import conformal_prediction as cp
import test_utils as cptutils
class ConformalPredictionTest(parameterized.TestCase):
def setUp(self):
super(ConformalPredictionTest, self).setUp()
np.random.seed(0)
@parameterized.parameters([
dict(array=np.array([]), q=0.5),
dict(array=np.linspace(0, 1, 100), q=-0.1),
dict(array=np.linspace(0, 1, 100), q=1.1),
dict(array=np.linspace(0, 1, 100).reshape(2, 50), q=0.5),
])
def test_conformal_quantile_errors(self, array, q):
with self.assertRaises(ValueError):
cp.conformal_quantile_with_checks(jnp.array(array), q=q)
def _test_validation_coverage(self, calibrate, predict, alpha):
num_examples = 10000
num_classes = 10
labels = cptutils.get_labels(num_examples, num_classes)
probabilities = cptutils.get_probabilities(labels, 5)
threshold = calibrate(probabilities, labels, alpha)
confidence_sets = predict(probabilities, threshold)
# Some methods will have perfect coverage in this case as the true class
# always represents the largest probability.
# Others will only have 1 - alpha coverage as coverage is independent of
# sorting.
self.assertGreaterEqual(
jnp.sum(confidence_sets[jnp.arange(confidence_sets.shape[0]), labels]),
int(num_examples * (1 - alpha)))
@parameterized.parameters([
dict(alpha=0.1),
dict(alpha=0.01),
])
def test_threshold_confidence_sets_validation_coverage(self, alpha):
calibrate = cp.calibrate_threshold_with_checks
predict = cp.predict_threshold_with_checks
self._test_validation_coverage(calibrate, predict, alpha)
@parameterized.parameters([
dict(probabilities=np.zeros((100, 10)),
labels=np.zeros((100)).astype(int)),
])
def test_calibrate_predict_threshold_jit(self, probabilities, labels):
calibrate_threshold_fn = jax.jit(
functools.partial(cp.calibrate_threshold, alpha=0.1))
predict_threshold_fn = jax.jit(cp.predict_threshold)
tau = calibrate_threshold_fn(probabilities, labels)
confidence_sets = predict_threshold_fn(probabilities, tau)
chex.assert_shape(confidence_sets, probabilities.shape)
@parameterized.parameters([
dict(probabilities=np.array([]), tau=0),
dict(probabilities=np.zeros((100)), tau=0),
])
def test_predict_threshold_errors(self, probabilities, tau):
with self.assertRaises(ValueError):
cp.predict_threshold_with_checks(jnp.array(probabilities), tau)
@parameterized.parameters([
dict(alpha=0.1, k_reg=None, lambda_reg=None),
dict(alpha=0.01, k_reg=None, lambda_reg=None),
dict(alpha=0.1, k_reg=1, lambda_reg=0),
dict(alpha=0.01, k_reg=1, lambda_reg=0),
dict(alpha=0.1, k_reg=1, lambda_reg=0.5),
dict(alpha=0.01, k_reg=1, lambda_reg=0.5),
])
def test_raps_confidence_sets_validation_coverage(
self, alpha, k_reg, lambda_reg):
calibrate = functools.partial(
cp.calibrate_raps_with_checks,
k_reg=k_reg, lambda_reg=lambda_reg)
predict = functools.partial(
cp.predict_raps_with_checks, k_reg=k_reg, lambda_reg=lambda_reg)
self._test_validation_coverage(calibrate, predict, alpha)
@parameterized.parameters([
dict(probabilities=np.zeros((100, 10)),
labels=np.zeros((100)).astype(int)),
])
def test_calibrate_predict_raps_jit(self, probabilities, labels):
calibrate_raps_fn = jax.jit(functools.partial(
cp.calibrate_raps, alpha=0.1, k_reg=1, lambda_reg=0.5))
predict_raps_fn = jax.jit(functools.partial(
cp.predict_raps, k_reg=1, lambda_reg=0.5))
rng = jax.random.PRNGKey(0)
tau = calibrate_raps_fn(probabilities, labels, rng=rng)
confidence_sets = predict_raps_fn(probabilities, tau, rng=rng)
chex.assert_shape(confidence_sets, probabilities.shape)
@parameterized.parameters([
dict(probabilities=np.array([]),
labels=np.array([]), alpha=0.1),
dict(probabilities=np.zeros((100)),
labels=np.ones((100), dtype=int), alpha=0.1),
dict(probabilities=np.zeros((100, 10)),
labels=np.ones((100), dtype=int) * 99, alpha=0.1),
dict(probabilities=np.zeros((100, 10)),
labels=np.ones((100)) * 0.5, alpha=0.1),
dict(probabilities=np.zeros((100, 10)),
labels=np.ones((100), dtype=int), alpha=-0.1),
dict(probabilities=np.zeros((100, 10)),
labels=np.ones((100), dtype=int), alpha=1.1),
])
def test_calibrate_raps_errors(self, probabilities, labels, alpha):
with self.assertRaises(ValueError):
cp.calibrate_raps_with_checks(
jnp.array(probabilities), jnp.array(labels), alpha, rng=None)
@parameterized.parameters([
dict(probabilities=np.array([]), tau=0.9),
dict(probabilities=np.zeros((100)), tau=0.9),
dict(probabilities=np.zeros((100, 10)), tau=-0.1),
])
def test_predict_raps_errors(self, probabilities, tau):
with self.assertRaises(ValueError):
cp.predict_raps_with_checks(jnp.array(probabilities), tau, rng=None)
if __name__ == '__main__':
absltest.main()
| conformal_training-main | conformal_prediction_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for open sourcing."""
import os
import pickle
from typing import Any, Tuple, Dict
from absl import logging
import jax.numpy as jnp
import ml_collections as collections
def _dump_pickle(mixed: Any, path: str):
"""Write data to a pickle file."""
f = open(path, 'wb')
pickle.dump(mixed, f)
f.close()
logging.info('Wrote %s', path)
def _load_pickle(path: str) -> Any:
"""Load data from a pickle file."""
f = open(path, 'rb')
mixed = pickle.load(f)
f.close()
logging.info('Read %s', path)
return mixed
class Checkpoint:
"""Checkpoint to save and load models."""
class State:
"""State holding parameters, model and optimizer state and epoch."""
def __init__(self):
"""Create a checkpoint state."""
self.params = None
""" (FlatMapping) Model parameters. """
self.model_state = None
""" (FlatMapping) Model state. """
self.optimizer_state = None
"""" (List[optax.TraceState] Optimizer state. """
self.epoch = None
""" (int) Epoch. """
def __init__(self, path: str = './'):
"""Create a checkpoint in the provided path.
Args:
path: path to checkpoint
"""
self.state = Checkpoint.State()
""" (State) State of checkpoint."""
self.path = path
"""(str) Path to checkpoint."""
self.params_file = os.path.join(self.path, 'params.pkl')
""" (str) File to store params. """
    self.model_state_file = os.path.join(self.path, 'model_state.pkl')
    """ (str) File to store model state. """
    self.optimizer_state_file = os.path.join(self.path, 'optimizer_state.pkl')
    """ (str) File to store optimizer state. """
    self.epoch_file = os.path.join(self.path, 'epoch.pkl')
    """ (str) File to store epoch. """
def _exists(self):
"""Check if checkpoint exists.
Returns:
true if all checkpoint files were found
"""
complete_checkpoint = True
for path in [
self.params_file, self.model_state_file,
self.optimizer_state_file, self.epoch_file,
]:
if not os.path.isfile(path):
complete_checkpoint = False
return complete_checkpoint
def restore(self):
"""Restore checkpoint from files."""
if not self._exists():
raise ValueError(f'Checkpoint {self.path} not found.')
self.state.params = _load_pickle(self.params_file)
self.state.model_state = _load_pickle(self.model_state_file)
self.state.optimizer_state = _load_pickle(self.optimizer_state_file)
self.state.epoch = _load_pickle(self.epoch_file)
def save(self):
"""Save checkpoint to files."""
os.makedirs(self.path, exist_ok=True)
_dump_pickle(self.state.params, self.params_file)
_dump_pickle(self.state.model_state, self.model_state_file)
_dump_pickle(self.state.optimizer_state, self.optimizer_state_file)
_dump_pickle(self.state.epoch, self.epoch_file)
def restore_or_save(self):
"""Restore or save checkpoint."""
if self._exists():
self.restore()
else:
self.save()
def create_checkpoint(config: collections.ConfigDict) -> Checkpoint:
"""Create a checkpoint.
Args:
config: configuration
Returns:
Checkpoint.
"""
return Checkpoint(config.path)
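# Illustrative usage sketch (hypothetical helper, made-up path and payload):
# round-trips a checkpoint through save() and restore().
def _example_checkpoint_roundtrip(path: str = '/tmp/example_checkpoint/') -> int:
  checkpoint = Checkpoint(path)
  checkpoint.state.params = {'w': [1., 2.]}
  checkpoint.state.model_state = {}
  checkpoint.state.optimizer_state = []
  checkpoint.state.epoch = 0
  checkpoint.save()
  restored = Checkpoint(path)
  restored.restore()
  return restored.state.epoch  # 0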
def load_checkpoint(config: collections.ConfigDict) -> Tuple[Checkpoint, str]:
"""Loads the checkpoint using the provided config.path.
Args:
config: fine-tuning configuration
Returns:
Checkpoint and loaded path
"""
checkpoint = Checkpoint(config.path)
checkpoint.restore()
return checkpoint, config.path
class PickleWriter:
"""Pickle writer to save evaluation."""
def __init__(self, path: str, name: str):
"""Create a writer.
Args:
path: path to directory to write pickle file to
name: name of pickle file, without extension
"""
self.path = os.path.join(path, name + '.pkl')
""" (str) Path to write to."""
def write(self, values: Any):
"""Write data."""
_dump_pickle(values, self.path)
def create_writer(config: collections.ConfigDict, key: str) -> Any:
"""Create a writer to save evaluation results.
Args:
config: configuration
key: identifier for writer
Returns:
Writer
"""
return PickleWriter(config.path, key)
class PickleReader:
"""Pickle reader to load evaluation."""
def __init__(self, path: str, name: str):
"""Create a reader.
Args:
path: path to directory to read from
name: name of pickle file to read, without extension
"""
self.path = os.path.join(path, name + '.pkl')
""" (str) Path to write to."""
def read(self) -> Any:
"""Read pickled data."""
return _load_pickle(self.path)
def load_predictions(
path: str, key: str = 'eval', val_examples: int = 0) -> Dict[str, Any]:
"""Load model predictions/logits for specific experiment.
Args:
path: path to experiment
key: evaluation key to load test and val predictions for
val_examples: number of validation examples used in experiment
Returns:
Dictionary containing groups for evaluation and test/val logits/labels
"""
test_reader = PickleReader(path, f'{key}_test')
val_reader = PickleReader(path, f'{key}_val')
eval_test = test_reader.read()
  # Groups are used for evaluation but are added optionally later; we still
# need to initialize the dict for everything to work properly.
model = {
'data': {'groups': {}},
'test_logits': eval_test['logits'],
'test_labels': eval_test['labels'],
'val_logits': jnp.array([]),
'val_labels': jnp.array([]),
}
test_examples = model['test_labels'].shape[0]
logging.info('loaded %s: %d test examples', path, test_examples)
if val_examples > 0:
eval_val = val_reader.read()
model['val_logits'] = eval_val['logits']
model['val_labels'] = eval_val['labels']
val_examples = model['val_labels'].shape[0]
logging.info('loaded %s: %d val examples', path, val_examples)
return model
| conformal_training-main | open_source_utils.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for variational sorting networks."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
import sorting_nets
import variational_sorting_net
class VariationalSortingNetTest(parameterized.TestCase):
@parameterized.parameters([
[16, "entropy_reg", "hard", 12],
[16, "entropy_reg", "entropy_reg", 12],
])
def test_sort(self, length, smoothing_strategy, sorting_strategy, prng_key):
dispersion = 0.05
key = jax.random.PRNGKey(prng_key)
subkey, key = jax.random.split(key)
x = jax.random.uniform(subkey, shape=(length,))*5
snets = {
"batcher-bitonic": sorting_nets.comm_pattern_batcher(
length, make_parallel=True)
}
for sn in snets:
bs = variational_sorting_net.VariationalSortingNet(
snets[sn], smoothing_strategy=smoothing_strategy,
sorting_strategy=sorting_strategy)
x_hard, _ = bs.sort_tester(x, dispersion=dispersion, key=subkey)
if sorting_strategy == "hard":
x_sorted = jnp.sort(x)
assert jnp.abs(x_hard[-1] - x_sorted[-1]) < 1e-6
@parameterized.parameters([
[2],
[5],
])
def test_jacobian(self, log2_length):
length = 2 ** log2_length
snet = sorting_nets.comm_pattern_bitonic(log2_length)
bs = variational_sorting_net.VariationalSortingNet(
snet, smoothing_strategy="entropy_reg", sorting_strategy="hard")
jac_sort = jax.jacrev(bs.sort)
key = jax.random.PRNGKey(12)
subkey, key = jax.random.split(key)
x = jax.random.uniform(subkey, shape=(length,))*5
jac = jac_sort(x, dispersion=0.1)
assert jac.shape == (length, length)
if __name__ == "__main__":
absltest.main()
| conformal_training-main | variational_sorting_net_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for data utilities."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
import ml_collections as collections
import data as cpdata
import data_utils as cpdatautils
DATA_DIR = './data/'
class DataUtilsTest(parameterized.TestCase):
@parameterized.parameters([
dict(cifar_augmentation='standard+autoaugment+cutout'),
])
def test_apply_cifar_augmentation(self, cifar_augmentation):
batch_size = 100
data = cpdata.load_data_split(
'cifar10', val_examples=50000 - batch_size, data_dir=DATA_DIR)
config = collections.ConfigDict()
config.cifar_augmentation = cifar_augmentation
ds = cpdatautils.apply_cifar_augmentation(
config, data['train'], data['shape'])
ds = ds.batch(batch_size)
inputs, _ = next(cpdata.load_batches(ds))
chex.assert_shape(inputs, (batch_size, 32, 32, 3))
if __name__ == '__main__':
absltest.main()
| conformal_training-main | data_utils_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluation metrics for conformal prediction."""
from typing import Tuple
import jax
import jax.numpy as jnp
def _check_labels(probabilities: jnp.ndarray, labels: jnp.ndarray):
"""Helper to check shapes or probabilities/sets and labels.
Checks shapes of probabilities of confidence sets and labels for
evaluation.
Args:
probabilities: probabilities or confidence sets
labels: corresponding ground truth labels
Raises:
ValueError if shapes do not match.
"""
if probabilities.ndim != 2:
raise ValueError('Expecting probabilities/confidence sets of '
'shape n_examples x n_classes.')
if labels.ndim != 1:
raise ValueError('Expecting labels of shape n_examples.')
if probabilities.shape[1] == 0:
raise ValueError('Expecting at least one class.')
if probabilities.shape[0] != labels.shape[0]:
raise ValueError('Number of probabilities/confidence sets does '
'not match number of labels.')
if not jnp.issubdtype(labels.dtype, jnp.integer):
raise ValueError('Expecting labels to be integers.')
if jnp.max(labels) >= probabilities.shape[1]:
raise ValueError(
'labels contains more classes than probabilities/confidence sets.')
def _check_one_hot_labels(
probabilities: jnp.ndarray, one_hot_labels: jnp.ndarray):
"""Helper to check shapes of probabilities/sets and one-hot labels.
Args:
probabilities: probabilities or confidence sets
one_hot_labels: corresponding ground truth labels in one-hot format
Raises:
ValueError if shapes do not match.
"""
if probabilities.ndim != 2:
raise ValueError('Expecting probabilities/confidence sets of '
'shape n_examples x n_classes.')
if one_hot_labels.ndim != 2:
raise ValueError('Expecting labels in one-hot format of '
'shape n_examples x n_classes.')
if probabilities.shape[1] == 0:
raise ValueError('Expecting at least one class.')
if probabilities.shape[0] != one_hot_labels.shape[0]:
raise ValueError('Number of probabilities/confidence sets does '
'not match number of labels.')
if probabilities.shape[1] != one_hot_labels.shape[1]:
raise ValueError('Number of classes in probabilities/confidence '
'sets and one-hot labels do not match.')
def _check_conditional_labels(
probabilities: jnp.ndarray,
conditional_labels: jnp.ndarray):
"""Helper to check conditional_labels for metric computation.
Args:
probabilities: probabilities or confidence sets
conditional_labels: labels tp condition on for all examples
Raises:
ValueError if shapes do not match
"""
if conditional_labels.ndim != 1:
raise ValueError('Expecting conditional_labels of shape n_examples.')
if conditional_labels.shape[0] != probabilities.shape[0]:
raise ValueError('Number of probabilities/confidence sets does '
'not match number of conditional labels.')
if not jnp.issubdtype(conditional_labels.dtype, jnp.integer):
raise ValueError('Expecting conditional labels to be integers.')
def compute_conditional_accuracy(
probabilities: jnp.ndarray, labels: jnp.ndarray,
conditional_labels: jnp.ndarray, conditional_label: int) -> float:
"""Computes conditional accuracy given softmax probabilities and labels.
Conditional accuracy is defined as the accuracy on a subset of the examples
as selected using the conditional label(s). For example, this allows
to compute accuracy conditioned on class labels.
Args:
probabilities: predicted probabilities on test set
labels: ground truth labels on test set
conditional_labels: conditional labels to compute accuracy on
conditional_label: selected conditional label to compute accuracy on
Returns:
Accuracy
"""
selected = (conditional_labels == conditional_label)
num_examples = jnp.sum(selected)
predictions = jnp.argmax(probabilities, axis=1)
error = selected * (predictions != labels)
error = jnp.where(num_examples == 0, 1, jnp.sum(error)/num_examples)
return 1 - error
def compute_conditional_accuracy_with_checks(
probabilities: jnp.ndarray, labels: jnp.ndarray,
conditional_labels: jnp.ndarray, conditional_label: int) -> float:
"""compute_conditional_accuracy with extra argument checks."""
_check_labels(probabilities, labels)
_check_conditional_labels(probabilities, conditional_labels)
return compute_conditional_accuracy(
probabilities, labels, conditional_labels, conditional_label)
def compute_accuracy(probabilities: jnp.ndarray, labels: jnp.ndarray) -> float:
"""Compute unconditional accuracy using compute_conditional_accuracy."""
return compute_conditional_accuracy(
probabilities, labels, jnp.zeros(labels.shape, int), 0)
def compute_accuracy_with_checks(
probabilities: jnp.ndarray, labels: jnp.ndarray) -> float:
"""compute_accuracy with additional argument checks raising ValuzeError."""
return compute_conditional_accuracy_with_checks(
probabilities, labels, jnp.zeros(labels.shape, int), 0)
def compute_conditional_multi_coverage(
confidence_sets: jnp.ndarray, one_hot_labels: jnp.ndarray,
conditional_labels: jnp.ndarray, conditional_label: int) -> float:
"""Compute coverage of confidence sets, potentially for multiple labels.
The given labels are assumed to be one-hot labels and the implementation
supports checking coverage of multiple classes, i.e., whether one of
the given ground truth labels is in the confidence set.
Args:
confidence_sets: confidence sets on test set as 0-1 array
one_hot_labels: ground truth labels on test set in one-hot format
conditional_labels: conditional labels to compute coverage on a subset
conditional_label: selected conditional to compute coverage for
Returns:
Coverage.
"""
selected = (conditional_labels == conditional_label)
num_examples = jnp.sum(selected)
coverage = selected * jnp.clip(
jnp.sum(confidence_sets * one_hot_labels, axis=1), 0, 1)
coverage = jnp.where(num_examples == 0, 1, jnp.sum(coverage)/num_examples)
return coverage
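# Example (sketch with hypothetical values): one-hot labels may mark several
# admissible classes per example; an example counts as covered if any of them
# is in its confidence set.
#
#   confidence_sets = jnp.array([[1, 1, 0], [0, 0, 1]])
#   one_hot_labels = jnp.array([[1, 0, 1], [1, 0, 0]])
#   compute_conditional_multi_coverage(
#       confidence_sets, one_hot_labels, jnp.zeros(2, int), 0)  # 0.5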
def compute_conditional_multi_coverage_with_checks(
confidence_sets: jnp.ndarray, one_hot_labels: jnp.ndarray,
conditional_labels: jnp.ndarray, conditional_label: int) -> float:
"""compute_conditional_multi_coverage with additional argument checks."""
_check_one_hot_labels(confidence_sets, one_hot_labels)
_check_conditional_labels(confidence_sets, conditional_labels)
return compute_conditional_multi_coverage(
confidence_sets, one_hot_labels, conditional_labels, conditional_label)
def compute_coverage(
confidence_sets: jnp.ndarray, labels: jnp.ndarray) -> float:
"""Compute unconditional coverage using compute_conditional_multi_coverage.
Args:
confidence_sets: confidence sets on test set as 0-1 array
labels: ground truth labels on test set (not in one-hot format)
Returns:
Coverage.
"""
one_hot_labels = jax.nn.one_hot(labels, confidence_sets.shape[1])
return compute_conditional_multi_coverage(
confidence_sets, one_hot_labels, jnp.zeros(labels.shape, int), 0)
def compute_coverage_with_checks(
confidence_sets: jnp.ndarray, labels: jnp.ndarray) -> float:
"""compute_coverage with additional argument checks raising ValueError."""
return compute_conditional_coverage_with_checks(
confidence_sets, labels, jnp.zeros(labels.shape, int), 0)
def compute_conditional_coverage(
confidence_sets: jnp.ndarray, labels: jnp.ndarray,
conditional_labels: jnp.ndarray, conditional_label: int) -> float:
"""Compute conditional coverage using compute_conditional_multi_coverage.
Args:
confidence_sets: confidence sets on test set as 0-1 array
labels: ground truth labels on test set (not in one-hot format)
conditional_labels: conditional labels to compute coverage on a subset
conditional_label: selected conditional to compute coverage for
Returns:
Conditional coverage.
"""
one_hot_labels = jax.nn.one_hot(labels, confidence_sets.shape[1])
return compute_conditional_multi_coverage(
confidence_sets, one_hot_labels, conditional_labels, conditional_label)
def compute_conditional_coverage_with_checks(
confidence_sets: jnp.ndarray, labels: jnp.ndarray,
conditional_labels: jnp.ndarray, conditional_label: int) -> float:
"""compute_conditional_coverage with additional argument checks raising."""
_check_labels(confidence_sets, labels)
_check_conditional_labels(confidence_sets, conditional_labels)
return compute_conditional_coverage(
confidence_sets, labels, conditional_labels, conditional_label)
def compute_miscoverage(
confidence_sets: jnp.ndarray, one_hot_labels: jnp.ndarray) -> float:
"""Compute mis-coverage for given one-hot labels.
  Mis-coverage is the coverage computed for the classes marked in
  one_hot_labels that should not be included in the confidence sets.
Args:
confidence_sets: confidence sets on test set as 0-1 array
one_hot_labels: ground truth labels on test set in one-hot format
Returns:
Mis-coverage.
"""
return compute_conditional_multi_coverage(
confidence_sets, one_hot_labels,
jnp.zeros(confidence_sets.shape[0], int), 0)
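# Example (sketch with hypothetical values): one_hot_labels marks classes that
# should *not* appear in the sets, so lower mis-coverage is better.
#
#   confidence_sets = jnp.array([[1, 0, 0], [1, 1, 0]])
#   one_hot_labels = jnp.array([[0, 1, 0], [0, 1, 1]])
#   compute_miscoverage(confidence_sets, one_hot_labels)  # 0.5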
def compute_miscoverage_with_checks(
confidence_sets: jnp.ndarray, one_hot_labels: jnp.ndarray) -> float:
"""compute_miscoverage with additional argument checks."""
_check_one_hot_labels(confidence_sets, one_hot_labels)
return compute_miscoverage(confidence_sets, one_hot_labels)
def compute_conditional_miscoverage(
confidence_sets: jnp.ndarray, one_hot_labels: jnp.ndarray,
conditional_labels: jnp.ndarray, conditional_label: int) -> float:
"""Compute conditional mis-coverage for given one-hot labels.
Args:
confidence_sets: confidence sets on test set as 0-1 array
one_hot_labels: ground truth labels on test set in one-hot format
conditional_labels: conditional labels to compute coverage on a subset
conditional_label: selected conditional to compute coverage for
Returns:
Mis-coverage.
"""
return compute_conditional_multi_coverage(
confidence_sets, one_hot_labels,
conditional_labels, conditional_label)
def compute_conditional_miscoverage_with_checks(
confidence_sets: jnp.ndarray, one_hot_labels: jnp.ndarray,
conditional_labels: jnp.ndarray, conditional_label: int) -> float:
"""compute_conditional_miscoverage with additional argument checks."""
_check_one_hot_labels(confidence_sets, one_hot_labels)
_check_conditional_labels(confidence_sets, conditional_labels)
return compute_conditional_miscoverage(
confidence_sets, one_hot_labels, conditional_labels, conditional_label)
def _check_confidence_sets(confidence_sets: jnp.ndarray):
"""Helper to check shape of confidence sets.
Args:
confidence_sets: predicted confidence sets
Raises:
ValueError if shape is incorrect.
"""
if confidence_sets.ndim != 2:
raise ValueError(
'Expecting confidence_sets of shape n_examples x n_classes.')
if confidence_sets.shape[1] == 0:
raise ValueError('Expecting at least one class.')
def compute_conditional_size(
confidence_sets: jnp.ndarray,
conditional_labels: jnp.ndarray,
conditional_label: int) -> Tuple[float, int]:
"""Compute confidence set size.
Args:
confidence_sets: confidence sets on test set
conditional_labels: conditional labels to compute size on
conditional_label: selected conditional to compute size for
Returns:
Average size.
"""
selected = (conditional_labels == conditional_label)
num_examples = jnp.sum(selected)
size = selected * jnp.sum(confidence_sets, axis=1)
size = jnp.where(num_examples == 0, 0, jnp.sum(size)/num_examples)
return size, num_examples
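# Example (sketch with hypothetical values): the average set size over the
# selected examples is returned together with the number of selected examples.
#
#   confidence_sets = jnp.array([[1, 1, 0], [1, 0, 0], [1, 1, 1]])
#   conditional_labels = jnp.array([0, 0, 1])
#   compute_conditional_size(confidence_sets, conditional_labels, 0)
#   # -> (1.5, 2): sets of size 2 and 1 for the two selected examples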
def compute_conditional_size_with_checks(
confidence_sets: jnp.ndarray,
conditional_labels: jnp.ndarray,
conditional_label: int) -> Tuple[float, int]:
"""compute_conditional_size with additional argument checks."""
_check_confidence_sets(confidence_sets)
_check_conditional_labels(confidence_sets, conditional_labels)
return compute_conditional_size(
confidence_sets, conditional_labels, conditional_label)
def compute_size(confidence_sets: jnp.ndarray) -> Tuple[float, int]:
"""Compute unconditional coverage using compute_conditional_coverage."""
return compute_conditional_size(
confidence_sets, jnp.zeros(confidence_sets.shape[0], int), 0)
def compute_size_with_checks(confidence_sets: jnp.ndarray) -> Tuple[float, int]:
"""compute_size with additional argument checks raising ValueError."""
return compute_conditional_size_with_checks(
confidence_sets, jnp.zeros(confidence_sets.shape[0], int), 0)
| conformal_training-main | evaluation.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Smooth implementation of conformal prediction approaches [1] and [2].
This module uses differentiable sorting to implement conformal prediction in a
differentiable manner [1,2], considering both calibration and prediction steps.
[1] Yaniv Romano, Matteo Sesia, Emmanuel J. Candes.
Classification with valid and adaptive coverage.
NeurIPS, 2020.
[2] Mauricio Sadinle, Jing Lei, and Larry A. Wasserman.
Least ambiguous set-valued classifiers with bounded error levels.
ArXiv, 2016.
"""
import functools
from typing import Optional, Callable, Tuple, Any
import jax
import jax.numpy as jnp
import variational_sorting_net
_SmoothQuantileFn = Callable[[Any, float], float]
_ForwardFn = Callable[
[jnp.ndarray, jnp.ndarray, jnp.ndarray],
Tuple[jnp.ndarray, jnp.ndarray]]
_ForwardBackwardFn = Callable[
[jnp.ndarray, jnp.ndarray],
Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray]]
def _check_conformal_quantile(
array: jnp.ndarray, q: float,
sos: variational_sorting_net.VariationalSortingNet, dispersion: float):
"""Helper to check quantile arguments.
Args:
array: input array to compute quantile of
q: quantile to compute
sos: smooth order stat object for sorting
dispersion: dispersion for smooth sorting
Raises:
ValueErrors if shape or q invalid.
"""
if array.size == 0:
raise ValueError('Expecting non-empty array.')
if array.ndim != 1:
raise ValueError('Expecting array of shape n.')
if q < 0 or q > 1:
raise ValueError('Expecting q in [0,1].')
if sos.comms['num_wires'] != array.shape[0]:
raise ValueError('Comm pattern has incorrect number of wires.')
if dispersion <= 0:
raise ValueError('Expecting dispersion strictly greater than zero.')
def smooth_conformal_quantile(
array: jnp.ndarray, q: float,
sos: variational_sorting_net.VariationalSortingNet,
dispersion: float) -> float:
"""Smooth implementation of conformal quantile.
Args:
array: input array to compute quantile of
q: quantile to compute
sos: smooth order stat object
dispersion: dispersion for smooth sorting
Returns:
(1 + 1/array.shape[0]) * q quantile of array.
"""
return sos.quantile(
array, dispersion=dispersion, alpha=(1 + 1./array.shape[0]) * q, tau=0.5)
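# Example (sketch, building the sorting network as in the accompanying tests
# via the separate sorting_nets module; values are hypothetical): computes a
# differentiable surrogate of the (1 + 1/n)-corrected conformal quantile.
#
#   comm = sorting_nets.comm_pattern_batcher(100, make_parallel=True)
#   sos = variational_sorting_net.VariationalSortingNet(
#       comm, smoothing_strategy='entropy_reg', sorting_strategy='hard')
#   scores = jax.random.uniform(jax.random.PRNGKey(0), (100,))
#   quantile = smooth_conformal_quantile(scores, 0.9, sos, dispersion=0.001)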
def smooth_conformal_quantile_with_checks(
array: jnp.ndarray, q: float,
sos: variational_sorting_net.VariationalSortingNet,
dispersion: float) -> float:
"""smooth_conformal_quantile with extra argument checks."""
_check_conformal_quantile(array, q, sos, dispersion)
return smooth_conformal_quantile(array, q, sos, dispersion)
def _check_probabilities(probabilities: jnp.ndarray):
"""Helper for checking probabilities for prediction or calibration.
Args:
probabilities: predicted probabilities on test or validation set
Raises:
ValueError if invalid arguments
"""
if len(probabilities.shape) != 2:
raise ValueError('Expecting probabilities of shape n_examples x n_classes.')
if probabilities.size == 0:
raise ValueError('probabilities is empty.')
def _check_sos(
probabilities: jnp.ndarray,
sos: variational_sorting_net.VariationalSortingNet,
dispersion: float):
"""Helper for checking arguments for prediction or calibration.
Args:
probabilities: predicted probabilities on test or validation set
sos: smooth order network
dispersion: dispersion to use for smooth sort
Raises:
ValueError if invalid arguments
"""
if sos.comms['num_wires'] != probabilities.shape[1]:
raise ValueError('VariationalSortingNet used to sort n_classes elements, '
'comm pattern has incorrect number of wires.')
if dispersion <= 0:
raise ValueError('Expecting dispersion strictly greater than zero.')
def _check_predict(tau: float, temperature: float):
"""Helper for checking arguments for prediction.
Args:
tau: threshold
temperature: temperature for smooth thresholding
Raises:
ValueError if invalid arguments
"""
if tau < 0:
raise ValueError('Expecting tau to be >= 0.')
if temperature <= 0:
raise ValueError('Expecting temperature strictly greater than zero.')
def _check_calibrate(
probabilities: jnp.ndarray,
labels: jnp.ndarray,
alpha: float):
"""Helper for checking argumetns for calibration.
Args:
probabilities: predicted probabilities on validation set
labels: ground truth labels on validation set
alpha: confidence level
Raises:
ValueError if arguments invalid
"""
if len(labels.shape) != 1:
raise ValueError('Expecting labels of shape n_examples.')
if probabilities.shape[0] != labels.shape[0]:
raise ValueError(
'Number of predicted probabilities does not match number of labels.')
if jnp.max(labels) >= probabilities.shape[1]:
raise ValueError('More labels than predicted in probabilities.')
if not jnp.issubdtype(labels.dtype, jnp.integer):
raise ValueError('Expecting labels to be integers.')
if alpha < 0 or alpha > 1:
raise ValueError('Expecting alpha to be in [0, 1].')
def _check_groups(probabilities, groups):
"""Helper for checking groups in subset aware class-conditional prediction.
Args:
probabilities: predicted probabilities
groups: class group labels
Raises:
    ValueError if groups are incorrect.
"""
if groups.ndim != 1:
raise ValueError('Expecting group labels of shape n_classes.')
if not jnp.issubdtype(groups.dtype, jnp.integer):
raise ValueError('Expecting group labels to be integers.')
if groups.size != probabilities.shape[1]:
raise ValueError('Number of group labels is not n_classes.')
def smooth_predict_threshold(
probabilities: jnp.ndarray, tau: float, temperature: float) -> jnp.ndarray:
"""Smooth implementation of predict_threshold.
Uses a sigmoid to implement soft thresholding.
Args:
probabilities: predicted probabilities or logits
tau: threshold
temperature: temperature for soft-thresholding
Returns:
Confidence sets
"""
return jax.nn.sigmoid((probabilities - tau) / temperature)
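# Example (sketch with hypothetical values): with a small temperature the
# sigmoid approaches a hard indicator of probabilities >= tau.
#
#   probabilities = jnp.array([[0.7, 0.2, 0.1]])
#   smooth_predict_threshold(probabilities, tau=0.5, temperature=0.01)
#   # -> approximately [[1., 0., 0.]]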
def smooth_predict_threshold_with_checks(
probabilities: jnp.ndarray, tau: float, temperature: float) -> jnp.ndarray:
"""smooth_predict_threshold with extra argument checks."""
_check_probabilities(probabilities)
_check_predict(tau, temperature)
return smooth_predict_threshold(probabilities, tau, temperature)
def _get_sos_fns(
sos: variational_sorting_net.VariationalSortingNet,
dispersion: float) -> Tuple[_ForwardFn, _ForwardBackwardFn]:
"""Get forward and backward functions with given dispersion from sos.
Args:
sos: smooth order statistic object to use forward and backward from
dispersion: dispersion to use for forward and backward
Returns:
Partials for forward and forward with backward
"""
forward_fn = functools.partial(
sos.forward_only, dispersion=dispersion,
lower=0, upper=None, key=None)
forward_backward_fn = functools.partial(
sos.forward_backward, v=None, dispersion=dispersion,
lower=0, upper=None, key=None)
return forward_fn, forward_backward_fn
def smooth_predict_aps(
probabilities: jnp.ndarray,
tau: float, sos: variational_sorting_net.VariationalSortingNet,
rng: Optional[jnp.ndarray] = None,
temperature: float = 0.01, dispersion: float = 0.001) -> jnp.ndarray:
"""Smooth version of predict_raps without regularization.
Uses variational sorting networks to perform smooth sorting and sigmoid for
thresholding. The final confidence sets are fully differentiable with respect
to the input probabilities.
Args:
probabilities: predicted probabilities on test set
tau: threshold
sos: smooth order network
rng: PRNG key for sampling random variables
temperature: temperature for soft thresholding, the lower the harder
the thresholding
dispersion: dispersion to use for smooth sort
Returns:
    Confidence sets as arrays in [0, 1] after soft thresholding with given
temperature.
Raises:
ValueError if probabilities have incorrect shape or tau is invalid.
"""
forward_fn, forward_backward_fn = _get_sos_fns(sos, dispersion)
def smooth_sort_fn(p, d):
"""Helper to vmap differentiable sorting across all examples.
Args:
p: vector of probabilities
d: single number to put on the diagonal of the upper triangular matrix L
Returns:
Confidence sets for given probabilities
"""
# Diagonal is set to zero by default, which is basically equivalent to
# computing the cumulative sorted probability and afterwards
# subtracting the individual (sorted) probabilities again.
# This is done as, without randomization, we want the class
# that just exceeds the threshold, to be included in the confidence set.
matrix_l = jnp.triu(jnp.ones((p.shape[0], p.shape[0])))
matrix_l = matrix_l.at[jnp.diag_indices(matrix_l.shape[0])].set(d)
_, cum_sorted_p = forward_fn(-p, p, matrix_l)
sorted_confidence_set = jax.nn.sigmoid(-(cum_sorted_p - tau)/temperature)
_, confidence_set, _ = forward_backward_fn(-p, sorted_confidence_set)
return confidence_set
if rng is not None:
diagonals = jax.random.uniform(rng, (probabilities.shape[0],))
else:
diagonals = jnp.zeros(probabilities.shape[0])
smooth_sort_vmap = jax.vmap(smooth_sort_fn, (0, 0), 0)
return smooth_sort_vmap(probabilities, diagonals)
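# Example (sketch, mirroring the accompanying tests; `probabilities` is a
# hypothetical n_examples x 10 array): the sorting network has to be built for
# n_classes wires; with small temperature and dispersion the output approaches
# the hard APS confidence sets.
#
#   comm = sorting_nets.comm_pattern_batcher(10, make_parallel=True)
#   sos = variational_sorting_net.VariationalSortingNet(
#       comm, smoothing_strategy='entropy_reg', sorting_strategy='hard')
#   confidence_sets = smooth_predict_aps(
#       probabilities, tau=0.9, sos=sos, rng=None,
#       temperature=0.00001, dispersion=0.00001)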
def smooth_predict_aps_with_checks(
probabilities: jnp.ndarray,
tau: float, sos: variational_sorting_net.VariationalSortingNet,
rng: Optional[jnp.ndarray] = None,
temperature: float = 0.01, dispersion: float = 0.001) -> jnp.ndarray:
"""smooth_predict_aps with extra argument checks raising ValueError."""
_check_probabilities(probabilities)
_check_sos(probabilities, sos, dispersion)
_check_predict(tau, temperature)
return smooth_predict_aps(
probabilities, tau, sos, rng, temperature, dispersion)
def smooth_calibrate_threshold(
probabilities: jnp.ndarray, labels: jnp.ndarray, alpha: float,
smooth_quantile_fn: _SmoothQuantileFn) -> float:
"""Smooth calibrate_threshold version.
Args:
probabilities: predicted probabilities or logits
labels: corresponding ground truth labels
alpha: confidence level
smooth_quantile_fn: smooth quantile function to use
Returns:
Threshold
"""
conformity_scores = probabilities[
jnp.arange(probabilities.shape[0]), labels.astype(int)]
return smooth_quantile_fn(conformity_scores, alpha)
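# Example (sketch; `scores_sos` is a hypothetical VariationalSortingNet built
# for n_examples wires, as done in the accompanying tests): the smooth
# quantile function is typically a partial of smooth_conformal_quantile.
#
#   smooth_quantile_fn = functools.partial(
#       smooth_conformal_quantile, sos=scores_sos, dispersion=0.001)
#   tau = smooth_calibrate_threshold(
#       probabilities, labels, alpha=0.1,
#       smooth_quantile_fn=smooth_quantile_fn)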
def smooth_calibrate_threshold_with_checks(
probabilities: jnp.ndarray, labels: jnp.ndarray, alpha: float,
smooth_quantile_fn: _SmoothQuantileFn) -> float:
"""smooth_calibrate_threshold with extra argument checks."""
_check_probabilities(probabilities)
_check_calibrate(probabilities, labels, alpha)
return smooth_calibrate_threshold(
probabilities, labels, alpha, smooth_quantile_fn)
def smooth_calibrate_aps(
probabilities: jnp.ndarray,
labels: jnp.ndarray,
alpha: float,
sos: variational_sorting_net.VariationalSortingNet,
dispersion: float,
smooth_quantile_fn: _SmoothQuantileFn,
rng: Optional[jnp.ndarray] = None,
) -> float:
"""Smooth implementation of calibrate_raps without regularization.
Args:
probabilities: predicted probabilities on validation set
labels: ground truth labels on validation set
alpha: confidence level
sos: smooth order network for probabilities, i.e.,
has to allow sorting n_classes elements
dispersion: dispersion to use for smooth sort.
smooth_quantile_fn: smooth conformal quantile function to use
rng: PRNG key for sampling random variables
Returns:
Threshold.
Raises:
ValueError if probabilities have incorrect shape or alpha is invalid.
"""
forward_fn, forward_backward_fn = _get_sos_fns(sos, dispersion)
def smooth_sort_fn(p, d, l):
matrix_l = jnp.triu(jnp.ones((p.shape[0], p.shape[0])))
matrix_l = matrix_l.at[jnp.diag_indices(matrix_l.shape[0])].set(d)
_, cum_sorted_p = forward_fn(
-p, p, matrix_l)
_, cum_p, _ = forward_backward_fn(
-p, cum_sorted_p)
return cum_p[l]
if rng is not None:
diagonals = jax.random.uniform(rng, (probabilities.shape[0],))
else:
diagonals = jnp.ones(probabilities.shape[0])
smooth_sort_vmap = jax.vmap(smooth_sort_fn, (0, 0, 0), 0)
scores = smooth_sort_vmap(probabilities, diagonals, labels)
return smooth_quantile_fn(scores, 1 - alpha)
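# Example (sketch; `probabilities_sos` and `scores_sos` are hypothetical
# VariationalSortingNet objects over n_classes and n_examples wires
# respectively, mirroring the accompanying tests):
#
#   smooth_quantile_fn = functools.partial(
#       smooth_conformal_quantile, sos=scores_sos, dispersion=0.00001)
#   tau = smooth_calibrate_aps(
#       probabilities, labels, alpha=0.1, sos=probabilities_sos,
#       dispersion=0.00001, smooth_quantile_fn=smooth_quantile_fn, rng=None)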
def smooth_calibrate_aps_with_checks(
probabilities: jnp.ndarray,
labels: jnp.ndarray,
alpha: float,
sos: variational_sorting_net.VariationalSortingNet,
dispersion: float,
smooth_quantile_fn: _SmoothQuantileFn,
rng: Optional[jnp.ndarray] = None,
) -> float:
"""smooth_calibrate_aps with additional argument checks."""
_check_probabilities(probabilities)
_check_sos(probabilities, sos, dispersion)
_check_calibrate(probabilities, labels, alpha)
return smooth_calibrate_aps(
probabilities, labels, alpha, sos,
dispersion, smooth_quantile_fn, rng)
| conformal_training-main | smooth_conformal_prediction.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for data."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import chex
import numpy as np
import tensorflow_datasets as tfds
import data as cpdata
DATA_DIR = './data/'
class DataTest(parameterized.TestCase):
@parameterized.parameters([
dict(dataset='mnist', train_examples=60000, val_examples=10000),
dict(dataset='mnist', train_examples=60000, val_examples=0),
])
def test_load_data_split_sizes(self, dataset, train_examples, val_examples):
data = cpdata.load_data_split(
dataset, val_examples=val_examples, data_dir=DATA_DIR)
ds_sizes = data['sizes']
self.assertLen(data['train'], train_examples - val_examples)
self.assertLen(data['test'], 10000)
self.assertEqual(ds_sizes['train'], train_examples - val_examples)
self.assertEqual(ds_sizes['val'], val_examples)
self.assertEqual(ds_sizes['test'], 10000)
if val_examples > 0:
self.assertLen(data['val'], val_examples)
else:
self.assertIsNone(data['val'])
def test_load_data_split_errors(self):
with self.assertRaises(ValueError):
cpdata.load_data_split('mnist', val_examples=-1, data_dir=DATA_DIR)
@parameterized.parameters([
dict(batch_size=128),
])
def test_load_batches(self, batch_size):
val_examples = 59500
train_examples = 60000 - val_examples
data = cpdata.load_data_split(
'mnist', val_examples=val_examples, data_dir=DATA_DIR)
data['train'] = data['train'].batch(batch_size)
b = 0
for b, (inputs, labels) in enumerate(cpdata.load_batches(data['train'])):
chex.assert_rank([inputs, labels], [4, 1])
# Batch size might be smaller for the last batch!
if b == 0:
chex.assert_shape(inputs, (batch_size, 28, 28, 1))
chex.assert_shape(labels, (batch_size,))
# For MNIST, the scaling has to happen manually.
self.assertGreaterEqual(255, np.max(inputs))
self.assertGreaterEqual(np.max(inputs), 0)
self.assertGreaterEqual(9, np.max(labels))
self.assertEqual(b + 1, np.ceil(train_examples/batch_size))
  # Testing all augmentations would cause a timeout, so we only test
  # autoaugment from now on as that is the most complex augmentation.
@parameterized.parameters([
dict(augmentation_name='augment_flip_crop', augmentation_args=dict(
shape=(32, 32, 3), crop=4, mode='CONSTANT', replace=121)),
dict(augmentation_name='augment_autoaugment',
augmentation_args=dict(shape=(32, 32, 3), replace=121)),
dict(augmentation_name='augment_cutout',
augmentation_args=dict(replace=121, pad=8)),
])
def test_augment(self, augmentation_name, augmentation_args):
batch_size = 100
# Not using cpdata.load_data_split to avoid timeouts.
ds = tfds.load(
'cifar10', split='train[:1000]', with_info=False, data_dir=DATA_DIR)
augmentation = getattr(cpdata, augmentation_name, None)
self.assertIsNotNone(augmentation)
augmentation = functools.partial(augmentation, **augmentation_args)
ds = ds.map(augmentation).batch(batch_size)
for inputs, _ in cpdata.load_batches(ds):
chex.assert_shape(inputs, (batch_size, 32, 32, 3))
self.assertGreaterEqual(255, np.max(inputs))
self.assertGreaterEqual(np.max(inputs), 0)
break
if __name__ == '__main__':
absltest.main()
| conformal_training-main | data_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for loading datasets for training."""
import functools
from typing import Dict, Any, Tuple
from absl import logging
import jax.numpy as jnp
import ml_collections as collections
import tensorflow as tf
import data as cpdata
def apply_cifar_augmentation(
config: collections.ConfigDict, ds: tf.data.Dataset,
shape: Tuple[int, int, int]) -> tf.data.Dataset:
"""Applies data augmentation for CIFAR dataset.
Args:
config: training configuration
ds: dataset to apply augmentation to
shape: image shape
Returns:
Augmented dataset.
"""
if config.cifar_augmentation == 'standard':
standard_fn = functools.partial(
cpdata.augment_flip_crop,
shape=shape, crop=4, mode='CONSTANT', replace=121)
ds = ds.map(standard_fn)
elif config.cifar_augmentation == 'autoaugment':
autoaugment_fn = functools.partial(
cpdata.augment_autoaugment, shape=shape, replace=121)
ds = ds.map(autoaugment_fn)
elif config.cifar_augmentation == 'standard+cutout':
standard_fn = functools.partial(
cpdata.augment_flip_crop,
shape=shape, crop=4, mode='CONSTANT', replace=121)
cutout_fn = functools.partial(cpdata.augment_cutout, replace=121, pad=8)
ds = ds.map(standard_fn)
ds = ds.map(cutout_fn)
elif config.cifar_augmentation == 'standard+autoaugment':
standard_fn = functools.partial(
cpdata.augment_flip_crop,
shape=shape, crop=4, mode='CONSTANT', replace=121)
autoaugment_fn = functools.partial(
cpdata.augment_autoaugment, shape=shape, replace=121)
ds = ds.map(standard_fn)
ds = ds.map(autoaugment_fn)
elif config.cifar_augmentation == 'standard+autoaugment+cutout':
standard_fn = functools.partial(
cpdata.augment_flip_crop,
shape=shape, crop=4, mode='CONSTANT', replace=121)
autoaugment_fn = functools.partial(
cpdata.augment_autoaugment, shape=shape, replace=121)
cutout_fn = functools.partial(cpdata.augment_cutout, replace=121, pad=8)
ds = ds.map(standard_fn)
ds = ds.map(autoaugment_fn)
ds = ds.map(cutout_fn)
else:
raise ValueError('Invalid augmentation for CIFAR10.')
return ds
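# Example (sketch with a hypothetical, minimal config): selecting the standard
# flip/crop augmentation for CIFAR-sized images.
#
#   config = collections.ConfigDict()
#   config.cifar_augmentation = 'standard'
#   ds = apply_cifar_augmentation(config, ds, shape=(32, 32, 3))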
def get_data_stats(config: collections.ConfigDict) -> Dict[str, Any]:
"""Get data statistics for selected dataset.
Retrieves data sizes, shapes and whitening statistics based on the
dataset selected in config.dataset.
Args:
config: training configuration
Returns:
Dictionary containing statistics of loaded data split.
"""
data = {}
if config.dataset == 'wine_quality':
data['classes'] = 2
train_examples = int(5000*0.8) - config.val_examples
test_examples = 5000 - config.val_examples - train_examples
data['sizes'] = {
'train': train_examples,
'val': config.val_examples,
'test': test_examples,
}
data['shape'] = (1, 1, 11)
data['means'] = [
10.532083, 0.04565686, 0.33281144, 0.99399555, 6.850714,
35.23343, 3.187603, 6.373672, 0.49019712, 138.01242, 0.27974856
]
data['stds'] = [
1.2350279, 0.022253787, 0.119335935, 0.003012671, 0.85485053,
17.152323, 0.15184218, 5.0720124, 0.11392499, 42.492615, 0.102494776
]
elif config.dataset == 'mnist':
data['classes'] = 10
data['sizes'] = {
'train': 60000 - config.val_examples,
'val': config.val_examples,
'test': 10000,
}
data['shape'] = (28, 28, 1)
data['means'] = [0.5]
data['stds'] = [0.5]
elif config.dataset == 'emnist_byclass':
    # For evaluation, we want to keep the number of test and validation
    # examples down, because more than 10-20k test examples slows down
    # evaluation considerably, and we run into OOM problems.
data['classes'] = 26 * 2
data['sizes'] = {
'train': 104000 - config.val_examples, # = 52 * 2000
'val': config.val_examples,
'test': 10400, # = 52 * 200
}
data['shape'] = (28, 28, 1)
data['means'] = [0.5]
data['stds'] = [0.5]
elif config.dataset == 'fashion_mnist':
data['classes'] = 10
data['sizes'] = {
'train': 60000 - config.val_examples,
'val': config.val_examples,
'test': 10000,
}
data['shape'] = (28, 28, 1)
data['means'] = [0.5]
data['stds'] = [0.5]
elif config.dataset == 'cifar10':
data['classes'] = 10
data['sizes'] = {
'train': 50000 - config.val_examples,
'val': config.val_examples,
'test': 10000,
}
data['shape'] = (32, 32, 3)
data['means'] = [0.49137254902, 0.482352941176, 0.446666666667]
data['stds'] = [0.247058823529, 0.243529411765, 0.261568627451]
elif config.dataset == 'cifar100':
data['classes'] = 100
data['sizes'] = {
'train': 50000 - config.val_examples,
'val': config.val_examples,
'test': 10000,
}
    data['shape'] = (32, 32, 3)
data['means'] = [0.491399755166, 0.4821585592989, 0.446530913373]
data['stds'] = [0.2470322514179, 0.2434851647, 0.2615878392604]
else:
raise ValueError('Invalid dataset.')
data['means'] = jnp.array(data['means'])
data['stds'] = jnp.array(data['stds'])
return data
def _check_batch_sizes(config: collections.ConfigDict, data: Dict[str, Any]):
"""Helper to check whether dataset sizes are divisible by batch sizes.
Args:
config: training configuration
data: datasets and sizes
"""
for key, batch_size in zip([
'train', 'test', 'val'
], [
config.batch_size, config.test_batch_size, config.test_batch_size,
]):
if data['sizes'][key] % batch_size != 0:
raise ValueError(
'Trying to do conformal training with batch size %d '
'but %s set size %d is not divisible by the batch size '
'(and drop_remainder is False).' % (
batch_size, key, data['sizes'][key],
))
def _batch_sets(
config: collections.ConfigDict, data: Dict[str, Any], drop_remainder: bool):
"""Helper to take care of training set shuffling.
Args:
config: training configuration
data: datasets and sizes
drop_remainder: whether to drop the remaining examples if they
cannot fill a full batch
"""
# For some datasets, we need to drop any batch that is smaller than
# the requested batch size at the end. This is because, for conformal
# training, the batch size is fixed due to the smooth sorting component used.
# So, to be fair, we just drop any batch at the end.
if data['sizes']['train'] % config.batch_size != 0:
drop_remainder = True
logging.warning(
'dropping last batch as %d training examples not divisible '
'by %d batch size!', data['sizes']['train'], config.batch_size)
# Unshuffled and clean versions for computing logits in a
# deterministic way.
data['train_ordered'] = data['train'].batch(
config.batch_size, drop_remainder=drop_remainder)
data['train_clean'] = data['train_clean'].batch(
config.batch_size, drop_remainder=drop_remainder)
  # We allow running cross-validation-like experiments by repeating the
  # training set config.resampling times, shuffling, and then taking the
  # first data['sizes']['train'] examples. This creates a training set of
  # the same size but emulates sampling with up to config.resampling
  # replacements.
if config.resampling:
if config.resampling <= 1:
raise ValueError('Cannot resample training set once or less often.')
data['train'] = data['train'].repeat(config.resampling)
data['train'] = data['train'].shuffle(
config.resampling * data['sizes']['train'], seed=config.seed)
data['train'] = data['train'].take(data['sizes']['train'])
else:
data['train'] = data['train'].shuffle(
data['sizes']['train'], seed=config.seed)
data['train'] = data['train'].batch(
config.batch_size, drop_remainder=drop_remainder)
if data['val'] is not None:
data['val'] = data['val'].batch(
config.test_batch_size, drop_remainder=drop_remainder)
data['test'] = data['test'].batch(
config.test_batch_size, drop_remainder=drop_remainder)
if not drop_remainder:
_check_batch_sizes(config, data)
def get_data(config: collections.ConfigDict) -> Dict[str, Any]:
"""Get data for training and testing.
Args:
config: training configuration
Returns:
Dictionary containing training and test datasets, number of classes,
and mean and std per channel for training dataset.
"""
def map_mnist_cifar(batch):
"""Mapping for image int to float on MNIST/CIFAR."""
return {
'image': tf.cast(batch['image'], tf.float32) / 255.,
'label': batch['label'],
}
def map_emnist_byclass_transpose_and_labels(batch):
"""Helper to map labels for EMNIST/byClass."""
return {
'image': tf.cast(
tf.transpose(batch['image'], perm=[1, 0, 2]), tf.float32) / 255.,
'label': batch['label'] - 10,
}
def filter_emnist_byclass(batch):
"""Helper to filter out digits in EMNIST/byClass."""
return batch['label'] >= 10
def map_wine_quality_expand_and_relabel(batch):
"""Helper to expand features to image size for win quality."""
keys = [
'alcohol',
'chlorides',
'citric acid',
'density',
'fixed acidity',
'free sulfur dioxide',
'pH',
'residual sugar',
'sulphates',
'total sulfur dioxide',
'volatile acidity',
]
features = tf.stack(
[tf.cast(batch['features'][k], tf.float32) for k in keys], axis=0)
return {
'image': tf.cast(tf.reshape(features, (1, 1, -1)), tf.float32),
'label': 1 if batch['quality'] >= 6 else 0,
}
data = get_data_stats(config)
drop_remainder = False
if config.dataset == 'wine_quality':
train_examples = data['sizes']['train']
val_examples = data['sizes']['val']
data_split = cpdata.create_data_split(
'wine_quality/white',
train_examples, val_examples, padding_size=5000)
data['train'] = data_split['train'].map(map_wine_quality_expand_and_relabel)
data['val'] = data_split['val']
if data['val'] is not None:
data['val'] = data['val'].map(map_wine_quality_expand_and_relabel)
data['test'] = data_split['test'].map(map_wine_quality_expand_and_relabel)
data['train_clean'] = data['train']
# Adapt data split to avoid check on batch size below.
data_split['sizes'] = data['sizes']
elif config.dataset == 'emnist_byclass':
# The validation example number is a fix for type checking:
# We want data_split['val'] to be None if val_examples=0, otherwise
# type checks below will fail.
    # So we request 1 validation example if val_examples > 0 and 0 otherwise.
train_examples = data['sizes']['train']
val_examples = data['sizes']['val']
test_examples = data['sizes']['test']
data_split = cpdata.load_data_split(
'emnist/byclass', val_examples=min(config.val_examples, 1))
    # The train and validation sets are created from the provided train
    # dataset by filtering, mapping and then taking
    # train_examples + val_examples examples.
data['train'] = data_split['train'].filter(filter_emnist_byclass)
data['train'] = data['train'].map(map_emnist_byclass_transpose_and_labels)
data['train'] = data['train'].take(train_examples + val_examples)
data['val'] = data_split['val']
if data['val'] is not None:
data['val'] = data['train'].skip(train_examples)
# Important to take after defining the validation set!
data['train'] = data['train'].take(train_examples)
data['test'] = data_split['test'].filter(filter_emnist_byclass)
data['test'] = data['test'].map(map_emnist_byclass_transpose_and_labels)
data['test'] = data['test'].take(test_examples)
data['train_clean'] = data['train']
# Adapt data split to avoid check on batch size below.
data_split['sizes'] = data['sizes']
elif config.dataset in ['mnist', 'fashion_mnist', 'cifar10', 'cifar100']:
data_split = cpdata.load_data_split(
config.dataset, val_examples=config.val_examples)
# We need to apply data augmentation before the mapping as the mapping
# divides by 255 (which was before done in load_batches), but
# data augmentation operates on proper images, not floats.
data['train'] = data_split['train']
if config.dataset.find('cifar') >= 0:
logging.info('Adding data augmentation for CIFAR.')
data['train'] = apply_cifar_augmentation(
config, data['train'], data_split['shape'])
data['train'] = data['train'].map(map_mnist_cifar)
# Dataset without data augmentation:
data['train_clean'] = data_split['train'].map(map_mnist_cifar)
data['val'] = data_split['val']
if data['val'] is not None:
data['val'] = data['val'].map(map_mnist_cifar)
data['test'] = data_split['test'].map(map_mnist_cifar)
else:
raise ValueError('Invalid dataset.')
data['sizes'] = data_split['sizes']
data['shape'] = data_split['shape']
# This takes care of shuffling, batching and resampling with replacement
# if requested.
_batch_sets(config, data, drop_remainder)
return data
| conformal_training-main | data_utils.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for smooth conformal prediction."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import jax.numpy as jnp
import numpy as np
import sorting_nets
import variational_sorting_net
import conformal_prediction as cp
import smooth_conformal_prediction as scp
import test_utils as cptutils
class SmoothConformalPredictionTest(parameterized.TestCase):
def setUp(self):
super(SmoothConformalPredictionTest, self).setUp()
np.random.seed(0)
def _get_smooth_order_stats(self, length):
comm = sorting_nets.comm_pattern_batcher(length, make_parallel=True)
sos = variational_sorting_net.VariationalSortingNet(
comm, smoothing_strategy='entropy_reg', sorting_strategy='hard')
return sos
@parameterized.parameters([
dict(num_examples=10000, num_classes=10, tau=0.9)
])
def test_smooth_predict_aps(self, num_examples, num_classes, tau):
# Randomness is generally not handled equivalently.
rng = None
labels = cptutils.get_labels(num_examples, num_classes)
probabilities = cptutils.get_probabilities(labels, 0)
confidence_sets = cp.predict_raps(
probabilities, tau, k_reg=None, lambda_reg=None, rng=rng)
dispersion = 0.00001
temperature = 0.00001
sos = self._get_smooth_order_stats(num_classes)
smooth_confidence_sets = scp.smooth_predict_aps(
probabilities, tau, sos, rng=rng,
temperature=temperature, dispersion=dispersion)
smooth_confidence_sets = smooth_confidence_sets.at[
smooth_confidence_sets > 0.5].set(1)
smooth_confidence_sets = smooth_confidence_sets.at[
smooth_confidence_sets <= 0.5].set(0)
np.testing.assert_equal(np.array(confidence_sets),
np.array(smooth_confidence_sets))
@parameterized.parameters([
dict(probabilities=np.array([]), tau=0.9,
temperature=0.01, dispersion=0.01, length=10),
dict(probabilities=np.zeros((100)), tau=0.9,
temperature=0.01, dispersion=0.01, length=10),
dict(probabilities=np.zeros((100, 10)), tau=-0.1,
temperature=0.01, dispersion=0.01, length=10),
dict(probabilities=np.zeros((100, 10)), tau=0.9,
temperature=0, dispersion=0.01, length=10),
dict(probabilities=np.zeros((100, 10)), tau=0.9,
temperature=0.01, dispersion=0, length=10),
dict(probabilities=np.zeros((100, 10)), tau=0.9,
temperature=-0.1, dispersion=0.01, length=10),
dict(probabilities=np.zeros((100, 10)), tau=0.9,
temperature=0.01, dispersion=-0.1, length=10),
dict(probabilities=np.zeros((100, 10)), tau=0.9,
temperature=0.01, dispersion=0.01, length=9),
])
def test_smooth_predict_aps_errors(
self, probabilities, tau, temperature, dispersion, length):
with self.assertRaises(ValueError):
sos = self._get_smooth_order_stats(length)
scp.smooth_predict_aps_with_checks(
jnp.array(probabilities), tau, sos, None, temperature, dispersion)
@parameterized.parameters([
dict(num_examples=1000, num_classes=10, alpha=0.9)
])
def test_smooth_calibrate_aps(self, num_examples, num_classes, alpha):
labels = cptutils.get_labels(num_examples, num_classes)
probabilities = cptutils.get_probabilities(labels, 0)
rng = None
k_reg = None
lambda_reg = None
# If we want to have equality below, temperature and dispersion need
# to be as low as possible to get results based on hard sorting and
# hard thresholding.
dispersion = 0.00001
tau = cp.calibrate_raps(
probabilities, labels, alpha=alpha,
k_reg=k_reg, lambda_reg=lambda_reg, rng=rng)
probabilities_sos = self._get_smooth_order_stats(num_classes)
scores_sos = self._get_smooth_order_stats(num_examples)
smooth_quantile_fn = functools.partial(
scp.smooth_conformal_quantile, sos=scores_sos, dispersion=dispersion)
tau_ = scp.smooth_calibrate_aps(
probabilities, labels, alpha=alpha,
sos=probabilities_sos, dispersion=dispersion,
smooth_quantile_fn=smooth_quantile_fn, rng=rng)
self.assertAlmostEqual(tau, tau_, places=2)
@parameterized.parameters([
dict(probabilities=np.array([]),
labels=np.array([], dtype=int), alpha=0.1,
length=10, dispersion=0.01),
dict(probabilities=np.zeros((100)),
labels=np.ones((100), dtype=int), alpha=0.1,
length=10, dispersion=0.01),
dict(probabilities=np.zeros((100, 10)),
labels=np.ones((100), dtype=int) * 99, alpha=0.1,
length=10, dispersion=0.01),
dict(probabilities=np.zeros((100, 10)),
labels=np.ones((100)) * 0.5, alpha=0.1,
length=10, dispersion=0.01),
dict(probabilities=np.zeros((100, 10)),
labels=np.ones((100), dtype=int), alpha=-0.1,
length=10, dispersion=0.01),
dict(probabilities=np.zeros((100, 10)),
labels=np.ones((100), dtype=int), alpha=1.1,
length=10, dispersion=0.01),
dict(probabilities=np.zeros((100, 10)),
labels=np.ones((100), dtype=int), alpha=0.1,
length=9, dispersion=0.01),
dict(probabilities=np.zeros((100, 10)),
labels=np.ones((100), dtype=int), alpha=0.1,
length=10, dispersion=-1),
])
def test_smooth_calibrate_aps_errors(
self, probabilities, labels, alpha, length, dispersion):
probabilities_sos = self._get_smooth_order_stats(length)
scores_sos = self._get_smooth_order_stats(100)
smooth_quantile_fn = functools.partial(
scp.smooth_conformal_quantile_with_checks,
sos=scores_sos, dispersion=0.1)
with self.assertRaises(ValueError):
scp.smooth_calibrate_aps_with_checks(
jnp.array(probabilities), jnp.array(labels), alpha,
sos=probabilities_sos, dispersion=dispersion,
smooth_quantile_fn=smooth_quantile_fn, rng=None)
@parameterized.parameters([
dict(num_examples=10000, num_classes=10, tau=0.9)
])
def test_predict_threshold(self, num_examples, num_classes, tau):
labels = cptutils.get_labels(num_examples, num_classes)
probabilities = cptutils.get_probabilities(labels, 0)
confidence_sets = cp.predict_threshold(probabilities, tau)
temperature = 0.00001
smooth_confidence_sets = scp.smooth_predict_threshold(
probabilities, tau, temperature=temperature)
smooth_confidence_sets = smooth_confidence_sets.at[
smooth_confidence_sets > 0.5].set(1)
smooth_confidence_sets = smooth_confidence_sets.at[
smooth_confidence_sets <= 0.5].set(0)
np.testing.assert_equal(np.array(confidence_sets),
np.array(smooth_confidence_sets))
@parameterized.parameters([
dict(probabilities=np.array([]), tau=0.9,
temperature=0.01),
dict(probabilities=np.zeros((100)), tau=0.9,
temperature=0.01),
dict(probabilities=np.zeros((100, 10)), tau=-0.1,
temperature=0.01),
dict(probabilities=np.zeros((100, 10)), tau=0.9,
temperature=0),
])
def test_predict_threshdold_errors(self, probabilities, tau, temperature):
with self.assertRaises(ValueError):
scp.smooth_predict_threshold_with_checks(
jnp.array(probabilities), tau, temperature)
@parameterized.parameters([
dict(num_examples=1000, num_classes=10, alpha=0.9)
])
def test_smooth_calibrate_threshold(self, num_examples, num_classes, alpha):
labels = cptutils.get_labels(num_examples, num_classes)
probabilities = cptutils.get_probabilities(labels, 0)
tau = cp.calibrate_threshold(probabilities, labels, alpha)
dispersion = 0.00001
scores_sos = self._get_smooth_order_stats(num_examples)
smooth_quantile_fn = functools.partial(
scp.smooth_conformal_quantile, sos=scores_sos, dispersion=dispersion)
tau_ = scp.smooth_calibrate_threshold(
probabilities, labels, alpha,
smooth_quantile_fn=smooth_quantile_fn)
self.assertAlmostEqual(tau, tau_, places=2)
@parameterized.parameters([
dict(probabilities=np.array([]),
labels=np.array([], dtype=int), alpha=0.1),
dict(probabilities=np.zeros((100)),
labels=np.ones((100), dtype=int), alpha=0.1),
dict(probabilities=np.zeros((100, 10)),
labels=np.ones((100), dtype=int) * 99, alpha=0.1),
dict(probabilities=np.zeros((100, 10)),
labels=np.ones((100)) * 0.5, alpha=0.1),
dict(probabilities=np.zeros((100, 10)),
labels=np.ones((100), dtype=int), alpha=-0.1),
])
def test_smooth_calibrate_threshold_errors(
self, probabilities, labels, alpha):
scores_sos = self._get_smooth_order_stats(100)
smooth_quantile_fn = functools.partial(
scp.smooth_conformal_quantile_with_checks,
sos=scores_sos, dispersion=0.1)
with self.assertRaises(ValueError):
scp.smooth_calibrate_threshold_with_checks(
jnp.array(probabilities), jnp.array(labels), alpha,
smooth_quantile_fn=smooth_quantile_fn)
if __name__ == '__main__':
absltest.main()
| conformal_training-main | smooth_conformal_prediction_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train models for experiments."""
from absl import logging
import haiku as hk
import ml_collections as collections
import data_utils as cpdatautils
import train_conformal as cpconformal
import train_coverage as cpcoverage
import train_normal as cpnormal
import train_utils as cputils
def train(config: collections.ConfigDict):
"""Helper to allow to directly call train with a config dict."""
rng = hk.PRNGSequence(config.seed)
data = cpdatautils.get_data(config)
logging.info('Loaded dataset.')
if config.learning_rate_schedule == 'exponential':
lr_scheduler_ = cputils.ExponentialLRScheduler
args = {'learning_rate_decay': config.exponential.learning_rate_decay}
elif config.learning_rate_schedule == 'step':
lr_scheduler_ = cputils.MultIStepLRScheduler
args = {'learning_rate_decay': config.step.learning_rate_decay}
else:
raise ValueError('Invalid learning rate schedule.')
lr_scheduler = lr_scheduler_(
learning_rate=config.learning_rate,
num_examples=data['sizes']['train'], batch_size=config.batch_size,
epochs=config.epochs, **args)
if config.optimizer == 'sgd':
optimizer = cputils.get_sgd_optimizer(
config.sgd.momentum, config.sgd.nesterov, lr_scheduler)
elif config.optimizer == 'adam':
optimizer = cputils.get_adam_optimizer(
config.adam.b1, config.adam.b2, config.adam.eps, lr_scheduler)
else:
raise ValueError('Invalid optimizer.')
logging.info('Loaded optimizer.')
if config.mode == 'normal':
trainer = cpnormal.TrainNormal(config, data, optimizer)
elif config.mode == 'coverage':
trainer = cpcoverage.TrainCoverage(config, data, optimizer)
elif config.mode == 'conformal':
trainer = cpconformal.TrainConformal(config, data, optimizer)
else:
raise ValueError('Invalid training mode.')
trainer.run(rng)
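# Example (sketch with a hypothetical, heavily truncated config): only fields
# referenced in this function are shown; a full configuration also defines the
# dataset, validation split, loss settings and more.
#
#   config = collections.ConfigDict()
#   config.seed = 0
#   config.learning_rate_schedule = 'step'
#   config.step = collections.ConfigDict({'learning_rate_decay': 0.1})
#   config.learning_rate = 0.05
#   config.batch_size = 500
#   config.epochs = 50
#   config.optimizer = 'sgd'
#   config.sgd = collections.ConfigDict({'momentum': 0.9, 'nesterov': True})
#   config.mode = 'conformal'
#   train(config)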
| conformal_training-main | train.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training utilities."""
from absl.testing import absltest
from absl.testing import parameterized
import jax.numpy as jnp
import numpy as np
import train_utils as cputils
class TrainUtilsTest(parameterized.TestCase):
@parameterized.parameters([
dict(learning_rate_decay=0.1, num_examples=50000, batch_size=128,
epochs=150),
dict(learning_rate_decay=0.1, num_examples=50000, batch_size=64,
epochs=150),
dict(learning_rate_decay=0.1, num_examples=10000, batch_size=128,
epochs=150),
dict(learning_rate_decay=0.1, num_examples=50000, batch_size=128,
epochs=250),
])
def test_multi_step_lr_scheduler(
self, learning_rate_decay, num_examples, batch_size, epochs):
learning_rate = 0.1
lr_scheduler = cputils.MultIStepLRScheduler(
learning_rate, learning_rate_decay, num_examples, batch_size, epochs)
# Test final and initial learning rate.
first_step = 0
self.assertAlmostEqual(
lr_scheduler(first_step), learning_rate)
final_step = num_examples*epochs//batch_size
self.assertAlmostEqual(
lr_scheduler(final_step), learning_rate * learning_rate_decay**3)
# Check each learning rate drop individually.
steps_per_epoch = np.ceil(num_examples/batch_size)
first_drop_epoch = epochs // 5 * 2
first_drop_step = first_drop_epoch * steps_per_epoch
self.assertAlmostEqual(lr_scheduler(first_drop_step - 1), learning_rate)
self.assertAlmostEqual(
lr_scheduler(first_drop_step), learning_rate * learning_rate_decay)
second_drop_epoch = epochs // 5 * 3
second_drop_step = second_drop_epoch * steps_per_epoch
self.assertAlmostEqual(
lr_scheduler(second_drop_step - 1), learning_rate * learning_rate_decay)
self.assertAlmostEqual(
lr_scheduler(second_drop_step), learning_rate * learning_rate_decay**2)
third_drop_epoch = epochs // 5 * 4
third_drop_step = third_drop_epoch * steps_per_epoch
self.assertAlmostEqual(
lr_scheduler(third_drop_step - 1),
learning_rate * learning_rate_decay**2)
self.assertAlmostEqual(
lr_scheduler(third_drop_step), learning_rate * learning_rate_decay**3)
def test_compute_general_classification_loss(self):
confidence_sets = jnp.zeros((100, 10))
loss_matrix = jnp.eye(10)
labels = jnp.zeros(100).astype(int)
loss = cputils.compute_general_classification_loss(
confidence_sets, labels, loss_matrix)
self.assertAlmostEqual(loss, 1.)
confidence_sets = confidence_sets.at[:, 0].set(1)
loss = cputils.compute_general_classification_loss(
confidence_sets, labels, loss_matrix)
self.assertAlmostEqual(loss, 0.)
confidence_sets = confidence_sets.at[:, 1].set(1)
loss = cputils.compute_general_classification_loss(
confidence_sets, labels, loss_matrix)
self.assertAlmostEqual(loss, 0.)
loss_matrix = jnp.ones((10, 10))
loss = cputils.compute_general_classification_loss(
confidence_sets, labels, loss_matrix)
self.assertAlmostEqual(loss, 1.)
confidence_sets = confidence_sets.at[:, 1].set(0)
loss = cputils.compute_general_classification_loss(
confidence_sets, labels, loss_matrix)
self.assertAlmostEqual(loss, 0.)
def test_compute_general_binary_cross_entropy_loss(self):
confidence_sets = jnp.zeros((100, 10))
loss_matrix = jnp.eye(10)
labels = jnp.zeros(100).astype(int)
loss = cputils.compute_general_binary_cross_entropy_loss(
confidence_sets, labels, loss_matrix)
self.assertAlmostEqual(loss, - jnp.log(1e-8))
confidence_sets = confidence_sets.at[:, 0].set(1)
loss = cputils.compute_general_binary_cross_entropy_loss(
confidence_sets, labels, loss_matrix)
self.assertAlmostEqual(loss, 0.)
confidence_sets = confidence_sets.at[:, 1].set(1)
loss = cputils.compute_general_binary_cross_entropy_loss(
confidence_sets, labels, loss_matrix)
self.assertAlmostEqual(loss, 0.)
loss_matrix = jnp.ones((10, 10))
loss = cputils.compute_general_binary_cross_entropy_loss(
confidence_sets, labels, loss_matrix)
self.assertAlmostEqual(loss, - jnp.log(1e-8), places=3)
confidence_sets = confidence_sets.at[:, 1].set(0)
loss = cputils.compute_general_binary_cross_entropy_loss(
confidence_sets, labels, loss_matrix)
self.assertAlmostEqual(loss, 0.)
@parameterized.parameters([
dict(num_classes=5, target_size=0),
dict(num_classes=5, target_size=1),
dict(num_classes=5, target_size=5),
])
def test_compute_hinge_size_loss(self, num_classes, target_size):
for k in range(num_classes):
confidence_sets = np.zeros((1, num_classes))
confidence_sets[:, :k] = 1
self.assertEqual(np.sum(confidence_sets), k)
size_loss = cputils.compute_hinge_size_loss(
jnp.array(confidence_sets), target_size=target_size,
transform=lambda x: x, weights=jnp.ones(confidence_sets.shape[0]))
expected_loss = max(k - target_size, 0)
self.assertAlmostEqual(size_loss, expected_loss)
size_loss = cputils.compute_hinge_size_loss(
jnp.array(confidence_sets), target_size=target_size,
transform=jnp.log, weights=jnp.ones(confidence_sets.shape[0]))
self.assertAlmostEqual(size_loss, np.log(expected_loss), places=3)
@parameterized.parameters([
dict(num_classes=5, target_size=0, bound_size=0, bound_weight=0.5),
dict(num_classes=5, target_size=1, bound_size=3, bound_weight=0.5),
dict(num_classes=5, target_size=1, bound_size=3, bound_weight=0.99),
dict(num_classes=5, target_size=5, bound_size=7, bound_weight=0.5),
])
def test_compute_hinge_bounded_size_loss(
self, num_classes, target_size, bound_size, bound_weight):
for k in range(num_classes):
confidence_sets = np.zeros((1, num_classes))
confidence_sets[:, :k] = 1
self.assertEqual(np.sum(confidence_sets), k)
size_loss = cputils.compute_hinge_bounded_size_loss(
jnp.array(confidence_sets), target_size=target_size,
bound_size=bound_size, bound_weight=bound_weight,
transform=lambda x: x, weights=jnp.ones(confidence_sets.shape[0]))
expected_loss = (1 - bound_weight) * max(k - target_size, 0)
expected_loss += bound_weight * max(k - bound_size, 0)
self.assertAlmostEqual(size_loss, expected_loss)
size_loss = cputils.compute_hinge_bounded_size_loss(
jnp.array(confidence_sets), target_size=target_size,
bound_size=bound_size, bound_weight=bound_weight,
transform=jnp.log, weights=jnp.ones(confidence_sets.shape[0]))
self.assertAlmostEqual(size_loss, np.log(expected_loss), places=3)
if __name__ == '__main__':
absltest.main()
| conformal_training-main | train_utils_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training loop for coverage training, i.e., with confidence set prediction."""
import functools
import itertools
from typing import Tuple, Dict, Any, Callable, Union
from absl import logging
import haiku as hk
import jax
import jax.numpy as jnp
import ml_collections as collections
import sorting_nets
import variational_sorting_net
import conformal_prediction as cp
import data as cpdata
import evaluation as cpeval
import smooth_conformal_prediction as scp
import train_normal as cpnormal
import train_utils as cputils
SizeLossFn = Callable[[jnp.ndarray, jnp.ndarray, jnp.ndarray], jnp.ndarray]
CoverageLossFn = Callable[[jnp.ndarray, jnp.ndarray], jnp.ndarray]
SmoothPredictFn = Callable[[jnp.ndarray, Any, jnp.ndarray], jnp.ndarray]
_CalibrateFn = Callable[
[Union[Any, jnp.ndarray], Union[Any, jnp.ndarray], Union[Any, jnp.ndarray]],
Union[Any, jnp.ndarray]]
_LossTransformFn = Callable[[jnp.ndarray], jnp.ndarray]
class TrainCoverage(cpnormal.TrainNormal):
"""Coverage training routine following [1] but adapted to also work with APS.
Trains a model by predicting confidence sets using some soft confidence set
prediction method. [1] uses simple soft-thresholding with a fixed threshold
but a smooth implementation of [2] can also be used. See
smooth_conformal_prediction.
[1] Anthony Bellotti.
Optimized conformal classification using gradient descent approximation.
ArXiv, 2021.
[2] Yaniv Romano, Matteo Sesia, Emmanuel J. Candes.
  Classification with valid and adaptive coverage.
NeurIPS, 2020.
"""
def __init__(self, config, data, optimizer):
"""Initialize coverage training.
Args:
config: training configuration
data: datasets and information
optimizer: optimizer to use
"""
super(TrainCoverage, self).__init__(config, data, optimizer)
self.fixed_smooth_predict_fn = None
"""(callable) Fixed smooth prediction function to get confidence sets."""
self.calibrate_fn = None
"""(callable) Conformal prediction calibration function for fine-tuning."""
self.coverage_loss_fn = None
"""(callable) Loss function for confidence sets."""
self.size_loss_fn = None
"""(callable) Size loss for confidence sets."""
self.loss_transform_fn = None
"""(callable) Monotonic transform of coverage + size loss."""
self.tau = None
""" (float) For fine-tuning, tau needs to be calibrated. """
def compute_loss_and_error(
self,
trainable_params: cputils.FlatMapping,
fixed_params: cputils.FlatMapping,
inputs: jnp.ndarray,
labels: jnp.ndarray,
model_state: cputils.FlatMapping,
training: bool,
rng: jnp.ndarray,
) -> Tuple[jnp.ndarray, Tuple[cputils.FlatMapping, Dict[str, Any]]]:
"""Compute coverage loss and size loss.
Args:
trainable_params: trainable model parameters
fixed_params: model parameters fixed for fine-tuning
inputs: input examples
labels: ground truth examples
model_state: model state
training: training mode
rng: random key
Returns:
Tuple consisting of loss and another tuple of new model state and a
dictionary with additional information
"""
forward_rng, predict_rng = None, None
if rng is not None:
forward_rng, predict_rng = jax.random.split(rng, 2)
params = hk.data_structures.merge(trainable_params, fixed_params)
logits, new_model_state = self.model.apply(
params, model_state, forward_rng, inputs, training=training)
confidence_sets = self.fixed_smooth_predict_fn(
logits, self.tau, predict_rng)
coverage_loss = self.coverage_loss_fn(confidence_sets, labels)
size_loss = self.size_loss_fn(confidence_sets, logits, labels)
size_loss *= self.config.coverage.size_weight
weight_decay_loss = cputils.compute_weight_decay(params)
weight_decay_loss *= self.config.weight_decay
cross_entropy_loss = cputils.compute_cross_entropy_loss(logits, labels)
cross_entropy_loss *= self.config.coverage.cross_entropy_weight
loss = self.loss_transform_fn(coverage_loss + size_loss + 1e-8)
loss += cross_entropy_loss
loss += weight_decay_loss
confidence_sets = jnp.greater(
confidence_sets, jnp.ones_like(confidence_sets) * 0.5)
error = 1 - cpeval.compute_accuracy(logits, labels)
coverage = cpeval.compute_coverage(confidence_sets, labels)
size, _ = cpeval.compute_size(confidence_sets)
return loss, (new_model_state, {
'coverage_loss': coverage_loss,
'size_loss': size_loss,
'cross_entropy_loss': cross_entropy_loss,
'weight_decay': weight_decay_loss,
'error': error,
'coverage': coverage,
'size': size,
})
def get_sos(
self, length: int) -> variational_sorting_net.VariationalSortingNet:
"""Set up smooth order stat object for given array length.
Args:
length: length of array to be sorted
Returns:
Smooth order stat object
"""
comm = sorting_nets.comm_pattern_batcher(
length, make_parallel=True)
sos = variational_sorting_net.VariationalSortingNet(
comm, smoothing_strategy='entropy_reg', sorting_strategy='hard')
return sos
def get_class_groups(
self, config: collections.ConfigDict) -> Tuple[jnp.ndarray, int]:
"""Get class groups for predict/calibrate from configuration.
Args:
config: sub-configuration to get groups from
Returns:
Class groups, number of groups
"""
classes = self.data['classes']
if config.class_groups:
groups = jnp.array(config.class_groups)
else:
groups = jnp.arange(classes)
if groups.size != classes:
      raise ValueError('Class groups must contain one entry per class.')
logging.info('Class groups to be used:')
logging.info(groups)
return groups, jnp.max(groups) + 1
def select_calibrate(
self, config: collections.ConfigDict) -> _CalibrateFn:
"""Select calibration function.
Args:
config: sub-configuration to determine calibration function
Returns:
Calibration function
"""
if config.method == 'threshold':
def calibrate_fn(logits, labels, unused_rng):
return cp.calibrate_threshold(logits, labels, alpha=config.alpha)
elif config.method == 'threshold_p':
def calibrate_fn(logits, labels, unused_rng):
probabilities = jax.nn.softmax(logits, axis=1)
return cp.calibrate_threshold(probabilities, labels, alpha=config.alpha)
elif config.method == 'threshold_logp':
def calibrate_fn(logits, labels, unused_rng):
log_probabilities = jax.nn.log_softmax(logits, axis=1)
return cp.calibrate_threshold(
log_probabilities, labels, alpha=config.alpha)
elif config.method == 'aps':
def calibrate_fn(logits, labels, rng):
probabilities = jax.nn.softmax(logits, axis=1)
return cp.calibrate_raps(
probabilities, labels, alpha=config.alpha,
k_reg=None, lambda_reg=None, rng=rng)
else:
raise ValueError('Invalid calibration method.')
return calibrate_fn
def select_smooth_predict(
self, config: collections.ConfigDict) -> SmoothPredictFn:
"""Select smooth confidence set prediction and calibration functions.
See smooth_conformal_prediction for options.
Args:
config: sub-configuration for selecting prediction/calibration function
Returns:
Smooth prediction function
"""
if config.method == 'threshold':
def smooth_predict_fn(logits, tau, unused_rng):
return scp.smooth_predict_threshold(
logits, tau,
temperature=config.temperature)
elif config.method == 'threshold_p':
def smooth_predict_fn(logits, tau, unused_rng):
probabilities = jax.nn.softmax(logits, axis=1)
return scp.smooth_predict_threshold(
probabilities, tau,
temperature=config.temperature)
elif config.method == 'threshold_logp':
def smooth_predict_fn(logits, tau, unused_rng):
log_probabilities = jax.nn.log_softmax(logits, axis=1)
return scp.smooth_predict_threshold(
log_probabilities, tau,
temperature=config.temperature)
elif config.method == 'aps':
sos = self.get_sos(self.data['classes'])
def smooth_predict_fn(logits, tau, rng):
probabilities = jax.nn.softmax(logits, axis=1)
return scp.smooth_predict_aps(
probabilities, tau,
temperature=config.temperature,
sos=sos, rng=rng if config.rng else None,
dispersion=config.dispersion)
else:
raise ValueError('Invalid smooth prediction method.')
return smooth_predict_fn
def get_loss_matrix(self, config: collections.ConfigDict) -> jnp.ndarray:
"""Get loss matrix for coverage loss from configuration.
Args:
config: sub-configuration to get loss matrix from
Returns:
Loss matrix
"""
classes = self.data['classes']
if config.loss_matrix:
loss_matrix = jnp.array(config.loss_matrix).reshape(classes, classes)
else:
loss_matrix = jnp.identity(classes)
if loss_matrix.shape[0] != classes or loss_matrix.shape[1] != classes:
raise ValueError('Loss matrix has to be num_classes x num_classes')
logging.info('Loss matrix for classification loss to be used:')
logging.info(loss_matrix)
return loss_matrix
def select_coverage_loss(
self, config: collections.ConfigDict) -> CoverageLossFn:
"""Select coverage loss to use for training.
Args:
config: sub-configuration to select coverage loss
Returns:
Coverage loss
"""
loss_matrix = self.get_loss_matrix(config)
if config.coverage_loss == 'none':
def coverage_loss_fn(unused_confidence_sets, unused_labels):
return 0.
elif config.coverage_loss == 'absolute_coverage':
coverage_loss_fn = functools.partial(
cputils.compute_coverage_loss,
alpha=config.target_alpha, transform=jnp.abs)
elif config.coverage_loss == 'squared_coverage':
coverage_loss_fn = functools.partial(
cputils.compute_coverage_loss,
alpha=config.target_alpha, transform=jnp.square)
elif config.coverage_loss == 'classification':
coverage_loss_fn = functools.partial(
cputils.compute_general_classification_loss,
loss_matrix=loss_matrix)
elif config.coverage_loss == 'bce':
coverage_loss_fn = functools.partial(
cputils.compute_general_binary_cross_entropy_loss,
loss_matrix=loss_matrix)
else:
raise ValueError('Invalid coverage loss.')
return coverage_loss_fn
def select_size_loss(
self, config: collections.ConfigDict) -> SizeLossFn:
"""Select size loss to use.
Args:
config: sub-configuration to select size loss
Returns:
Size loss
"""
if config.size_transform == 'identity':
size_transform_fn = lambda x: x
elif config.size_transform == 'log':
size_transform_fn = jnp.log
elif config.size_transform == 'square':
size_transform_fn = jnp.square
elif config.size_transform == 'abs':
size_transform_fn = jnp.abs
else:
raise ValueError('Invalid size transform')
if config.size_loss == 'valid':
selected_size_loss_fn = functools.partial(
cputils.compute_hinge_size_loss, target_size=1,
transform=size_transform_fn)
elif config.size_loss == 'normal':
selected_size_loss_fn = functools.partial(
cputils.compute_hinge_size_loss, target_size=0,
transform=size_transform_fn)
elif config.size_loss == 'valid_bounded':
selected_size_loss_fn = functools.partial(
cputils.compute_hinge_bounded_size_loss, target_size=1,
bound_size=config.size_bound, bound_weight=config.size_bound_weight,
transform=size_transform_fn)
elif config.size_loss == 'normal_bounded':
selected_size_loss_fn = functools.partial(
cputils.compute_hinge_bounded_size_loss, target_size=0,
bound_size=config.size_bound, bound_weight=config.size_bound_weight,
transform=size_transform_fn)
elif config.size_loss == 'probabilistic':
selected_size_loss_fn = cputils.compute_probabilistic_size_loss
else:
raise ValueError('Invalid size loss.')
classes = self.data['classes']
if config.size_weights:
size_weights = jnp.array(config.size_weights)
else:
size_weights = jnp.ones(classes)
if size_weights.shape[0] != classes:
raise ValueError('Could not use size weights due to invalid shape: %d' % (
size_weights.shape[0]))
logging.info('Size weights by class for size loss to be used:')
logging.info(size_weights)
def size_loss_fn(confidence_sets, unused_logits, labels):
"""Wrapper for size loss as most size losses only need confidence_sets."""
weights = size_weights[labels]
return selected_size_loss_fn(confidence_sets, weights=weights)
return size_loss_fn
def select_loss_transform(
self, config: collections.ConfigDict) -> _LossTransformFn:
"""Select loss transform to apply.
Args:
config: sub-configuration to select loss transform
Returns:
Loss transform
"""
if config.loss_transform == 'identity':
loss_transform_fn = lambda array: array
elif config.loss_transform == 'log':
loss_transform_fn = jnp.log
elif config.loss_transform == 'inverse':
loss_transform_fn = lambda array: -1./array
elif config.loss_transform == 'inverse_square':
loss_transform_fn = lambda array: -1./(array**2)
else:
raise ValueError('Invalid loss transform.')
return loss_transform_fn
def get_train_fns(self) -> Tuple[cputils.LossFn, functools.partial]:
"""Define loss and update functions for training.
Returns:
Loss and update function
"""
self.fixed_smooth_predict_fn = self.select_smooth_predict(
self.config.coverage)
self.coverage_loss_fn = self.select_coverage_loss(self.config.coverage)
self.size_loss_fn = self.select_size_loss(self.config.coverage)
self.loss_transform_fn = self.select_loss_transform(self.config.coverage)
loss_fn = self.compute_loss_and_error
update_fn = functools.partial(
cputils.update, loss_fn=loss_fn, optimizer=self.optimizer)
if self.config.jit:
loss_fn = jax.jit(loss_fn, static_argnames='training')
update_fn = jax.jit(update_fn, static_argnames='training')
return loss_fn, update_fn
def calibrate(self, params, model_state, rng):
"""Calibrate fixed tau used for coverage training.
Args:
params: model parameters
model_state: model state
rng: random key sequence
Returns:
Calibrated tau
"""
if self.config.mode != 'coverage':
raise ValueError(
'Trying to calibrate tau before training but '
'not in coverage training mode.')
if not self.calibrate_fn:
raise ValueError(
'Trying to calibrate for fine-tuning but calibrate_fn not defined; '
'in coverage training mode this should not happen.')
# When not fine-tuning, tau can be arbitrary in most cases.
# For fine-tuning, we calibrate tau once as the model usually
# performs quite well already.
tau = self.config.coverage.tau
if self.config.finetune.enabled:
val_ds = itertools.islice(
cpdata.load_batches(self.data['train']),
0, self.config.coverage.calibration_batches)
logits = []
labels = []
for inputs_b, labels_b in val_ds:
logits_b, _ = self.model.apply(
params, model_state, None, inputs_b, training=False)
logits.append(logits_b)
labels.append(labels_b)
logits = jnp.concatenate(logits, axis=0)
labels = jnp.concatenate(labels, axis=0)
tau = self.calibrate_fn(logits, labels, next(rng))
logging.info('Threshold after calibration of pre-trained model: %g', tau)
return tau
def run(self, rng: hk.PRNGSequence):
"""Main training procedure but with calibration if fine-tuning.
Args:
rng: random key sequence
"""
trainable_params, fixed_params, model_state = self.setup(rng)
self.calibrate_fn = self.select_calibrate(self.config.coverage)
self.tau = self.calibrate(
hk.data_structures.merge(trainable_params, fixed_params),
model_state, rng)
params, model_state = self.train(
trainable_params, fixed_params, model_state, rng)
self.test(params, model_state)
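# A minimal, hedged sketch of the loss composition implemented in
# compute_loss_and_error above: the coverage loss and a weighted size loss are
# summed, passed through a monotonic transform (log in this sketch), and
# combined with the cross-entropy and weight decay terms. The size weight
# below is an illustrative value, not a default from any configuration.
def _example_loss_composition(
    coverage_loss: jnp.ndarray,
    size_loss: jnp.ndarray,
    cross_entropy_loss: jnp.ndarray,
    weight_decay_loss: jnp.ndarray,
    size_weight: float = 0.05,
) -> jnp.ndarray:
  """Illustrative combination of the individual loss terms."""
  transformed_loss = jnp.log(coverage_loss + size_weight * size_loss + 1e-8)
  return transformed_loss + cross_entropy_loss + weight_decay_loss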
| conformal_training-main | train_coverage.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for evaluation utilities."""
import os
from absl.testing import absltest
from absl.testing import parameterized
import jax
import ml_collections as collections
import numpy as np
import colab_utils as cpcolab
import data_utils as cpdatautils
import test_utils as cptutils
class ColabUtilsTest(parameterized.TestCase):
def _get_model(self, num_examples, num_classes):
val_examples = num_examples//2
labels = cptutils.get_labels(num_examples, num_classes)
logits = cptutils.get_probabilities(labels, dominance=0.5)
config = collections.ConfigDict()
config.dataset = 'cifar10'
config.val_examples = val_examples
data = cpdatautils.get_data_stats(config)
data['groups'] = {'groups': cpcolab.get_groups(config.dataset, 'groups')}
model = {
'val_logits': logits[:val_examples],
'val_labels': labels[:val_examples],
'test_logits': logits[val_examples:],
'test_labels': labels[val_examples:],
'data': data,
}
return model
def _check_results(self, results):
self.assertIn('mean', results.keys())
self.assertIn('std', results.keys())
if os.getenv('EVAL_VAL', '0') == '1':
self.assertIn('val', results['mean'].keys())
self.assertIn('test', results['mean'].keys())
# Just test whether some basic metrics are there and not NaN or so.
metrics_to_check = [
'size', 'coverage', 'accuracy',
'class_size_0', 'class_coverage_0',
'size_0', 'cumulative_size_0',
'groups_miscoverage',
]
if os.getenv('EVAL_CONFUSION') == '1':
metrics_to_check += [
'classification_confusion_0_0', 'coverage_confusion_0_0'
]
for metric in metrics_to_check:
mean = results['mean']['test'][metric]
std = results['std']['test'][metric]
self.assertFalse(np.isnan(mean))
self.assertFalse(np.isinf(mean))
self.assertGreaterEqual(mean, 0.)
self.assertFalse(np.isnan(std))
self.assertFalse(np.isinf(std))
self.assertGreaterEqual(std, 0.)
# Extra check for cumulative size
self.assertAlmostEqual(results['mean']['test']['cumulative_size_9'], 1)
def test_evaluate_conformal_prediction(self):
num_examples = 1000
num_classes = 10
model = self._get_model(num_examples, num_classes)
calibrate_fn, predict_fn = cpcolab.get_threshold_fns(0.05, jit=True)
rng = jax.random.PRNGKey(0)
results = cpcolab.evaluate_conformal_prediction(
model, calibrate_fn, predict_fn, trials=2, rng=rng)
self._check_results(results)
if __name__ == '__main__':
absltest.main()
| conformal_training-main | colab_utils_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for evaluation metrics."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
import numpy as np
import evaluation as cpeval
import test_utils as cptutils
class EvaluationTest(parameterized.TestCase):
@parameterized.parameters([
dict(num_examples=100000, num_classes=10),
dict(num_examples=100000, num_classes=100),
dict(num_examples=1000000, num_classes=100)
])
def test_compute_accuracy_random(self, num_examples, num_classes):
labels = cptutils.get_labels(num_examples, num_classes)
probabilities = cptutils.get_probabilities(labels, 0)
accuracy = cpeval.compute_accuracy_with_checks(probabilities, labels)
expected_accuracy = 1./num_classes
self.assertGreaterEqual(accuracy, expected_accuracy - 2e-2)
self.assertGreaterEqual(expected_accuracy + 2e-2, accuracy)
@parameterized.parameters([
dict(num_examples=1000000, num_classes=10, num_selected=0),
dict(num_examples=1000000, num_classes=10, num_selected=100000),
dict(num_examples=1000000, num_classes=10, num_selected=500000),
])
def test_compute_conditional_accuracy_random(
self, num_examples, num_classes, num_selected):
labels = cptutils.get_labels(num_examples, num_classes)
probabilities = cptutils.get_probabilities(labels, 0)
conditional_labels = jnp.zeros(labels.shape).astype(int)
conditional_labels = conditional_labels.at[:num_selected].set(1)
accuracy = cpeval.compute_conditional_accuracy_with_checks(
probabilities, labels, conditional_labels, 1)
expected_accuracy = (1./num_classes) if num_selected > 0 else 0
self.assertGreaterEqual(accuracy, expected_accuracy - 2e-2)
self.assertGreaterEqual(expected_accuracy + 2e-2, accuracy)
@parameterized.parameters([
dict(num_examples=10000, num_classes=10),
dict(num_examples=10000, num_classes=100),
])
def test_compute_accuracy_correct(self, num_examples, num_classes):
labels = cptutils.get_labels(num_examples, num_classes)
probabilities = cptutils.get_probabilities(labels, 1)
accuracy = cpeval.compute_accuracy_with_checks(probabilities, labels)
self.assertAlmostEqual(accuracy, 1)
@parameterized.parameters([
dict(probabilities=np.array([]), labels=np.ones((100))),
dict(probabilities=np.zeros((100, 10)), labels=np.array([])),
dict(probabilities=np.zeros((100, 10)),
labels=np.ones((100)) * 99),
dict(probabilities=np.zeros((100)),
labels=np.ones((100))),
dict(probabilities=np.zeros((100, 10)),
labels=np.ones((100, 10))),
dict(probabilities=np.zeros((100, 10)),
labels=np.ones((99))),
dict(probabilities=np.zeros((100, 10)),
labels=np.ones((99))),
])
def test_compute_accuracy_errors(self, probabilities, labels):
with self.assertRaises(ValueError):
cpeval.compute_accuracy_with_checks(
jnp.array(probabilities), jnp.array(labels))
@parameterized.parameters([
dict(probabilities=np.zeros((100, 10)),
labels=np.ones((100)), conditional_labels=np.array([])),
dict(probabilities=np.zeros((100, 10)),
labels=np.ones((100)), conditional_labels=np.ones((99))),
dict(probabilities=np.zeros((100, 10)),
labels=np.ones((100)), conditional_labels=np.ones((100, 10))),
])
def test_compute_conditional_accuracy_errors(
self, probabilities, labels, conditional_labels):
with self.assertRaises(ValueError):
cpeval.compute_conditional_accuracy_with_checks(
jnp.array(probabilities), jnp.array(labels),
jnp.array(conditional_labels), 1)
@parameterized.parameters([
dict(probabilities=np.zeros((100, 10)),
labels=np.ones((100)), conditional_labels=np.ones((100))),
])
def test_compute_accuracy_jit(
self, probabilities, labels, conditional_labels):
compute_conditional_accuracy_fn = jax.jit(
cpeval.compute_conditional_accuracy)
compute_conditional_accuracy_fn(
jnp.array(probabilities), jnp.array(labels),
jnp.array(conditional_labels), 1)
@parameterized.parameters([
dict(num_examples=10000, num_classes=10),
dict(num_examples=10000, num_classes=100),
dict(num_examples=100000, num_classes=100),
])
def test_compute_coverage_simple(self, num_examples, num_classes):
labels = cptutils.get_labels(num_examples, num_classes)
# Case: all zeros.
confidence_sets = jnp.zeros((num_examples, num_classes))
coverage = cpeval.compute_coverage_with_checks(confidence_sets, labels)
self.assertAlmostEqual(coverage, 0)
# Case: all ones.
confidence_sets = jnp.ones((num_examples, num_classes))
coverage = cpeval.compute_coverage_with_checks(confidence_sets, labels)
self.assertAlmostEqual(coverage, 1)
# Case: one hot of true class.
confidence_sets = jnp.zeros((num_examples, num_classes))
confidence_sets = confidence_sets.at[
(jnp.arange(confidence_sets.shape[0]), labels)].set(1)
    coverage = cpeval.compute_coverage_with_checks(confidence_sets, labels)
    self.assertAlmostEqual(coverage, 1)
@parameterized.parameters([
dict(num_examples=500000, num_classes=10),
dict(num_examples=5000000, num_classes=100),
])
def test_compute_coverage_random(self, num_examples, num_classes):
labels = cptutils.get_labels(num_examples, num_classes)
# First case, only true label or zeros.
confidence_sets = jnp.zeros((num_examples, num_classes))
rand = jnp.array(np.random.random((num_examples)))
confidence_sets = confidence_sets.at[
(jnp.arange(confidence_sets.shape[0]), labels)].set(
(rand <= 0.5).astype(int))
coverage = cpeval.compute_coverage_with_checks(confidence_sets, labels)
self.assertAlmostEqual(coverage, 0.5, places=1)
    # Second case, everything one except true label for some rows.
confidence_sets = jnp.ones((num_examples, num_classes))
confidence_sets = confidence_sets.at[
(jnp.arange(confidence_sets.shape[0]), labels)].set(
(rand <= 0.5).astype(int))
coverage = cpeval.compute_coverage_with_checks(confidence_sets, labels)
self.assertAlmostEqual(coverage, 0.5, places=1)
@parameterized.parameters([
dict(num_examples=5000000, num_classes=10, num_selected=0),
dict(num_examples=5000000, num_classes=10, num_selected=500000),
])
def test_compute_conditional_coverage_random(
self, num_examples, num_classes, num_selected):
confidence_sets = jnp.zeros((num_examples, num_classes))
labels = cptutils.get_labels(num_examples, num_classes)
conditional_labels = jnp.zeros(labels.shape).astype(int)
conditional_labels = conditional_labels.at[:num_selected].set(1)
rand = jnp.array(np.random.random((num_examples)))
confidence_sets = confidence_sets.at[
(jnp.arange(confidence_sets.shape[0]), labels)].set(
(rand <= 0.5).astype(int))
coverage = cpeval.compute_conditional_coverage_with_checks(
confidence_sets, labels, conditional_labels, 1)
expected_coverage = 0.5 if num_selected > 0 else 1
self.assertAlmostEqual(coverage, expected_coverage, places=1)
@parameterized.parameters([
dict(confidence_sets=np.array([]), labels=np.ones((100))),
dict(confidence_sets=np.zeros((100, 10)), labels=np.array([])),
dict(confidence_sets=np.zeros((100, 10)),
labels=np.ones((100)) * 99),
dict(confidence_sets=np.zeros((100)),
labels=np.ones((100))),
dict(confidence_sets=np.zeros((100, 10)),
labels=np.ones((100, 10))),
dict(confidence_sets=np.zeros((100, 10)),
labels=np.ones((99))),
])
def test_compute_coverage_errors(self, confidence_sets, labels):
with self.assertRaises(ValueError):
cpeval.compute_coverage_with_checks(
jnp.array(confidence_sets), jnp.array(labels))
@parameterized.parameters([
dict(confidence_sets=np.zeros((100, 10)),
labels=np.ones((100)), conditional_labels=np.array([])),
dict(confidence_sets=np.zeros((100, 10)),
labels=np.ones((100)), conditional_labels=np.ones((99))),
dict(confidence_sets=np.zeros((100, 10)),
labels=np.ones((100)), conditional_labels=np.ones((100, 10))),
])
def test_compute_conditional_coverage_errors(
self, confidence_sets, labels, conditional_labels):
with self.assertRaises(ValueError):
cpeval.compute_conditional_coverage_with_checks(
jnp.array(confidence_sets), jnp.array(labels),
jnp.array(conditional_labels), 1)
@parameterized.parameters([
dict(confidence_sets=np.zeros((100, 10)),
labels=np.ones((100)), conditional_labels=np.ones((100))),
])
def test_compute_conditional_coverage_jit(
self, confidence_sets, labels, conditional_labels):
compute_conditional_coverage_fn = jax.jit(
cpeval.compute_conditional_coverage)
compute_conditional_coverage_fn(
jnp.array(confidence_sets), jnp.array(labels),
jnp.array(conditional_labels), 1)
@parameterized.parameters([
dict(num_examples=100000, num_classes=10, fraction=0.1),
dict(num_examples=100000, num_classes=10, fraction=0.5),
])
def test_compute_size(self, num_examples, num_classes, fraction):
confidence_sets = np.random.random((num_examples, num_classes))
confidence_sets = jnp.array(confidence_sets <= fraction).astype(int)
size, count = cpeval.compute_size_with_checks(confidence_sets)
expected_size = num_classes * fraction
self.assertEqual(count, num_examples)
self.assertAlmostEqual(size, expected_size, places=1)
@parameterized.parameters([
dict(num_examples=100000, num_classes=10,
fraction=0.1, num_selected=0),
dict(num_examples=100000, num_classes=10,
fraction=0.5, num_selected=50000),
])
def test_compute_conditional_size(
self, num_examples, num_classes, fraction, num_selected):
confidence_sets = np.random.random((num_examples, num_classes))
confidence_sets = jnp.array(confidence_sets <= fraction).astype(int)
conditional_labels = jnp.zeros(confidence_sets.shape[0]).astype(int)
conditional_labels = conditional_labels.at[:num_selected].set(1)
size, count = cpeval.compute_conditional_size_with_checks(
confidence_sets, conditional_labels, 1)
expected_size = (num_classes * fraction) if num_selected > 0 else 0
self.assertEqual(count, num_selected)
self.assertAlmostEqual(size, expected_size, places=1)
@parameterized.parameters([
dict(confidence_sets=np.array([])),
dict(confidence_sets=np.zeros((100))),
])
def test_compute_size_errors(self, confidence_sets):
with self.assertRaises(ValueError):
cpeval.compute_size_with_checks(jnp.array(confidence_sets))
@parameterized.parameters([
dict(confidence_sets=np.zeros((100, 10)),
conditional_labels=np.array([])),
dict(confidence_sets=np.zeros((100, 10)),
conditional_labels=np.ones((99))),
dict(confidence_sets=np.zeros((100, 10)),
conditional_labels=np.ones((100, 10))),
])
def test_compute_conditional_size_errors(
self, confidence_sets, conditional_labels):
with self.assertRaises(ValueError):
cpeval.compute_conditional_size_with_checks(
jnp.array(confidence_sets), jnp.array(conditional_labels), 1)
@parameterized.parameters([
dict(confidence_sets=np.zeros((100, 10)),
conditional_labels=np.ones((100))),
])
def test_compute_conditional_size_jit(
self, confidence_sets, conditional_labels):
compute_conditional_size_fn = jax.jit(cpeval.compute_conditional_size)
compute_conditional_size_fn(
jnp.array(confidence_sets), jnp.array(conditional_labels), 1)
if __name__ == '__main__':
absltest.main()
| conformal_training-main | evaluation_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluate experiment."""
import os
import sys
from absl import flags
from absl import logging
import jax
from absl import app
import colab_utils as cbutils
FLAGS = flags.FLAGS
flags.DEFINE_string('experiment_path', './', 'base path for experiments')
flags.DEFINE_string('experiment_dataset', '', 'dataset to evaluate')
flags.DEFINE_string(
    'experiment_method', 'thr', 'conformal predictor to use, thr or aps')
flags.DEFINE_boolean('experiment_logfile', False,
'log results to file in experiment_path')
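# Hedged usage note: a typical invocation of this script might look like
#   python eval.py --experiment_path=/path/to/experiment \
#     --experiment_dataset=cifar10 --experiment_method=thr
# where the path and dataset values are illustrative, not defaults.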
def main(argv):
del argv
if FLAGS.experiment_logfile:
logging.get_absl_handler().use_absl_log_file(
f'eval_{FLAGS.experiment_method}', FLAGS.experiment_path)
else:
logging.get_absl_handler().python_handler.stream = sys.stdout
if not os.path.exists(FLAGS.experiment_path):
logging.error('could not find experiment path %s', FLAGS.experiment_path)
return
alpha = 0.01
if FLAGS.experiment_method == 'thr':
calibrate_fn, predict_fn = cbutils.get_threshold_fns(alpha)
elif FLAGS.experiment_method == 'aps':
calibrate_fn, predict_fn = cbutils.get_raps_fns(alpha, 0, 0)
else:
raise ValueError('Invalid conformal predictor, choose thr or aps.')
if FLAGS.experiment_dataset == 'mnist':
num_classes = 10
groups = ['singleton', 'groups']
elif FLAGS.experiment_dataset == 'emnist_byclass':
num_classes = 52
groups = ['groups']
elif FLAGS.experiment_dataset == 'fashion_mnist':
num_classes = 10
groups = ['singleton']
elif FLAGS.experiment_dataset == 'cifar10':
num_classes = 10
groups = ['singleton', 'groups']
elif FLAGS.experiment_dataset == 'cifar100':
num_classes = 100
groups = ['groups', 'hierarchy']
else:
raise ValueError('Invalid dataset %s.' % FLAGS.experiment_dataset)
model = cbutils.load_predictions(FLAGS.experiment_path, val_examples=5000)
for group in groups:
model['data']['groups'][group] = cbutils.get_groups(
FLAGS.experiment_dataset, group)
results = cbutils.evaluate_conformal_prediction(
model, calibrate_fn, predict_fn, trials=10, rng=jax.random.PRNGKey(0))
logging.info('Accuracy: %f', results['mean']['test']['accuracy'])
logging.info('Coverage: %f', results['mean']['test']['coverage'])
logging.info('Size: %f', results['mean']['test']['size'])
for k in range(num_classes):
logging.info(
'Class size %d: %f', k, results['mean']['test'][f'class_size_{k}'])
for group in groups:
k = 0
key = f'{group}_size_{k}'
while key in results['mean']['test'].keys():
logging.info(
'Group %s size %d: %f', group, k, results['mean']['test'][key])
k += 1
key = f'{group}_size_{k}'
logging.info(
'Group %s miscoverage 0: %f',
group, results['mean']['test'][f'{group}_miscoverage_0'])
logging.info(
'Group %s miscoverage 1: %f',
group, results['mean']['test'][f'{group}_miscoverage_1'])
# Selected coverage confusion combinations:
logging.info(
'Coverage confusion 4-6: %f',
results['mean']['test']['coverage_confusion_4_6'])
logging.info(
'Coverage confusion 6-4: %f',
results['mean']['test']['coverage_confusion_6_4'])
if __name__ == '__main__':
app.run(main)
| conformal_training-main | eval.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Main file to run training."""
from absl import flags
from ml_collections import config_flags
from absl import app
from train import train
FLAGS = flags.FLAGS
config_flags.DEFINE_config_file(
'config', 'config.py', 'Configuration.')
def main(argv):
"""Main method when called from command line."""
del argv
config = FLAGS.config
train(config)
if __name__ == '__main__':
app.run(main)
| conformal_training-main | main.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for constructing sorting networks."""
import numpy as np
jnp = np  # Note: jnp is aliased to NumPy within this module.
SNET_10 = [[[0, 1], [3, 4], [5, 6], [8, 9]],
[[2, 4], [7, 9]],
[[2, 3], [1, 4], [7, 8], [6, 9]],
[[0, 3], [5, 8], [4, 9]],
[[0, 2], [1, 3], [5, 7], [6, 8]],
[[1, 2], [6, 7], [0, 5], [3, 8]],
[[1, 6], [2, 7], [4, 8]],
[[1, 5], [3, 7]],
[[4, 7], [2, 5], [3, 6]],
[[4, 6], [3, 5]],
[[4, 5]]]
def comm_pattern_bitonic(num_bits):
"""Bitonic sort communication pattern on a hypercube of size 2**num_bits.
Args:
num_bits: size of the array to be sorted is 2**num_bits
Returns:
comms: Catalog
"""
total_stages = num_bits*(num_bits+1)//2
edge_list = []
absolute_substage = 0
for stage in range(num_bits):
for substage in range(stage+1):
i = np.arange(2**(num_bits-stage+substage-1))
j = np.arange(2**(stage-substage))
idx1 = jnp.reshape(
i.reshape((i.shape[0], 1))*2**(stage-substage+1)
+ j.reshape((1, j.shape[0])), (i.shape[0]*j.shape[0]))
idx2 = idx1 + 2**(stage-substage)
direction = (idx1 // (2**(stage+1))) % 2
edges = np.zeros([2**(num_bits-1), 2], dtype=np.int32)
edges[:, 0] = np.where(direction == 0, idx1, idx2)
edges[:, 1] = np.where(direction == 0, idx2, idx1)
edge_list.append(jnp.array(edges))
absolute_substage += 1
return {"alg": "bitonic",
"num_wires": 2**num_bits,
"num_stages": total_stages,
"num_comparators": total_stages*(2**(num_bits-1)),
"edge_list": edge_list}
def comm_pattern_from_list(snet_list, make_parallel=False):
"""A fixed network from a list of comperators.
Args:
    snet_list: list of stages, where each stage is a list of edges
    make_parallel: (Optional) Organize parallel executable comparators
Returns:
comms: Catalog. We make sure that edge_list is in sorted form
"""
if make_parallel:
snet_list = parallelize(snet_list)
total_stages = len(snet_list)
edge_list = []
max_wire_seen = 0
comp_count = 0
for a in snet_list:
v = np.array(a)
max_wire_seen = max(max_wire_seen, np.max(v))
comp_count = comp_count + v.shape[0]
idx = np.argsort(v[:, 0])
edge_list.append(jnp.array(v[idx, :]))
return {"alg": "fixed",
"num_wires": max_wire_seen+1,
"num_stages": total_stages,
"num_comparators": comp_count,
"edge_list": edge_list}
def prune(snet_list, keep):
"""Prune comparators not used for wires in keep."""
keep = set(keep)
pruned_list = [[]]
for stage in reversed(snet_list):
if pruned_list[0]:
pruned_list.insert(0, [])
for edge in stage:
if (edge[0] in keep) or (edge[1] in keep):
keep.update(edge)
pruned_list[0].append(edge)
return pruned_list
def parallelize(snet_lst):
"""Organize comparators that can be run in parallel in stages.
We visit each comparator in the sequence and try to place it
  in the earliest possible stage, starting from the last stage constructed.
Args:
snet_lst: List of sorting network stages (that are lists of edges)
Returns:
    stage: Rearranged comparators as stages
"""
stage_sets = [set()]
stage = [[]]
for edge_lst in snet_lst:
for edge in edge_lst:
placed = False
place_here = len(stage)-1
for stage_idx in reversed(range(len(stage))):
if ((edge[0] not in stage_sets[stage_idx])
and (edge[1] not in stage_sets[stage_idx])):
place_here = stage_idx
placed = True
else:
break
if not placed:
stage.append([edge])
stage_sets.append(set(edge))
else:
stage[place_here].append(edge)
stage_sets[place_here].update(edge)
return stage
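# Hedged example of the behaviour documented above: comparators on disjoint
# wires are merged into one stage, while a conflicting comparator starts a new
# stage. The comparator list is illustrative.
def _example_parallelize():
  # [[0, 1]] and [[2, 3]] touch disjoint wires and share a stage;
  # [[1, 2]] conflicts with both and is pushed into a second stage,
  # yielding [[[0, 1], [2, 3]], [[1, 2]]].
  return parallelize([[[0, 1]], [[2, 3]], [[1, 2]]])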
def generate_list_bitonic(length, make_parallel=True):
"""Generate a Bitonic sorting network list of arbitrary length.
Args:
length: Number of wires
make_parallel: Flag to organize parallel executable comparators into stages
Returns:
snet_list: list of pairwise swaps
"""
def greatest_power_of_two_less_than(n):
k = 1
while k > 0 and k < n:
k = k * 2
return k // 2
def bitonic_sort(lo, n, direction):
if n > 1:
m = n // 2
bitonic_sort(lo, m, not direction)
bitonic_sort(lo+m, n-m, direction)
bitonic_merge(lo, n, direction)
def bitonic_merge(lo, n, direction):
if n > 1:
m = greatest_power_of_two_less_than(n)
for i in range(lo, lo+n-m):
if direction:
snet_list.append([[i, i+m]])
else:
snet_list.append([[i+m, i]])
bitonic_merge(lo, m, direction)
bitonic_merge(lo+m, n-m, direction)
snet_list = []
bitonic_sort(0, length, True)
return parallelize(snet_list) if make_parallel else snet_list
def comm_pattern_batcher(length, make_parallel=True):
"""Batcher bitonic communication pattern for an array with size length."""
snet_list = generate_list_bitonic(length, make_parallel)
comms = comm_pattern_from_list(snet_list)
comms["alg"] = "batcher-bitonic"
return comms
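# Hedged example: building a Batcher bitonic communication pattern for ten
# wires and inspecting its catalog; the length 10 is an illustrative choice.
if __name__ == "__main__":
  example_comms = comm_pattern_batcher(10, make_parallel=True)
  print("num_wires:", example_comms["num_wires"])
  print("num_stages:", example_comms["num_stages"])
  print("num_comparators:", example_comms["num_comparators"])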
| conformal_training-main | sorting_nets.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of recent conformal prediction approaches.
Implements conformal prediction from [1,2,3]:
[1] Yaniv Romano, Matteo Sesia, Emmanuel J. Candes.
Classification withvalid and adaptive coverage.
NeurIPS, 2020.
[2] Anastasios N. Angelopoulos, Stephen Bates, Michael Jordan, Jitendra Malik.
Uncertainty sets for image classifiers using conformal prediction.
ICLR, 2021
[3] Mauricio Sadinle, Jing Lei, and Larry A. Wasserman.
Least ambiguous set-valued classifiers with bounded error levels.
ArXiv, 2016.
"""
from typing import Optional, Callable, Any
import jax
import jax.numpy as jnp
_QuantileFn = Callable[[Any, float], float]
_CalibrateFn = Callable[[jnp.ndarray, jnp.ndarray, jnp.ndarray], Any]
_PredictFn = Callable[[jnp.ndarray, Any, jnp.ndarray], jnp.ndarray]
_SelectFn = Callable[[jnp.ndarray, jnp.ndarray], float]
def _check_conformal_quantile(array: jnp.ndarray, q: float):
"""Helper to check quantile arguments.
Args:
array: input array to compute quantile of
q: quantile to compute
Raises:
ValueError: if shape or q invalid.
"""
if array.size == 0:
raise ValueError('Expecting non-empty array.')
if array.ndim != 1:
raise ValueError('Expecting array of shape n.')
if q < 0 or q > 1:
raise ValueError('Expecting q in [0,1].')
def conformal_quantile(array: jnp.ndarray, q: float) -> float:
"""Corrected quantile for conformal prediction.
  Wrapper for jnp.quantile, but instead of obtaining the q-quantile,
it computes the (1 + 1/array.shape[0]) * q quantile. For conformal
prediction, this is needed to obtain the guarantees for future test
examples, see [1] Appendix Lemma for details.
  [1] Yaniv Romano, Evan Patterson, Emmanuel J. Candes.
Conformalized quantile regression. NeurIPS, 2019.
Args:
array: input array to compute quantile of
q: quantile to compute
Returns:
(1 + 1/array.shape[0]) * q quantile of array.
"""
# Using midpoint here to be comparable to the smooth implementation
# in smooth_conformal_prediction which uses smooth sort to compute quantiles.
return jnp.quantile(
array, (1 + 1./array.shape[0]) * q, method='midpoint')
def conformal_quantile_with_checks(array: jnp.ndarray, q: float) -> float:
"""conformal_quantile with extra argument checks raising ValueError."""
_check_conformal_quantile(array, q)
return conformal_quantile(array, q)
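# Hedged example: for n = 4 conformity scores and q = 0.1, the corrected
# quantile below internally queries the (1 + 1/4) * 0.1 = 0.125 quantile of
# the scores. The score values are illustrative only.
def _example_conformal_quantile() -> float:
  scores = jnp.array([0.2, 0.5, 0.7, 0.9])
  return conformal_quantile_with_checks(scores, q=0.1)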
def _check_predict(probabilities: jnp.ndarray):
"""Helper to check probabilities for prediction.
Args:
probabilities: predicted probabilities on test set
Raises:
ValueError if shape is incorrect.
"""
if probabilities.ndim != 2:
raise ValueError('Expecting probabilities of shape n_examples x n_classes.')
if probabilities.size == 0:
raise ValueError('probabilities is empty.')
def _check_calibrate(
probabilities: jnp.ndarray,
labels: jnp.ndarray,
alpha: Optional[float] = None):
"""Helper to check shape of probabilities, labels and alpha for calibration.
Args:
probabilities: predicted probabilities on a validation set
labels: ground truth labels on validation set
alpha: confidence level
Raises:
ValueError if shapes do not match.
"""
if probabilities.ndim != 2:
raise ValueError('Expecting probabilities of shape n_examples x n_classes.')
if labels.ndim != 1:
raise ValueError('Expecting labels of shape n_examples.')
if not jnp.issubdtype(labels.dtype, jnp.integer):
raise ValueError('Expecting labels to be integers.')
if jnp.max(labels) >= probabilities.shape[1]:
raise ValueError('More labels than predicted in probabilities.')
if probabilities.size == 0:
raise ValueError('probabilities is empty.')
if probabilities.shape[0] != labels.shape[0]:
raise ValueError(
'Number of predicted probabilities does not match number of labels.')
if alpha is not None:
if alpha < 0 or alpha > 1:
raise ValueError('Expecting alpha to be in [0, 1].')
def calibrate_threshold(
probabilities: jnp.ndarray,
labels: jnp.ndarray,
alpha: float = 0.1,
quantile_fn: _QuantileFn = conformal_quantile) -> float:
"""Probability/logit thresholding baseline calibration procedure.
Finds a threshold based on input probabilities or logits. Confidence sets
are defined as all classes above the threshold.
Args:
probabilities: predicted probabilities on validation set
labels: ground truth labels on validation set
alpha: confidence level
quantile_fn: function to compute conformal quantile
Returns:
Threshold used to construct confidence sets
"""
conformity_scores = probabilities[
jnp.arange(probabilities.shape[0]), labels.astype(int)]
return quantile_fn(conformity_scores, alpha)
def calibrate_threshold_with_checks(
probabilities: jnp.ndarray,
labels: jnp.ndarray,
alpha: float = 0.1,
quantile_fn: _QuantileFn = conformal_quantile_with_checks) -> float:
"""calibrate_threshold with extra argument checks raising ValueError."""
_check_calibrate(probabilities, labels, alpha)
return calibrate_threshold(probabilities, labels, alpha, quantile_fn)
def predict_threshold(probabilities: jnp.ndarray, tau: float) -> jnp.ndarray:
"""Probability/logit threshold baseline.
Predicts all classes with probabilities/logits above given threshold
as confidence sets.
Args:
probabilities: predicted probabilities on test set
tau: threshold for probabilities or logits
Returns:
    Confidence sets as 0-1 array of same size as probabilities.
"""
confidence_sets = (probabilities >= tau)
return confidence_sets.astype(int)
def predict_threshold_with_checks(
probabilities: jnp.ndarray, tau: float) -> jnp.ndarray:
"""predict_threshold with extra argument checks raising ValueError."""
_check_predict(probabilities)
# tau can be unconstrained (i.e., also negative) as it might have been
# calibrated on logits.
return predict_threshold(probabilities, tau)
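# Hedged example: end-to-end threshold conformal prediction (THR) on random
# probabilities. The shapes, alpha and random data are illustrative
# assumptions, not prescribed by this module.
def _example_threshold_pipeline() -> jnp.ndarray:
  rng = jax.random.PRNGKey(0)
  val_rng, test_rng, label_rng = jax.random.split(rng, 3)
  val_probabilities = jax.nn.softmax(
      jax.random.normal(val_rng, (500, 10)), axis=1)
  val_labels = jax.random.randint(label_rng, (500,), 0, 10)
  test_probabilities = jax.nn.softmax(
      jax.random.normal(test_rng, (500, 10)), axis=1)
  # Calibrate the threshold on held-out data, then predict confidence sets.
  tau = calibrate_threshold_with_checks(
      val_probabilities, val_labels, alpha=0.1)
  return predict_threshold_with_checks(test_probabilities, tau)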
def _check_reg(classes: int, k_reg: Optional[int], lambda_reg: Optional[float]):
"""Helper for checking valid regularization arguments.
Args:
classes: number of classes
k_reg: target size of confidence sets
lambda_reg: strength of regularization
Raises:
Value Error if regularization arguments are incorrect.
"""
if k_reg is not None and lambda_reg is not None:
if lambda_reg < 0:
raise ValueError('Expecting k_lambda to be a float >= 0.')
if k_reg < 0 or k_reg > classes:
raise ValueError('Expecting k_reg to be an int in [0, n_classes].')
def calibrate_raps(
probabilities: jnp.ndarray,
labels: jnp.ndarray,
alpha: float = 0.1,
k_reg: Optional[int] = None,
lambda_reg: Optional[float] = None,
rng: Optional[jnp.array] = None,
quantile_fn: _QuantileFn = conformal_quantile) -> float:
"""Implementation of calibration for adaptive prediction sets.
Following [1] and [2], this function implements adaptive prediction sets (APS)
-- i.e., conformal classification. This methods estimates tau as outlined in
[2] but without the confidence set size regularization.
[1] Yaniv Romano, Matteo Sesia, Emmanuel J. Candes.
Classification withvalid and adaptive coverage.
NeurIPS, 2020.
[2] Anastasios N. Angelopoulos, Stephen Bates, Michael Jordan, Jitendra Malik.
Uncertainty sets for image classifiers using conformal prediction.
ICLR, 2021
Args:
probabilities: predicted probabilities on validation set
labels: ground truth labels on validation set
alpha: confidence level
k_reg: target confidence set size for regularization
lambda_reg: regularization weight
rng: random key for uniform variables
quantile_fn: function to compute conformal quantile
Returns:
Threshold tau such that with probability 1 - alpha, the confidence set
constructed from tau includes the true label
"""
reg = k_reg is not None and lambda_reg is not None
sorting = jnp.argsort(-probabilities, axis=1)
reverse_sorting = jnp.argsort(sorting)
indices = jnp.indices(probabilities.shape)
sorted_probabilities = probabilities[indices[0], sorting]
cum_probabilities = jnp.cumsum(sorted_probabilities, axis=1)
rand = jnp.zeros((sorted_probabilities.shape[0]))
if rng is not None:
rand = jax.random.uniform(rng, shape=(sorted_probabilities.shape[0],))
cum_probabilities -= jnp.expand_dims(rand, axis=1) * sorted_probabilities
conformity_scores = cum_probabilities[
jnp.arange(cum_probabilities.shape[0]),
reverse_sorting[jnp.arange(reverse_sorting.shape[0]), labels]]
if reg:
# in [2], it seems that L_i can be zero (i.e., true class has highest
# probability), but we add + 1 in the second line for validation
# as the true class is included by design and only
# additional classes should be regularized
conformity_reg = reverse_sorting[jnp.arange(reverse_sorting.shape[0]),
labels]
conformity_reg = conformity_reg - k_reg + 1
conformity_reg = lambda_reg * jnp.maximum(conformity_reg, 0)
conformity_scores += conformity_reg
tau = quantile_fn(conformity_scores, 1 - alpha)
return tau
def calibrate_raps_with_checks(
probabilities: jnp.ndarray,
labels: jnp.ndarray,
alpha: float = 0.1,
k_reg: Optional[int] = None,
lambda_reg: Optional[float] = None,
rng: Optional[jnp.array] = None,
quantile_fn: _QuantileFn = conformal_quantile) -> float:
"""calibrate_raps with extra argument checks raising ValueError."""
_check_calibrate(probabilities, labels, alpha)
_check_reg(probabilities.shape[1], k_reg, lambda_reg)
return calibrate_raps(
probabilities, labels, alpha, k_reg, lambda_reg, rng, quantile_fn)
def predict_raps(
probabilities: jnp.ndarray,
tau: float,
k_reg: Optional[int] = None,
lambda_reg: Optional[float] = None,
rng: Optional[jnp.array] = None) -> jnp.ndarray:
"""Get confidence sets using tau computed via aps_calibrate.
Given threshold tau, construct confidence sets as the top-k classes
such that the sum of probabilities is still below tau and add the top-(k+1)
class depending on uniform random variables.
See calibrate_raps for details and references.
Args:
probabilities: predicted probabilities on test set
tau: threshold
k_reg: target confidence set size for regularization
lambda_reg: regularization weight
rng: random key for uniform variables
Returns:
    Confidence sets as 0-1 array of same size as probabilities.
"""
reg = k_reg is not None and lambda_reg is not None
sorting = jnp.argsort(-probabilities, axis=1)
indices = jnp.indices(probabilities.shape)
sorted_probabilities = probabilities[indices[0], sorting]
cum_probabilities = jnp.cumsum(sorted_probabilities, axis=1)
if reg:
# in [2], L is the number of classes for which cumulative probability
# mass and regularizer are below tau + 1, we account for that in
# the first line by starting to count at 1
reg_probabilities = jnp.repeat(
jnp.expand_dims(1 + jnp.arange(cum_probabilities.shape[1]), axis=0),
cum_probabilities.shape[0], axis=0)
reg_probabilities = reg_probabilities - k_reg
reg_probabilities = jnp.maximum(reg_probabilities, 0)
cum_probabilities += lambda_reg * reg_probabilities
rand = jnp.ones((sorted_probabilities.shape[0]))
if rng is not None:
rand = jax.random.uniform(rng, shape=(sorted_probabilities.shape[0],))
cum_probabilities -= jnp.expand_dims(rand, axis=1) * sorted_probabilities
sorted_confidence_sets = (cum_probabilities <= tau)
# reverse sorting by argsort the sorting indices
reverse_sorting = jnp.argsort(sorting, axis=1)
confidence_sets = sorted_confidence_sets[indices[0], reverse_sorting]
return confidence_sets.astype(int)
def predict_raps_with_checks(
probabilities: jnp.ndarray,
tau: float,
k_reg: Optional[int] = None,
lambda_reg: Optional[float] = None,
rng: Optional[jnp.array] = None) -> jnp.ndarray:
"""predict_raps with extra argument checks raising ValueError."""
_check_predict(probabilities)
_check_reg(probabilities.shape[1], k_reg, lambda_reg)
if tau < 0:
    raise ValueError(
        'Expecting threshold tau to be greater than or equal to zero.')
return predict_raps(probabilities, tau, k_reg, lambda_reg, rng)
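# Hedged example: adaptive prediction sets with regularization (RAPS) on
# random probabilities. The alpha, k_reg and lambda_reg values are
# illustrative assumptions, not recommended settings.
def _example_raps_pipeline() -> jnp.ndarray:
  rng = jax.random.PRNGKey(0)
  data_rng, calibration_rng, test_rng = jax.random.split(rng, 3)
  probabilities = jax.nn.softmax(
      jax.random.normal(data_rng, (500, 10)), axis=1)
  labels = jax.random.randint(data_rng, (500,), 0, 10)
  tau = calibrate_raps_with_checks(
      probabilities, labels, alpha=0.1,
      k_reg=1, lambda_reg=0.01, rng=calibration_rng)
  return predict_raps_with_checks(
      probabilities, tau, k_reg=1, lambda_reg=0.01, rng=test_rng)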
| conformal_training-main | conformal_prediction.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Datasets and data augmentation."""
from typing import Tuple, Dict, Iterator, Any, Optional
import jax.numpy as jnp
import tensorflow as tf
import tensorflow_datasets as tfds
import auto_augment as augment
def load_data_split(
dataset: str = 'mnist',
val_examples: int = 10000,
data_dir: Optional[str] = './data',
) -> Dict[str, Any]:
"""Load 3-fold data split (train, val and test).
Get a 3-split of a dataset for conformal prediction.
We always preserve the original test set for comparable results,
but use a part of the training set as validation set.
This is used for datasets that come with both a train and a test split.
For datasets with only a train split, use create_data_split instead.
Args:
dataset: dataset to load
val_examples: number of validation examples to use
(will be the last val_examples examples from training set)
data_dir: data directory to load datasets in
Returns:
Three datasets corresponding to training, validation, test datasets,
and a tuple of the corresponding dataset info.
"""
if val_examples < 0:
raise ValueError('Cannot load a negative number of validation examples.')
if val_examples > 0:
train_ds, train_info = tfds.load(
dataset, split=f'train[:-{val_examples}]',
data_dir=data_dir, with_info=True)
val_ds = tfds.load(
dataset, split=f'train[-{val_examples}:]', data_dir=data_dir)
else:
train_ds, train_info = tfds.load(
dataset, split='train', data_dir=data_dir, with_info=True)
val_ds = None
test_ds, test_info = tfds.load(
dataset, split='test', data_dir=data_dir, with_info=True)
shape = tuple(train_info.features['image'].shape)
sizes = {
'train': train_info.splits['train'].num_examples - val_examples,
'val': val_examples,
'test': test_info.splits['test'].num_examples,
}
return {
'train': train_ds,
'val': val_ds,
'test': test_ds,
'sizes': sizes,
'shape': shape,
}
def create_data_split(
dataset: str, train_examples: int, val_examples: int,
padding_size: Optional[int] = None) -> Dict[str, Any]:
"""Create a 3-fold data split for a dataset with only a train split.
Also see load_data_split. This function has the same functionality but for
datasets which do not come with a train/test split by default.
Args:
dataset: dataset to load
train_examples: number of training examples to use
val_examples: number of validation examples to use
    padding_size: dataset size with padding; allows padding the dataset by
      repeating the first few elements, which can at most double the size
Returns:
Three datasets corresponding to training, validation, test datasets,
and a tuple of the corresponding dataset info.
"""
if train_examples <= 0:
raise ValueError(
'Cannot load a negative or zero number of training examples.')
if val_examples < 0:
raise ValueError('Cannot load a negative number of validation examples.')
ds, info = tfds.load(dataset, split='train', with_info=True)
if padding_size is not None:
ds = ds.repeat(2).take(padding_size)
if val_examples > 0:
val_ds = ds.skip(train_examples).take(val_examples)
else:
val_ds = None
train_ds = ds.take(train_examples)
test_ds = ds.skip(train_examples + val_examples)
if 'features' in info.features.keys():
shape = tuple(info.features['features'].shape)
elif 'image' in info.features.keys():
shape = tuple(info.features['image'].shape)
else:
raise ValueError('Could not determine feature/image shape.')
sizes = {
'train': train_examples,
'val': val_examples,
'test': info.splits['train'].num_examples - val_examples - train_examples,
}
return {
'train': train_ds,
'val': val_ds,
'test': test_ds,
'sizes': sizes,
'shape': shape,
}
def load_batches(
dataset: tf.data.Dataset) -> Iterator[Tuple[jnp.array, jnp.array]]:
"""Generator for iterating over batches.
Yields one batch of images and labels. Assumes a dataset on which
.batch was called to obtain proper batches.
Args:
dataset: the dataset to load batches from
Yields:
Two arrays corresponding to one batch of inputs and labels.
"""
for batch in tfds.as_numpy(dataset):
inputs = jnp.asarray(batch['image'])
labels = jnp.asarray(batch['label']).astype(int)
yield inputs, labels
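# Hedged sketch of iterating over batches: the dataset name, validation split
# size and batch size below are illustrative assumptions.
def _example_load_batches():
  data = load_data_split('mnist', val_examples=10000)
  val_ds = data['val'].batch(100)
  for inputs, labels in load_batches(val_ds):
    # inputs: (batch_size, height, width, channels); labels: (batch_size,).
    print(inputs.shape, labels.shape)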
def _augment_flip_crop(
image: tf.Tensor, shape: Tuple[int, int, int],
crop: int, mode: str, replace: int) -> tf.Tensor:
"""Apply random flip and crop augmentation.
Args:
image: input image
shape: image shape needed for cropping
crop: maximum cropping on each side
mode: mode used for padding before cropping, see tf.pad
replace: value to use for filling the cut out patch
Returns:
Augmented image.
"""
image = tf.image.random_flip_left_right(image)
image = tf.pad(
image, paddings=[[crop, crop], [crop, crop], [0, 0]], mode=mode,
constant_values=replace)
return tf.image.random_crop(image, shape)
def _augment_autoaugment(
image: tf.Tensor, shape: Tuple[int, int, int], replace: int) -> tf.Tensor:
"""Applies an AutoAugment policy to the input image.
Args:
image: input image
shape: image shape
replace: value to use for filling empty regions
Returns:
Augmented image
"""
return augment.distort_image_with_autoaugment(
image, augmentation_name='cifar10',
cutout_const=replace, translate_const=shape[1])
def augment_flip_crop(
batch: Dict[str, Any], shape: Tuple[int, int, int],
crop: int, mode: str, replace: int) -> Dict[str, Any]:
"""CIFAR10 standard data augmentation of clips and crops.
Args:
batch: dictionary containing single image and label
shape: image shape needed for cropping
crop: maximum cropping on each side
mode: mode used for padding before cropping, see tf.pad
replace: value to use for filling the cut out patch
Returns:
Dictionary with augmented image and unchanged label
"""
return {
'image': _augment_flip_crop(
batch['image'], shape=shape, crop=crop, mode=mode, replace=replace),
'label': batch['label']
}
def augment_autoaugment(
batch: Dict[str, Any], shape: Tuple[int, int, int],
replace: int) -> Dict[str, Any]:
"""CIFAR10 AutoAugment data augmentation.
Args:
batch: dictionary containing single image and label
shape: image shape
replace: value to use for filling the cut out patch
Returns:
Dictionary with augmented image and unchanged label
"""
return {
'image': _augment_autoaugment(
batch['image'], shape=shape, replace=replace),
'label': batch['label']
}
def augment_cutout(
batch: Dict[str, Any], replace: int, pad: int) -> Dict[str, Any]:
"""CIFAR10 augmentation with flip/crop, AutoAugment and Cutout.
Args:
batch: dictionary containing single image and label
replace: value to use for filling the cut out patch
pad: cutout size is 2*pad
Returns:
Dictionary with augmented image and unchanged label
"""
return {
'image': augment.cutout(batch['image'], pad_size=pad, replace=replace),
'label': batch['label']
}
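# Hedged sketch: composing the augmentation helpers with a tf.data pipeline.
# The dataset name, crop/pad sizes, fill value and batch size are illustrative
# assumptions, not defaults of this module.
def _example_augmentation_pipeline() -> tf.data.Dataset:
  import functools  # Local import to keep this sketch self-contained.
  data = load_data_split('cifar10', val_examples=5000)
  train_ds = data['train'].map(functools.partial(
      augment_flip_crop, shape=data['shape'], crop=4,
      mode='CONSTANT', replace=121))
  train_ds = train_ds.map(functools.partial(
      augment_cutout, replace=121, pad=8))
  return train_ds.batch(128)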
| conformal_training-main | data.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for evaluation in Colabs or notebooks."""
from typing import Tuple, Callable, Dict, Any, List
from absl import logging
import jax
import jax.numpy as jnp
import numpy as np
import pandas as pd
import sklearn.metrics
import conformal_prediction as cp
import evaluation as cpeval
import open_source_utils as cpstaging
_CalibrateFn = Callable[[jnp.ndarray, jnp.ndarray, jnp.ndarray], float]
_PredictFn = Callable[[jnp.ndarray, jnp.ndarray, jnp.ndarray], jnp.ndarray]
load_predictions = cpstaging.load_predictions
def get_threshold_fns(
alpha: float, jit: bool = True) -> Tuple[_CalibrateFn, _PredictFn]:
"""Prediction and calibration function for threshold conformal prediction.
Args:
alpha: confidence level
jit: jit prediction and calibration function
Returns:
Calibration and prediction functions
"""
def calibrate_threshold_fn(logits, labels, rng): # pylint: disable=unused-argument
probabilities = jax.nn.softmax(logits, axis=1)
return cp.calibrate_threshold(
probabilities, labels, alpha=alpha)
def predict_threshold_fn(logits, tau, rng): # pylint: disable=unused-argument
probabilities = jax.nn.softmax(logits, axis=1)
return cp.predict_threshold(
probabilities, tau)
if jit:
calibrate_threshold_fn = jax.jit(calibrate_threshold_fn)
predict_threshold_fn = jax.jit(predict_threshold_fn)
return calibrate_threshold_fn, predict_threshold_fn
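# Illustrative usage; variable names and shapes below are assumptions, not part
# of the original code:
#   calibrate_fn, predict_fn = get_threshold_fns(alpha=0.05)
#   tau = calibrate_fn(val_logits, val_labels, rng)       # scalar threshold
#   confidence_sets = predict_fn(test_logits, tau, rng)   # (num_test, classes)
# The rng argument is ignored by the threshold method but keeps the interface
# compatible with randomized methods such as RAPS below.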
def get_raps_fns(
alpha: float, k_reg: int, lambda_reg: float,
jit: bool = True) -> Tuple[_CalibrateFn, _PredictFn]:
"""Prediction and calibration function for RAPS.
Args:
alpha: confidence level
k_reg: k for regularization
lambda_reg: lambda for regularization
jit: jit prediction and calibration function
Returns:
Calibration and prediction functions
"""
def calibrate_raps_fn(logits, labels, rng):
probabilities = jax.nn.softmax(logits, axis=1)
return cp.calibrate_raps(
probabilities, labels, alpha=alpha,
k_reg=k_reg, lambda_reg=lambda_reg, rng=rng)
def predict_raps_fn(logits, tau, rng):
probabilities = jax.nn.softmax(logits, axis=1)
return cp.predict_raps(
probabilities, tau, k_reg=k_reg, lambda_reg=lambda_reg, rng=rng)
if jit:
calibrate_raps_fn = jax.jit(calibrate_raps_fn)
predict_raps_fn = jax.jit(predict_raps_fn)
return calibrate_raps_fn, predict_raps_fn
def get_groups(dataset: str, key: str) -> jnp.ndarray:
"""Helper to define groups for evaluation.
Args:
dataset: dataset identifier
key: type of grouping to load
Returns:
Class groups for given dataset and key
"""
if dataset == 'wine_quality':
if key == 'identity':
groups = jnp.arange(2)
else:
raise NotImplementedError
elif dataset == 'mnist':
if key == 'identity':
groups = jnp.arange(10)
elif key == 'singleton':
# Hardest class.
groups = jnp.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0], int)
elif key == 'groups':
# Odd vs. even.
groups = jnp.array([0, 1, 0, 1, 0, 1, 0, 1, 0, 1], int)
else:
raise NotImplementedError
elif dataset == 'emnist_byclass':
if key == 'identity':
groups = jnp.arange(52)
elif key == 'groups':
groups = jnp.array([
# Upper case:
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
# Lower case:
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1,
], int)
else:
raise NotImplementedError
elif dataset == 'fashion_mnist':
if key == 'identity':
groups = jnp.arange(10)
elif key == 'singleton':
# Hardest class.
groups = jnp.array([0, 0, 0, 0, 0, 0, 1, 0, 0, 0], int)
else:
raise NotImplementedError
elif dataset == 'cifar10':
if key == 'identity':
groups = jnp.arange(10)
elif key == 'singleton':
# Hardest class.
groups = jnp.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0], int)
elif key == 'groups':
# Human-made vs. animals.
groups = jnp.array([0, 0, 1, 1, 1, 1, 1, 1, 0, 0], int)
else:
raise NotImplementedError
elif dataset == 'cifar100':
if key == 'identity':
groups = jnp.arange(100)
elif key == 'groups':
# Human-made vs. animals or natural scenes,
# people are considered human-made.
# Grouping happened on coarse class ids, not fine ones.
groups = jnp.array([
1, 1, 0, 1, 1, 0, 1, 1, 0, 0,
0, 0, 0, 0, 1, 1, 0, 0, 1, 1,
0, 1, 0, 1, 1, 0, 1, 1, 0, 1,
1, 1, 1, 1, 1, 0, 1, 0, 1, 0,
0, 0, 1, 1, 1, 1, 0, 1, 0, 1,
1, 1, 1, 1, 1, 1, 1, 1, 0, 1,
1, 0, 1, 1, 1, 1, 1, 1, 0, 0,
1, 1, 1, 1, 1, 1, 0, 1, 1, 1,
1, 0, 1, 1, 0, 0, 0, 0, 1, 0,
0, 1, 1, 1, 0, 1, 1, 1, 0, 1,
], int)
elif key == 'hierarchy':
# These are essentially the coarse labels of CIFAR100.
groups = jnp.array([
4, 1, 14, 8, 0, 6, 7, 7, 18, 3,
3, 14, 9, 18, 7, 11, 3, 9, 7, 11,
6, 11, 5, 10, 7, 6, 13, 15, 3, 15,
0, 11, 1, 10, 12, 14, 16, 9, 11, 5,
5, 19, 8, 8, 15, 13, 14, 17, 18, 10,
16, 4, 17, 4, 2, 0, 17, 4, 18, 17,
10, 3, 2, 12, 12, 16, 12, 1, 9, 19,
2, 10, 0, 1, 16, 12, 9, 13, 15, 13,
16, 19, 2, 4, 6, 19, 5, 5, 8, 19,
18, 1, 2, 15, 6, 0, 17, 8, 14, 13,
], int)
else:
raise NotImplementedError
else:
raise ValueError('No groups defined for dataset %s.' % dataset)
return groups
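# Illustrative example: get_groups('cifar10', 'groups') returns
# jnp.array([0, 0, 1, 1, 1, 1, 1, 1, 0, 0]); assuming the usual CIFAR10 class
# ordering, the human-made classes (airplane, automobile, ship, truck) form
# group 0 and the animal classes form group 1.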
def _evaluate_accuracy(
logits: jnp.ndarray, labels: jnp.ndarray) -> pd.DataFrame:
"""Helper to compute accuracy on single dataset.
Args:
logits: predicted logits
labels: ground truth labels
Returns:
Single-row data frame with accuracy and class-conditional accuracies
"""
classes = logits.shape[1]
probabilities = jax.nn.softmax(logits, axis=1)
accuracy = float(cpeval.compute_accuracy(probabilities, labels))
accuracies = []
for k in range(classes):
accuracies.append(float(cpeval.compute_conditional_accuracy(
probabilities, labels, labels, k)))
columns = ['accuracy'] + [f'accuracy_{i}' for i in range(classes)]
data = np.array([accuracy] + accuracies)
return pd.DataFrame(np.expand_dims(data, axis=0), columns=columns)
def evaluate_accuracy(model: Dict[str, Any]) -> Dict[str, Any]:
"""Compute accuracy on val/test sets.
Args:
model: dictionary containing val/test logits and labels
Returns:
Accuracies as dictionary split in validation and test results
"""
res = {}
if model['val_labels'].size > 0:
res['val'] = _evaluate_accuracy(model['val_logits'], model['val_labels'])
res['test'] = _evaluate_accuracy(model['test_logits'], model['test_labels'])
return res
def evaluate_coverage(
data: Dict[str, Any], confidence_sets: jnp.ndarray,
labels: jnp.ndarray) -> pd.DataFrame:
"""Compute coverage on validation or test data.
Computes marginal, class- and group-conditional coverage.
Args:
data: data information with groups and loss matrix
confidence_sets: predicted confidence sets
labels: corresponding ground truth labels
Returns:
Single-row data frame with marginal and conditional coverage metrics
"""
classes = confidence_sets.shape[1]
coverage = float(cpeval.compute_coverage(confidence_sets, labels))
values = {'coverage': coverage}
# Setup groups for which we compute conditional coverage.
groups = {
'class': (labels, classes),
}
for key in data['groups']:
groups[key] = (data['groups'][key][labels],
jnp.max(data['groups'][key]) + 1)
compute_conditional_coverage = jax.jit(cpeval.compute_conditional_coverage)
for key in groups:
group_labels, num_groups = groups[key][0], groups[key][1]
for k in range(num_groups):
coverage_k = float(compute_conditional_coverage(
confidence_sets, labels, group_labels, k))
values['%s_coverage_%d' % (key, k)] = coverage_k
return pd.DataFrame(
np.expand_dims(np.array(list(values.values())), axis=0),
columns=list(values.keys()))
def evaluate_miscoverage(
data: Dict[str, Any], confidence_sets: jnp.ndarray,
labels: jnp.ndarray) -> pd.DataFrame:
"""Compute mis-coverage.
Args:
data: data information with groups and loss matrix
confidence_sets: predicted confidence sets
labels: corresponding ground truth labels
Returns:
Single-row data frame with mis-coverage metrics
"""
groups = {}
values = {}
for key in data['groups']:
groups[key] = (data['groups'][key],
jnp.max(data['groups'][key]) + 1)
compute_conditional_miscoverage = jax.jit(
cpeval.compute_conditional_miscoverage)
for key in groups:
group_indices, num_groups = groups[key][0], groups[key][1]
group_labels = group_indices[labels]
# For each example, we need to pick the right labels NOT to be included
# in the confidence sets:
one_hot_labels = (jnp.expand_dims(
group_labels, axis=1) != jnp.expand_dims(group_indices, axis=0))
one_hot_labels = one_hot_labels.astype(int)
miscoverage = float(compute_conditional_miscoverage(
confidence_sets, one_hot_labels,
jnp.zeros(confidence_sets.shape[0]), 0))
values['%s_miscoverage' % key] = miscoverage
values['%s_miscoverage_n' % key] = confidence_sets.shape[0]
for k in range(num_groups):
miscoverage_k = float(compute_conditional_miscoverage(
confidence_sets, one_hot_labels, group_labels, k))
values['%s_miscoverage_%d' % (key, k)] = miscoverage_k
values['%s_miscoverage_%d_n' % (key, k)] = jnp.sum(group_labels == k)
return pd.DataFrame(
np.expand_dims(np.array(list(values.values())), axis=0),
columns=list(values.keys()))
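# Illustrative example (assuming the MNIST odd/even grouping defined above): an
# example of digit 3 belongs to group 1, so one_hot_labels flags the even
# digits as the classes that should not appear in its confidence set, and
# compute_conditional_miscoverage scores the predicted sets against exactly
# those flagged classes.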
def evaluate_size(
data: Dict[str, Any], confidence_sets: jnp.ndarray,
labels: jnp.ndarray) -> pd.DataFrame:
"""Compute size on validation or test data.
Args:
data: data information with groups and loss matrix
confidence_sets: predicted confidence sets
labels: corresponding ground truth labels
Returns:
Single-row data frame with size metrics
"""
classes = confidence_sets.shape[1]
size, count = cpeval.compute_size(confidence_sets)
size = float(size)
values = {
'size': size,
'count': count,
}
# Setup groups for which we compute conditional sizes.
groups = {
'class': (labels, classes),
}
for key in data['groups']:
groups[key] = (data['groups'][key][labels],
jnp.max(data['groups'][key]) + 1)
compute_conditional_size = jax.jit(cpeval.compute_conditional_size)
for key in groups:
group_labels, num_groups = groups[key][0], groups[key][1]
for k in range(num_groups):
size_k, count_k = compute_conditional_size(
confidence_sets, group_labels, k)
values['%s_fraction_%d' % (key, k)] = float(count_k)/count
values['%s_size_%d' % (key, k)] = size_k
# Counts per confidence set size.
confidence_set_sizes = jnp.sum(confidence_sets, axis=1)
for k in range(classes):
_, count_k = compute_conditional_size(
confidence_sets, confidence_set_sizes, k)
values['size_%d' % k] = int(count_k)/float(confidence_sets.shape[0])
# Additionally compute cumulative size distribution.
for k in range(classes):
values['cumulative_size_%d' % k] = values['size_%d' % k]
if k > 0:
values['cumulative_size_%d' % k] += values['cumulative_size_%d' % (k - 1)]
return pd.DataFrame(
np.expand_dims(np.array(list(values.values())), axis=0),
columns=list(values.keys()))
def evaluate_confusion(
logits: jnp.ndarray, confidence_sets: jnp.ndarray,
labels: jnp.ndarray) -> pd.DataFrame:
"""Evaluate confusion of confidence sets.
Args:
logits: predicted logits for top-1 prediction
confidence_sets: predicted confidence sets
labels: ground truth labels
Returns:
Single-row data frame with flattened classification and coverage confusion matrices
"""
classes = confidence_sets.shape[1]
predictions = jnp.argmax(logits, axis=1)
# Regular classification confusion.
classification_confusion = sklearn.metrics.confusion_matrix(
labels, predictions)
# Confusion in a coverage sense.
coverage_confusion = np.zeros((classes, classes))
for k in range(classes):
coverage_confusion[k] = jnp.sum(confidence_sets[labels == k], axis=0)
# Note that we normalize the confusion matrices as the count is available
# separately.
classification_confusion = classification_confusion / logits.shape[0]
coverage_confusion = coverage_confusion / logits.shape[0]
values = np.expand_dims(np.concatenate((
classification_confusion.flatten(),
coverage_confusion.flatten()
)), axis=0)
columns = []
# Lint does not like double for loops, even in this simple case:
for i in range(classes):
for j in range(classes):
columns.append('classification_confusion_%d_%d' % (i, j))
for i in range(classes):
for j in range(classes):
columns.append('coverage_confusion_%d_%d' % (i, j))
return pd.DataFrame(values, columns=columns)
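# Illustrative reading of the output: row k of the coverage confusion block
# counts how often each class appears in the confidence sets of examples with
# true label k, normalized by the total number of examples, while the
# classification block is the usual argmax confusion matrix (also normalized).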
def evaluate_metrics(
data: Dict[str, Any], logits: jnp.ndarray,
confidence_sets: jnp.ndarray, labels: jnp.ndarray) -> List[pd.DataFrame]:
"""Evaluate metrics on validation or test set.
Args:
data: data information with groups and loss matrix
logits: predicted logits
confidence_sets: predicted confidence sets
labels: ground truth labels
Returns:
List of Pandas data frames containing evaluation metrics
"""
accuracy = _evaluate_accuracy(logits, labels)
coverage = evaluate_coverage(data, confidence_sets, labels)
miscoverage = evaluate_miscoverage(data, confidence_sets, labels)
size = evaluate_size(data, confidence_sets, labels)
results = [accuracy, coverage, miscoverage, size]
confusion = evaluate_confusion(logits, confidence_sets, labels)
results.append(confusion)
return results
def evaluate_conformal_prediction(
model: Dict[str, Any], calibrate_fn: _CalibrateFn, predict_fn: _PredictFn,
trials: int, rng: jnp.ndarray) -> Dict[str, Any]:
"""Evaluate conformal prediction using a calibration and prediction method.
Applies calibration and prediction on trials random splits into validation
and test sets, and returns the average and standard deviation of accuracy,
coverage and size metrics across these trials.
Calibration and prediction functions need to expect a rng key
as additional argument to allow randomization if possible.
Args:
model: dictionary containing val/test logits and labels
calibrate_fn: callable to use for calibration
predict_fn: callable to use for prediction
trials: number of trials
rng: random key
Returns:
Dictionary of results containing average and standard deviation
of metrics
"""
keys = model.keys()
if 'val_logits' not in keys or 'val_labels' not in keys:
raise ValueError('val_logits or val_labels not present.')
if 'test_logits' not in keys or 'test_labels' not in keys:
raise ValueError('test_logits or test_labels not present.')
rngs = jax.random.split(rng, 3*trials)
val_examples = model['val_labels'].shape[0]
test_examples = model['test_labels'].shape[0]
num_examples = val_examples + test_examples
logits = jnp.concatenate(
(model['val_logits'], model['test_logits']), axis=0)
# Casting explicitly to int as some calibration functions may involve
# indexing, which raises a hard-to-understand error if labels are not integers.
labels = jnp.concatenate(
(model['val_labels'], model['test_labels']), axis=0).astype(int)
val_results = pd.DataFrame()
test_results = pd.DataFrame()
for t in range(trials):
perm_rng = rngs[3*t + 0]
val_rng = rngs[3*t + 1]
test_rng = rngs[3*t + 2]
perm = jax.random.permutation(perm_rng, jnp.arange(num_examples))
val_logits_t = logits[perm[:val_examples]]
val_labels_t = labels[perm[:val_examples]]
test_logits_t = logits[perm[val_examples:]]
test_labels_t = labels[perm[val_examples:]]
tau = calibrate_fn(val_logits_t, val_labels_t, val_rng)
tau_t = np.array([tau]).reshape(1, -1) # For handling multiple taus.
columns = ['tau' if i == 0 else 'tau_%d' % i for i in range(tau_t.shape[1])]
tau_t = pd.DataFrame(tau_t, columns=columns)
test_confidence_sets_t = predict_fn(test_logits_t, tau, test_rng)
test_results_t = evaluate_metrics(
model['data'], test_logits_t, test_confidence_sets_t, test_labels_t)
test_results_t = pd.concat([tau_t] + test_results_t, axis=1)
test_results = pd.concat((test_results, test_results_t), axis=0)
logging.info('Trial %d: %f', t, tau)
results = {
'mean': {'val': val_results.mean(0), 'test': test_results.mean(0)},
'std': {'val': val_results.std(0), 'test': test_results.std(0)},
}
return results
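# Illustrative end-to-end sketch; the arguments of load_predictions are an
# assumption, only the dictionary keys used above are known:
#   model = load_predictions(...)  # val/test logits and labels plus 'data'
#   calibrate_fn, predict_fn = get_threshold_fns(alpha=0.05)
#   results = evaluate_conformal_prediction(
#       model, calibrate_fn, predict_fn, trials=10, rng=jax.random.PRNGKey(0))
#   print(results['mean']['test']['coverage'], results['mean']['test']['size'])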
| conformal_training-main | colab_utils.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experiment definitions for CIFAR100 experiments."""
from typing import Tuple, Dict, Any, Optional
import ml_collections as collections
import numpy as np
import experiments.experiment_utils as cpeutils
def get_parameters(
experiment: str,
sub_experiment: str,
config: collections.ConfigDict,
) -> Tuple[collections.ConfigDict, Optional[Dict[str, Any]]]:
"""Get parameters for CIFAR100 experiments.
Args:
experiment: experiment to run
sub_experiment: sub experiment, e.g., parameter to tune
config: experiment configuration
Returns:
Training configuration and parameter sweeps
"""
config.architecture = 'resnet'
config.resnet.version = 50
config.resnet.channels = 64 # 256
config.cifar_augmentation = 'standard+autoaugment+cutout'
parameter_sweep = None
groups = (
1, 1, 0, 1, 1, 0, 1, 1, 0, 0,
0, 0, 0, 0, 1, 1, 0, 0, 1, 1,
0, 1, 0, 1, 1, 0, 1, 1, 0, 1,
1, 1, 1, 1, 1, 0, 1, 0, 1, 0,
0, 0, 1, 1, 1, 1, 0, 1, 0, 1,
1, 1, 1, 1, 1, 1, 1, 1, 0, 1,
1, 0, 1, 1, 1, 1, 1, 1, 0, 0,
1, 1, 1, 1, 1, 1, 0, 1, 1, 1,
1, 0, 1, 1, 0, 0, 0, 0, 1, 0,
0, 1, 1, 1, 0, 1, 1, 1, 0, 1,
)
hierarchy = (
4, 1, 14, 8, 0, 6, 7, 7, 18, 3,
3, 14, 9, 18, 7, 11, 3, 9, 7, 11,
6, 11, 5, 10, 7, 6, 13, 15, 3, 15,
0, 11, 1, 10, 12, 14, 16, 9, 11, 5,
5, 19, 8, 8, 15, 13, 14, 17, 18, 10,
16, 4, 17, 4, 2, 0, 17, 4, 18, 17,
10, 3, 2, 12, 12, 16, 12, 1, 9, 19,
2, 10, 0, 1, 16, 12, 9, 13, 15, 13,
16, 19, 2, 4, 6, 19, 5, 5, 8, 19,
18, 1, 2, 15, 6, 0, 17, 8, 14, 13,
)
if experiment == 'models':
config.learning_rate = 0.05
config.batch_size = 100
else:
config.epochs = 50
config.finetune.enabled = True
config.finetune.path = 'cifar100_models_seed0/'
config.finetune.model_state = False
config.finetune.layers = 'res_net/~/logits'
config.finetune.reinitialize = True
if experiment == 'baseline':
config.mode = 'normal'
elif experiment == 'conformal':
config.mode = 'conformal'
config.conformal.coverage_loss = 'none'
config.conformal.loss_transform = 'log'
config.conformal.size_transform = 'identity'
config.conformal.rng = False
if sub_experiment == 'training':
config.learning_rate = 0.005
config.batch_size = 100
config.conformal.temperature = 1.
config.conformal.size_loss = 'normal'
config.conformal.method = 'threshold_logp'
config.conformal.size_weight = 0.005
elif sub_experiment.find('hierarchy_size_') >= 0:
config.learning_rate = 0.005
config.batch_size = 100
config.conformal.temperature = 1.
config.conformal.size_loss = 'normal'
config.conformal.method = 'threshold_logp'
config.conformal.size_weight = 0.005
selected_hierarchy = int(sub_experiment.replace('hierarchy_size_', ''))
def cifar100_size_weights(selected_group, selected_weight, num_groups):
"""Helper to define size weights for hierarchy weight manipulation."""
weights = np.ones(num_groups)
weights[selected_group] = selected_weight
return tuple(weights)
parameter_sweep = {
'key': 'conformal.size_weights',
'values': [
cpeutils.size_weights_group(
hierarchy,
cifar100_size_weights(selected_hierarchy, 1.1, 20)),
cpeutils.size_weights_group(
hierarchy,
cifar100_size_weights(selected_hierarchy, 1.25, 20)),
cpeutils.size_weights_group(
hierarchy,
cifar100_size_weights(selected_hierarchy, 1.5, 20)),
cpeutils.size_weights_group(
hierarchy,
cifar100_size_weights(selected_hierarchy, 2, 20)),
cpeutils.size_weights_group(
hierarchy,
cifar100_size_weights(selected_hierarchy, 3, 20)),
cpeutils.size_weights_group(
hierarchy,
cifar100_size_weights(selected_hierarchy, 4, 20)),
cpeutils.size_weights_group(
hierarchy,
cifar100_size_weights(selected_hierarchy, 5, 20)),
],
}
elif sub_experiment == 'group_zero':
config.learning_rate = 0.005
config.batch_size = 100
config.conformal.temperature = 1.
config.conformal.coverage_loss = 'classification'
config.conformal.size_loss = 'valid'
config.conformal.method = 'threshold_logp'
config.conformal.size_weight = 0.01
parameter_sweep = {
'key': 'conformal.loss_matrix',
'values': [
cpeutils.loss_matrix_group_zero(0.01, 1, groups, 100),
cpeutils.loss_matrix_group_zero(0.05, 1, groups, 100),
cpeutils.loss_matrix_group_zero(0.1, 1, groups, 100),
cpeutils.loss_matrix_group_zero(0.5, 1, groups, 100),
cpeutils.loss_matrix_group_zero(1, 1, groups, 100),
],
}
elif sub_experiment == 'group_one':
config.learning_rate = 0.005
config.batch_size = 100
config.conformal.temperature = 1.
config.conformal.coverage_loss = 'classification'
config.conformal.size_loss = 'valid'
config.conformal.method = 'threshold_logp'
config.conformal.size_weight = 0.01
parameter_sweep = {
'key': 'conformal.loss_matrix',
'values': [
cpeutils.loss_matrix_group_one(0.01, 1, groups, 100),
cpeutils.loss_matrix_group_one(0.05, 1, groups, 100),
cpeutils.loss_matrix_group_one(0.1, 1, groups, 100),
cpeutils.loss_matrix_group_one(0.5, 1, groups, 100),
cpeutils.loss_matrix_group_one(1, 1, groups, 100),
],
}
else:
raise ValueError('Invalid conformal sub experiment.')
else:
raise ValueError('Experiment not implemented.')
return config, parameter_sweep
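# Illustrative note: for sub_experiment 'hierarchy_size_3', the sweep above
# varies conformal.size_weights so that classes whose coarse label in the
# hierarchy tuple is 3 receive size weights 1.1 through 5, while classes in all
# other coarse groups keep weight 1.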
| conformal_training-main | experiments/run_cifar100.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for experiments."""
from typing import Sequence
import numpy as np
def loss_matrix_singleton_zero(
off: float, on: float, singleton: int, classes: int) -> Sequence[float]:
"""Loss matrix to discourage overlap with a single class.
Creates a classes x classes loss matrix where the elements
k, singleton are set to off for all k != singleton in [0, classes-1].
Args:
off: off-diagonal value to set
on: on-diagonal value to set
singleton: class to discourage overlap with
classes: number of classes
Returns:
Flattened loss matrix as tuple
"""
loss_matrix = np.eye(classes) * on
loss_matrix[:, singleton] = off
np.fill_diagonal(loss_matrix, on)
return tuple(loss_matrix.flatten())
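# Illustrative example: loss_matrix_singleton_zero(0.5, 1.0, 2, 4) flattens
#   [[1. , 0. , 0.5, 0. ],
#    [0. , 1. , 0.5, 0. ],
#    [0. , 0. , 1. , 0. ],
#    [0. , 0. , 0.5, 1. ]]
# i.e. every class other than 2 pays a penalty of 0.5 for including class 2 in
# its confidence sets.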
def loss_matrix_singleton_one(
off: float, on: float, singleton: int, classes: int) -> Sequence[float]:
"""Loss matrix to discourage overlap with all other classes.
Creates a classes x classes loss matrix where the elements
singleton, k are set to off for all k != singleton in [0, classes-1].
Args:
off: off-diagonal value to set
on: on-diagonal value to set
singleton: class whose confidence sets should not contain other classes
classes: number of classes
Returns:
Flattened loss matrix as tuple
"""
loss_matrix = np.eye(classes) * on
loss_matrix[singleton, :] = off
loss_matrix[singleton, singleton] = on
return tuple(loss_matrix.flatten())
def loss_matrix_group_zero(
off: float, on: float,
groups: Sequence[int], classes: int) -> Sequence[float]:
"""Discourage confidence sets of group 0 to contain group 1 classes.
Creates a loss matrix that discourages overlap between two groups of classes.
We penalize confidence sets of group 0 to contain classes of group 1.
Args:
off: off-diagonal value to set
on: on-diagonal value to set
groups: group index for each class
classes: number of classes
Returns:
Flattened loss matrix as tuple
"""
groups = np.array(groups)
loss_matrix = np.eye(classes) * on
true_indices = np.where(groups == 0)[0]
pred_indices = np.where(groups == 1)[0]
loss_matrix[np.ix_(true_indices, pred_indices)] = off
np.fill_diagonal(loss_matrix, on)
return tuple(loss_matrix.flatten())
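# Illustrative example: with four classes in groups (0, 0, 1, 1),
# loss_matrix_group_zero(0.5, 1.0, (0, 0, 1, 1), 4) flattens
#   [[1. , 0. , 0.5, 0.5],
#    [0. , 1. , 0.5, 0.5],
#    [0. , 0. , 1. , 0. ],
#    [0. , 0. , 0. , 1. ]]
# so confidence sets of group-0 classes are penalized for containing group-1
# classes, while the reverse direction is free.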
def loss_matrix_group_one(
off: float, on: float,
groups: Sequence[int], classes: int) -> Sequence[float]:
"""Discourage confidence sets of group 1 to contain group 0 classes.
Opposite of loss_matrix_group_zero.
Args:
off: off-diagonal value to set
on: on-diagonal value to set
groups: group index for each class
classes: number of classes
Returns:
Flattened loss matrix as tuple
"""
groups = np.array(groups)
loss_matrix = np.eye(classes) * on
true_indices = np.where(groups == 1)[0]
pred_indices = np.where(groups == 0)[0]
loss_matrix[np.ix_(true_indices, pred_indices)] = off
np.fill_diagonal(loss_matrix, on)
return tuple(loss_matrix.flatten())
def loss_matrix_importance(
weights: Sequence[float], classes: int) -> Sequence[float]:
"""Loss matrix with different weights on diagonal.
Creates a diagonal loss matrix with the given weights on the diagonal.
Args:
weights: on-diagonal weights
classes: number of classes
Returns:
Flattened loss matrix as tuple
"""
loss_matrix = np.eye(classes)
np.fill_diagonal(loss_matrix, np.array(weights))
return tuple(loss_matrix.flatten())
def loss_matrix_confusion(
class_a: int, class_b: int, off_a_b: float, off_b_a: float,
on: float, classes: int) -> Sequence[float]:
"""Loss matrix to penalize confusion between two classes.
Creates a loss matrix to discourage confusion between classes a and b using
the off-diagonal weights off_a_b and off_b_a and the on-diagonal weight on.
Args:
class_a: first class
class_b: second class
off_a_b: penalty of including class_b in confidence sets of class_a
off_b_a: penalty of including class_a in confidence sets of class_b
on: on-diagonal value
classes: number of classes
Returns:
Flattened loss matrix as tuple
"""
loss_matrix = np.eye(classes) * on
loss_matrix[class_a, class_b] = off_a_b
loss_matrix[class_b, class_a] = off_b_a
return tuple(loss_matrix.flatten())
def loss_matrix_confusion_triple(
class_a: int, class_b: int, class_c: int,
off: float, on: float, classes: int) -> Sequence[float]:
"""Loss matrix to penalize confusion between three classes.
Loss_matrix_confusion for three pairs of classes using the same off-diagonal
weight for all combinations.
Args:
class_a: first class
class_b: second class
class_c: third class
off: off-diagonal penalty to use
on: on-diagonal value
classes: number of classes
Returns:
Flattened loss matrix as tuple
"""
loss_matrix = np.eye(classes) * on
# Example: 4, 5, 7, pairs (4, 5), (5, 4), (4, 7), (7, 4), (5, 7), (7, 5)
loss_matrix[class_a, class_b] = off
loss_matrix[class_a, class_c] = off
loss_matrix[class_b, class_a] = off
loss_matrix[class_b, class_c] = off
loss_matrix[class_c, class_a] = off
loss_matrix[class_c, class_b] = off
return tuple(loss_matrix.flatten())
def loss_matrix_confusion_row(
selected_class: int, off: float, on: float,
classes: int) -> Sequence[float]:
"""Loss matrix to penalize confusion for one class with all others.
Loss_matrix_confusion for a full row of the coverage confusion matrix.
That is, the confidence sets of selected_class are penalized for including
any other class.
Args:
selected_class: class or row in coverage confusion matrix
off: off-diagonal weight to apply
on: on-diagonal value
classes: number of classes
Returns:
Flattened loss matrix as tuple
"""
loss_matrix = np.eye(classes) * on
loss_matrix[selected_class, :] = off
return tuple(loss_matrix.flatten())
def size_weights_group(
groups: Sequence[int], weights: Sequence[float]) -> Sequence[float]:
"""Helper to set up class size weights.
Define class weights for multiple groups of classes.
Args:
groups: group index per class
weights: weight for each group
Returns:
Size weights as tuple
"""
groups = np.array(groups)
weights = np.array(weights)
unique_groups = np.unique(groups)
if unique_groups.size != weights.size:
raise ValueError('Invalid groups or weights.')
size_weights = np.zeros(groups.shape)
for group, weight in zip(unique_groups, weights):
size_weights[groups == group] = weight
return tuple(size_weights)
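# Illustrative example: size_weights_group((0, 1, 0, 1), (2.0, 1.0)) returns
# (2.0, 1.0, 2.0, 1.0), i.e. group-0 classes receive size weight 2 and group-1
# classes weight 1; how these weights enter the size loss is defined in the
# training code elsewhere in the repository.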
def size_weights_selected(
selected_classes: Sequence[int],
weight: float, classes: int) -> Sequence[float]:
"""Helper to set up class size weights.
Obtain size weights where the weight of the selected classes is weight
and all others are 1.
Args:
selected_classes: classes to set the given size weight
weight: size weight to apply
classes: number of classes
Returns:
Size weights as tuple
"""
selected_classes = np.array(selected_classes)
size_weights = np.ones(classes)
size_weights[selected_classes] = weight
return tuple(size_weights)
| conformal_training-main | experiments/experiment_utils.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experiment definitions for MNIST."""
from typing import Tuple, Dict, Any, Optional
import ml_collections as collections
import experiments.experiment_utils as cpeutils
def get_parameters(
experiment: str,
sub_experiment: str,
config: collections.ConfigDict,
) -> Tuple[collections.ConfigDict, Optional[Dict[str, Any]]]:
"""Get parameters for MNIST experiments.
Args:
experiment: experiment to run
sub_experiment: sub experiment, e.g., parameter to tune
config: experiment configuration
Returns:
Training configuration and parameter sweeps
"""
config.architecture = 'mlp'
config.mlp.layers = 0
config.mlp.units = 32
config.epochs = 50
parameter_sweep = None
groups = (0, 1, 0, 1, 0, 1, 0, 1, 0, 1)
if experiment == 'models':
config.learning_rate = 0.05
config.batch_size = 100
elif experiment == 'conformal':
config.mode = 'conformal'
config.conformal.coverage_loss = 'none'
config.conformal.loss_transform = 'log'
config.conformal.size_transform = 'identity'
config.conformal.rng = False
if sub_experiment == 'training':
config.learning_rate = 0.05
config.batch_size = 500
config.conformal.temperature = 0.5
config.conformal.method = 'threshold_logp'
config.conformal.size_weight = 0.01
elif sub_experiment == 'group_zero':
config.conformal.coverage_loss = 'classification'
config.learning_rate = 0.01
config.batch_size = 100
config.conformal.temperature = 1
config.conformal.method = 'threshold_logp'
config.conformal.size_weight = 0.5
parameter_sweep = {
'key': 'conformal.loss_matrix',
'values': [
cpeutils.loss_matrix_group_zero(0.01, 1, groups, 10),
cpeutils.loss_matrix_group_zero(0.05, 1, groups, 10),
cpeutils.loss_matrix_group_zero(0.1, 1, groups, 10),
cpeutils.loss_matrix_group_zero(0.5, 1, groups, 10),
cpeutils.loss_matrix_group_zero(1, 1, groups, 10),
],
}
elif sub_experiment == 'group_one':
config.conformal.coverage_loss = 'classification'
config.learning_rate = 0.01
config.batch_size = 100
config.conformal.temperature = 1
config.conformal.method = 'threshold_logp'
config.conformal.size_weight = 0.5
parameter_sweep = {
'key': 'conformal.loss_matrix',
'values': [
cpeutils.loss_matrix_group_one(0.01, 1, groups, 10),
cpeutils.loss_matrix_group_one(0.05, 1, groups, 10),
cpeutils.loss_matrix_group_one(0.1, 1, groups, 10),
cpeutils.loss_matrix_group_one(0.5, 1, groups, 10),
cpeutils.loss_matrix_group_one(1, 1, groups, 10),
],
}
elif sub_experiment == 'singleton_zero':
config.conformal.coverage_loss = 'classification'
config.learning_rate = 0.01
config.batch_size = 100
config.conformal.temperature = 1
config.conformal.method = 'threshold_logp'
config.conformal.size_weight = 0.5
parameter_sweep = {
'key': 'conformal.loss_matrix',
'values': [
cpeutils.loss_matrix_singleton_zero(0.01, 1, 2, 10),
cpeutils.loss_matrix_singleton_zero(0.05, 1, 2, 10),
cpeutils.loss_matrix_singleton_zero(0.1, 1, 2, 10),
cpeutils.loss_matrix_singleton_zero(0.5, 1, 2, 10),
cpeutils.loss_matrix_singleton_zero(1, 1, 2, 10),
],
}
elif sub_experiment == 'singleton_one':
config.conformal.coverage_loss = 'classification'
config.learning_rate = 0.01
config.batch_size = 100
config.conformal.temperature = 1
config.conformal.method = 'threshold_logp'
config.conformal.size_weight = 0.5
parameter_sweep = {
'key': 'conformal.loss_matrix',
'values': [
cpeutils.loss_matrix_singleton_one(0.01, 1, 2, 10),
cpeutils.loss_matrix_singleton_one(0.05, 1, 2, 10),
cpeutils.loss_matrix_singleton_one(0.1, 1, 2, 10),
cpeutils.loss_matrix_singleton_one(0.5, 1, 2, 10),
cpeutils.loss_matrix_singleton_one(1, 1, 2, 10),
],
}
elif sub_experiment == 'group_size_0':
config.learning_rate = 0.05
config.batch_size = 500
config.conformal.temperature = 0.5
config.conformal.method = 'threshold_logp'
config.conformal.size_weight = 0.01
parameter_sweep = {
'key': 'conformal.size_weights',
'values': [
cpeutils.size_weights_group(groups, (1.1, 1)),
cpeutils.size_weights_group(groups, (1.25, 1)),
cpeutils.size_weights_group(groups, (1.5, 1)),
cpeutils.size_weights_group(groups, (2, 1)),
cpeutils.size_weights_group(groups, (3, 1)),
cpeutils.size_weights_group(groups, (4, 1)),
cpeutils.size_weights_group(groups, (5, 1)),
],
}
elif sub_experiment == 'group_size_1':
config.learning_rate = 0.05
config.batch_size = 500
config.conformal.temperature = 0.5
config.conformal.method = 'threshold_logp'
config.conformal.size_weight = 0.01
parameter_sweep = {
'key': 'conformal.size_weights',
'values': [
cpeutils.size_weights_group(groups, (1, 1.1)),
cpeutils.size_weights_group(groups, (1, 1.25)),
cpeutils.size_weights_group(groups, (1, 1.5)),
cpeutils.size_weights_group(groups, (1, 2)),
cpeutils.size_weights_group(groups, (1, 3)),
cpeutils.size_weights_group(groups, (1, 4)),
cpeutils.size_weights_group(groups, (1, 5)),
],
}
elif sub_experiment.find('class_size_') >= 0:
config.learning_rate = 0.05
config.batch_size = 500
config.conformal.temperature = 0.5
config.conformal.method = 'threshold_logp'
config.conformal.size_weight = 0.01
selected_class = int(sub_experiment.replace('class_size_', ''))
parameter_sweep = {
'key': 'conformal.size_weights',
'values': [
cpeutils.size_weights_selected([selected_class], 0, 10),
cpeutils.size_weights_selected([selected_class], 0.1, 10),
cpeutils.size_weights_selected([selected_class], 0.5, 10),
cpeutils.size_weights_selected([selected_class], 1, 10),
cpeutils.size_weights_selected([selected_class], 2, 10),
cpeutils.size_weights_selected([selected_class], 5, 10),
cpeutils.size_weights_selected([selected_class], 10, 10),
],
}
else:
raise ValueError('Invalid conformal sub experiment.')
else:
raise ValueError('Experiment not implemented.')
return config, parameter_sweep
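# Illustrative call; the base config construction is an assumption, it is built
# elsewhere in the repository:
#   config, sweep = get_parameters('conformal', 'class_size_2', base_config)
# For this sub experiment, sweep['key'] is 'conformal.size_weights' and
# sweep['values'] holds seven weight tuples in which class 2 is re-weighted
# between 0 and 10 while all other classes keep weight 1.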
| conformal_training-main | experiments/run_mnist.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Launch definitions for EMNIST/byClass."""
from typing import Tuple, Dict, Any, Optional
import ml_collections as collections
def get_parameters(
experiment: str,
unused_sub_experiment: str,
config: collections.ConfigDict,
) -> Tuple[collections.ConfigDict, Optional[Dict[str, Any]]]:
"""Get parameters for MNIST experiments.
Args:
experiment: experiment to run
unused_sub_experiment: sub experiment, e.g., parameter to tune
config: experiment configuration
Returns:
Training configuration and parameter sweeps
"""
config.epochs = 75
config.architecture = 'mlp'
config.mlp.layers = 2
config.mlp.units = 128
config.cnn.channels = 32
# We adjust the number of validation examples to the number of classes.
config.val_examples = 52 * 100 # 4700 balanced, 5200 byClass.
# For large batch sizes parts of validation/test sets might be
# missing otherwise.
parameter_sweep = None
if experiment == 'models':
config.learning_rate = 0.05
config.batch_size = 100
elif experiment == 'conformal':
config.mode = 'conformal'
config.conformal.coverage_loss = 'none'
config.conformal.loss_transform = 'log'
config.conformal.size_transform = 'identity'
config.conformal.rng = False
config.learning_rate = 0.01
config.batch_size = 100
config.conformal.temperature = 1.
config.conformal.size_loss = 'valid'
config.conformal.method = 'threshold_logp'
config.conformal.size_weight = 0.01
else:
raise ValueError('Experiment not implemented.')
return config, parameter_sweep
| conformal_training-main | experiments/run_emnist_byclass.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Launch definitions for WineQuality."""
from typing import Tuple, Dict, Any, Optional
import ml_collections as collections
import experiments.experiment_utils as cpeutils
def get_parameters(
experiment: str,
sub_experiment: str,
config: collections.ConfigDict,
) -> Tuple[collections.ConfigDict, Optional[Dict[str, Any]]]:
"""Get parameters for Wine Quality experiments.
Args:
experiment: experiment to run
sub_experiment: sub experiment, e.g., parameter to tune
config: experiment configuration
Returns:
Training configuration and parameter sweeps
"""
config.architecture = 'mlp'
config.mlp.layers = 2
config.mlp.units = 256
config.learning_rate = 0.01
config.val_examples = 500
config.epochs = 100
config.checkpoint_frequency = 10
parameter_sweep = None
if experiment == 'models':
config.learning_rate = 0.1
config.batch_size = 500
elif experiment == 'conformal':
config.mode = 'conformal'
config.conformal.coverage_loss = 'none'
config.conformal.loss_transform = 'log'
config.conformal.size_transform = 'identity'
config.conformal.rng = False
if sub_experiment == 'training':
config.learning_rate = 0.005
config.batch_size = 100
config.conformal.temperature = 0.5
config.conformal.method = 'threshold_logp'
config.conformal.size_weight = 0.05
elif sub_experiment == 'importance_0':
config.conformal.coverage_loss = 'classification'
config.learning_rate = 0.05
config.batch_size = 500
config.conformal.temperature = 0.5
config.conformal.method = 'threshold_p'
config.conformal.size_weight = 1.
config.conformal.size_loss = 'valid'
parameter_sweep = {
'key': 'conformal.loss_matrix',
'values': [
cpeutils.loss_matrix_importance((1, 0.25), 2),
cpeutils.loss_matrix_importance((1, 0.5), 2),
cpeutils.loss_matrix_importance((1.25, 1), 2),
cpeutils.loss_matrix_importance((1.5, 1), 2),
cpeutils.loss_matrix_importance((2, 1), 2),
cpeutils.loss_matrix_importance((4, 1), 2),
],
}
elif sub_experiment == 'importance_1':
config.conformal.coverage_loss = 'classification'
config.learning_rate = 0.05
config.batch_size = 500
config.conformal.temperature = 0.5
config.conformal.method = 'threshold_p'
config.conformal.size_weight = 1.
config.conformal.size_loss = 'valid'
parameter_sweep = {
'key': 'conformal.loss_matrix',
'values': [
cpeutils.loss_matrix_importance((0.25, 1), 2),
cpeutils.loss_matrix_importance((0.5, 1), 2),
cpeutils.loss_matrix_importance((1, 1.25), 2),
cpeutils.loss_matrix_importance((1, 1.5), 2),
cpeutils.loss_matrix_importance((1, 2), 2),
cpeutils.loss_matrix_importance((1, 4), 2),
],
}
elif sub_experiment == 'confusion_1_0':
config.conformal.coverage_loss = 'classification'
config.learning_rate = 0.05
config.batch_size = 500
config.conformal.temperature = 0.5
config.conformal.method = 'threshold_p'
config.conformal.size_weight = 1.
config.conformal.size_loss = 'valid'
parameter_sweep = {
'key': 'conformal.loss_matrix',
'values': [
cpeutils.loss_matrix_confusion(0, 1, 0, 0.01, 1, 2),
cpeutils.loss_matrix_confusion(0, 1, 0, 0.05, 1, 2),
cpeutils.loss_matrix_confusion(0, 1, 0, 0.1, 1, 2),
cpeutils.loss_matrix_confusion(0, 1, 0, 0.5, 1, 2),
cpeutils.loss_matrix_confusion(0, 1, 0, 1, 1, 2),
],
}
elif sub_experiment == 'confusion_0_1':
config.conformal.coverage_loss = 'classification'
config.learning_rate = 0.05
config.batch_size = 500
config.conformal.temperature = 0.5
config.conformal.method = 'threshold_p'
config.conformal.size_weight = 1.
config.conformal.size_loss = 'valid'
parameter_sweep = {
'key': 'conformal.loss_matrix',
'values': [
cpeutils.loss_matrix_confusion(0, 1, 0.01, 0, 1, 2),
cpeutils.loss_matrix_confusion(0, 1, 0.05, 0, 1, 2),
cpeutils.loss_matrix_confusion(0, 1, 0.1, 0, 1, 2),
cpeutils.loss_matrix_confusion(0, 1, 0.5, 0, 1, 2),
cpeutils.loss_matrix_confusion(0, 1, 1, 0, 1, 2),
],
}
else:
raise ValueError('Invalid conformal sub experiment.')
else:
raise ValueError('Experiment not implemented.')
return config, parameter_sweep
| conformal_training-main | experiments/run_wine_quality.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experiments configuration."""
| conformal_training-main | experiments/__init__.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Launch definitions for paper experiments."""
from typing import Tuple, Dict, Any, Optional
import ml_collections as collections
import experiments.experiment_utils as cpeutils
def get_parameters(
experiment: str,
sub_experiment: str,
config: collections.ConfigDict,
) -> Tuple[collections.ConfigDict, Optional[Dict[str, Any]]]:
"""Get parameters for Fashion-MNIST experiments.
Args:
experiment: experiment to run
sub_experiment: sub experiment, e.g., parameter to tune
config: experiment configuration
Returns:
Training configuration and parameter sweeps
"""
config.architecture = 'mlp'
config.mlp.layers = 2
config.cnn.channels = 128
config.resnet.version = 18
parameter_sweep = None
if experiment == 'models':
config.learning_rate = 0.05
config.batch_size = 100
elif experiment == 'conformal':
config.mode = 'conformal'
config.conformal.coverage_loss = 'none'
config.conformal.loss_transform = 'log'
config.conformal.size_transform = 'identity'
config.conformal.rng = False
if sub_experiment == 'training':
config.learning_rate = 0.01
config.batch_size = 100
config.conformal.temperature = 0.1
config.conformal.size_loss = 'normal'
config.conformal.method = 'threshold_logp'
config.conformal.size_weight = 0.01
elif sub_experiment == 'confusion_4_6':
config.learning_rate = 0.01
config.batch_size = 100
config.conformal.temperature = 0.1
config.conformal.coverage_loss = 'classification'
config.conformal.size_loss = 'valid'
config.conformal.method = 'threshold_logp'
config.conformal.size_weight = 0.5
parameter_sweep = {
'key': 'conformal.loss_matrix',
'values': [
cpeutils.loss_matrix_confusion(4, 6, 0.01, 0.01, 1, 10),
cpeutils.loss_matrix_confusion(4, 6, 0.05, 0.05, 1, 10),
cpeutils.loss_matrix_confusion(4, 6, 0.1, 0.1, 1, 10),
cpeutils.loss_matrix_confusion(4, 6, 0.5, 0.5, 1, 10),
cpeutils.loss_matrix_confusion(4, 6, 1, 1, 1, 10),
],
}
elif sub_experiment == 'confusion_2_4_6':
config.learning_rate = 0.01
config.batch_size = 100
config.conformal.temperature = 0.1
config.conformal.coverage_loss = 'classification'
config.conformal.size_loss = 'valid'
config.conformal.method = 'threshold_logp'
config.conformal.size_weight = 0.5
parameter_sweep = {
'key': 'conformal.loss_matrix',
'values': [
cpeutils.loss_matrix_confusion_triple(2, 4, 6, 0.01, 1, 10),
cpeutils.loss_matrix_confusion_triple(2, 4, 6, 0.05, 1, 10),
cpeutils.loss_matrix_confusion_triple(2, 4, 6, 0.1, 1, 10),
cpeutils.loss_matrix_confusion_triple(2, 4, 6, 0.5, 1, 10),
cpeutils.loss_matrix_confusion_triple(2, 4, 6, 1, 1, 10),
],
}
elif sub_experiment == 'confusion_6':
config.learning_rate = 0.01
config.batch_size = 100
config.conformal.temperature = 0.1
config.conformal.coverage_loss = 'classification'
config.conformal.size_loss = 'valid'
config.conformal.method = 'threshold_logp'
config.conformal.size_weight = 0.5
parameter_sweep = {
'key': 'conformal.loss_matrix',
'values': [
cpeutils.loss_matrix_confusion_row(6, 0.01, 1, 10),
cpeutils.loss_matrix_confusion_row(6, 0.05, 1, 10),
cpeutils.loss_matrix_confusion_row(6, 0.1, 1, 10),
cpeutils.loss_matrix_confusion_row(6, 0.5, 1, 10),
cpeutils.loss_matrix_confusion_row(6, 1, 1, 10),
],
}
else:
raise ValueError('Invalid conformal sub experiment.')
else:
raise ValueError('Experiment not implemented.')
return config, parameter_sweep
| conformal_training-main | experiments/run_fashion_mnist.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experiment definitions for CIFAR10."""
from typing import Tuple, Dict, Any, Optional
import ml_collections as collections
import experiments.experiment_utils as cpeutils
def get_parameters(
experiment: str,
sub_experiment: str,
config: collections.ConfigDict,
) -> Tuple[collections.ConfigDict, Optional[Dict[str, Any]]]:
"""Get parameters for CIFAR10 experiments.
Args:
experiment: experiment to run
sub_experiment: sub experiment, e.g., parameter to tune
config: experiment configuration
Returns:
Training configuration and parameter sweeps
"""
config.architecture = 'resnet'
config.resnet.version = 34
config.resnet.channels = 4
config.cifar_augmentation = 'standard'
parameter_sweep = None
groups = (0, 0, 1, 1, 1, 1, 1, 1, 0, 0)
# Training from scratch:
if experiment == 'models':
config.whitening = True
config.cifar_augmentation = 'standard'
# Fine-tuning
else:
config.epochs = 50
config.finetune.enabled = True
config.finetune.path = 'cifar10_models_seed0/'
config.finetune.model_state = False
config.finetune.layers = 'res_net/~/logits'
config.finetune.reinitialize = True
if experiment == 'baseline':
config.mode = 'normal'
elif experiment == 'conformal':
config.mode = 'conformal'
config.conformal.coverage_loss = 'none'
config.conformal.loss_transform = 'log'
config.conformal.size_transform = 'identity'
config.conformal.rng = False
if sub_experiment == 'training':
config.learning_rate = 0.01
config.batch_size = 500
config.conformal.temperature = 1.
config.conformal.size_loss = 'normal'
config.conformal.method = 'threshold_logp'
config.conformal.size_weight = 0.05
elif sub_experiment == 'group_zero':
config.learning_rate = 0.005
config.batch_size = 500
config.conformal.temperature = 0.1
config.conformal.coverage_loss = 'classification'
config.conformal.size_loss = 'valid'
config.conformal.method = 'threshold_logp'
config.conformal.size_weight = 1.
parameter_sweep = {
'key': 'conformal.loss_matrix',
'values': [
cpeutils.loss_matrix_group_zero(0.001, 1, groups, 10),
cpeutils.loss_matrix_group_zero(0.005, 1, groups, 10),
cpeutils.loss_matrix_group_zero(0.01, 1, groups, 10),
cpeutils.loss_matrix_group_zero(0.05, 1, groups, 10),
cpeutils.loss_matrix_group_zero(0.1, 1, groups, 10),
cpeutils.loss_matrix_group_zero(0.5, 1, groups, 10),
cpeutils.loss_matrix_group_zero(1, 1, groups, 10),
cpeutils.loss_matrix_group_zero(2, 1, groups, 10),
],
}
elif sub_experiment == 'group_one':
config.learning_rate = 0.005
config.batch_size = 500
config.conformal.temperature = 0.1
config.conformal.coverage_loss = 'classification'
config.conformal.size_loss = 'valid'
config.conformal.method = 'threshold_logp'
config.conformal.size_weight = 1.
parameter_sweep = {
'key': 'conformal.loss_matrix',
'values': [
cpeutils.loss_matrix_group_one(0.001, 1, groups, 10),
cpeutils.loss_matrix_group_one(0.005, 1, groups, 10),
cpeutils.loss_matrix_group_one(0.01, 1, groups, 10),
cpeutils.loss_matrix_group_one(0.05, 1, groups, 10),
cpeutils.loss_matrix_group_one(0.1, 1, groups, 10),
cpeutils.loss_matrix_group_one(0.5, 1, groups, 10),
cpeutils.loss_matrix_group_one(1, 1, groups, 10),
cpeutils.loss_matrix_group_one(2, 1, groups, 10),
],
}
elif sub_experiment == 'group_size_0':
config.learning_rate = 0.01
config.batch_size = 500
config.conformal.temperature = 1.
config.conformal.size_loss = 'normal'
config.conformal.method = 'threshold_logp'
config.conformal.size_weight = 0.05
parameter_sweep = {
'key': 'conformal.size_weights',
'values': [
cpeutils.size_weights_group(groups, (1.1, 1)),
cpeutils.size_weights_group(groups, (1.25, 1)),
cpeutils.size_weights_group(groups, (1.5, 1)),
cpeutils.size_weights_group(groups, (2, 1)),
cpeutils.size_weights_group(groups, (3, 1)),
cpeutils.size_weights_group(groups, (4, 1)),
cpeutils.size_weights_group(groups, (5, 1)),
],
}
elif sub_experiment == 'group_size_1':
config.learning_rate = 0.01
config.batch_size = 500
config.conformal.temperature = 1.
config.conformal.size_loss = 'normal'
config.conformal.method = 'threshold_logp'
config.conformal.size_weight = 0.05
parameter_sweep = {
'key': 'conformal.size_weights',
'values': [
cpeutils.size_weights_group(groups, (1, 1.1)),
cpeutils.size_weights_group(groups, (1, 1.25)),
cpeutils.size_weights_group(groups, (1, 1.5)),
cpeutils.size_weights_group(groups, (1, 2)),
cpeutils.size_weights_group(groups, (1, 3)),
cpeutils.size_weights_group(groups, (1, 4)),
cpeutils.size_weights_group(groups, (1, 5)),
],
}
elif sub_experiment.find('class_size_') >= 0:
config.learning_rate = 0.01
config.batch_size = 500
config.conformal.temperature = 1.
config.conformal.size_loss = 'normal'
config.conformal.method = 'threshold_logp'
config.conformal.size_weight = 0.05
selected_class = int(sub_experiment.replace('class_size_', ''))
parameter_sweep = {
'key': 'conformal.size_weights',
'values': [
cpeutils.size_weights_selected([selected_class], 0, 10),
cpeutils.size_weights_selected([selected_class], 0.1, 10),
cpeutils.size_weights_selected([selected_class], 0.5, 10),
cpeutils.size_weights_selected([selected_class], 1, 10),
cpeutils.size_weights_selected([selected_class], 2, 10),
cpeutils.size_weights_selected([selected_class], 5, 10),
cpeutils.size_weights_selected([selected_class], 10, 10),
],
}
else:
raise ValueError('Invalid conformal sub experiment.')
else:
raise ValueError('Experiment not implemented.')
return config, parameter_sweep
| conformal_training-main | experiments/run_cifar10.py |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Loads a sample video and classifies using a trained Kinetics checkpoint."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import i3d
_IMAGE_SIZE = 224
_SAMPLE_VIDEO_FRAMES = 79
_SAMPLE_PATHS = {
'rgb': 'data/v_CricketShot_g04_c01_rgb.npy',
'flow': 'data/v_CricketShot_g04_c01_flow.npy',
}
_CHECKPOINT_PATHS = {
'rgb': 'data/checkpoints/rgb_scratch/model.ckpt',
'rgb600': 'data/checkpoints/rgb_scratch_kin600/model.ckpt',
'flow': 'data/checkpoints/flow_scratch/model.ckpt',
'rgb_imagenet': 'data/checkpoints/rgb_imagenet/model.ckpt',
'flow_imagenet': 'data/checkpoints/flow_imagenet/model.ckpt',
}
_LABEL_MAP_PATH = 'data/label_map.txt'
_LABEL_MAP_PATH_600 = 'data/label_map_600.txt'
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string('eval_type', 'joint', 'rgb, rgb600, flow, or joint')
tf.flags.DEFINE_boolean('imagenet_pretrained', True, '')
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
eval_type = FLAGS.eval_type
imagenet_pretrained = FLAGS.imagenet_pretrained
NUM_CLASSES = 400
if eval_type == 'rgb600':
NUM_CLASSES = 600
if eval_type not in ['rgb', 'rgb600', 'flow', 'joint']:
raise ValueError('Bad `eval_type`, must be one of rgb, rgb600, flow, joint')
if eval_type == 'rgb600':
kinetics_classes = [x.strip() for x in open(_LABEL_MAP_PATH_600)]
else:
kinetics_classes = [x.strip() for x in open(_LABEL_MAP_PATH)]
if eval_type in ['rgb', 'rgb600', 'joint']:
# RGB input has 3 channels.
rgb_input = tf.placeholder(
tf.float32,
shape=(1, _SAMPLE_VIDEO_FRAMES, _IMAGE_SIZE, _IMAGE_SIZE, 3))
with tf.variable_scope('RGB'):
rgb_model = i3d.InceptionI3d(
NUM_CLASSES, spatial_squeeze=True, final_endpoint='Logits')
rgb_logits, _ = rgb_model(
rgb_input, is_training=False, dropout_keep_prob=1.0)
rgb_variable_map = {}
for variable in tf.global_variables():
if variable.name.split('/')[0] == 'RGB':
if eval_type == 'rgb600':
rgb_variable_map[variable.name.replace(':0', '')[len('RGB/inception_i3d/'):]] = variable
else:
rgb_variable_map[variable.name.replace(':0', '')] = variable
rgb_saver = tf.train.Saver(var_list=rgb_variable_map, reshape=True)
if eval_type in ['flow', 'joint']:
# Flow input has only 2 channels.
flow_input = tf.placeholder(
tf.float32,
shape=(1, _SAMPLE_VIDEO_FRAMES, _IMAGE_SIZE, _IMAGE_SIZE, 2))
with tf.variable_scope('Flow'):
flow_model = i3d.InceptionI3d(
NUM_CLASSES, spatial_squeeze=True, final_endpoint='Logits')
flow_logits, _ = flow_model(
flow_input, is_training=False, dropout_keep_prob=1.0)
flow_variable_map = {}
for variable in tf.global_variables():
if variable.name.split('/')[0] == 'Flow':
flow_variable_map[variable.name.replace(':0', '')] = variable
flow_saver = tf.train.Saver(var_list=flow_variable_map, reshape=True)
if eval_type == 'rgb' or eval_type == 'rgb600':
model_logits = rgb_logits
elif eval_type == 'flow':
model_logits = flow_logits
else:
model_logits = rgb_logits + flow_logits
model_predictions = tf.nn.softmax(model_logits)
with tf.Session() as sess:
feed_dict = {}
if eval_type in ['rgb', 'rgb600', 'joint']:
if imagenet_pretrained:
rgb_saver.restore(sess, _CHECKPOINT_PATHS['rgb_imagenet'])
else:
rgb_saver.restore(sess, _CHECKPOINT_PATHS[eval_type])
tf.logging.info('RGB checkpoint restored')
rgb_sample = np.load(_SAMPLE_PATHS['rgb'])
tf.logging.info('RGB data loaded, shape=%s', str(rgb_sample.shape))
feed_dict[rgb_input] = rgb_sample
if eval_type in ['flow', 'joint']:
if imagenet_pretrained:
flow_saver.restore(sess, _CHECKPOINT_PATHS['flow_imagenet'])
else:
flow_saver.restore(sess, _CHECKPOINT_PATHS['flow'])
tf.logging.info('Flow checkpoint restored')
flow_sample = np.load(_SAMPLE_PATHS['flow'])
tf.logging.info('Flow data loaded, shape=%s', str(flow_sample.shape))
feed_dict[flow_input] = flow_sample
out_logits, out_predictions = sess.run(
[model_logits, model_predictions],
feed_dict=feed_dict)
out_logits = out_logits[0]
out_predictions = out_predictions[0]
sorted_indices = np.argsort(out_predictions)[::-1]
print('Norm of logits: %f' % np.linalg.norm(out_logits))
print('\nTop classes and probabilities')
for index in sorted_indices[:20]:
print(out_predictions[index], out_logits[index], kinetics_classes[index])
if __name__ == '__main__':
tf.app.run(main)
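# Illustrative invocation; flag values are examples, not prescriptions:
#   python evaluate_sample.py --eval_type=joint --imagenet_pretrained=true
# This restores the RGB and flow ImageNet-pretrained checkpoints and prints the
# top-20 Kinetics classes for the bundled cricket-shot sample.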
| kinetics-i3d-master | evaluate_sample.py |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for I3D model code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import i3d
_IMAGE_SIZE = 224
_NUM_CLASSES = 400
class I3dTest(tf.test.TestCase):
"""Test of Inception I3D model, without real data."""
def testModelShapesWithSqueeze(self):
"""Test shapes after running some fake data through the model."""
i3d_model = i3d.InceptionI3d(
num_classes=_NUM_CLASSES, final_endpoint='Predictions')
inp = tf.placeholder(tf.float32, [None, 64, _IMAGE_SIZE, _IMAGE_SIZE, 3])
predictions, end_points = i3d_model(
inp, is_training=True, dropout_keep_prob=0.5)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op)
sample_input = np.zeros((5, 64, _IMAGE_SIZE, _IMAGE_SIZE, 3))
out_predictions, out_logits = sess.run(
[predictions, end_points['Logits']], {inp: sample_input})
self.assertEqual(out_predictions.shape, (5, _NUM_CLASSES))
self.assertEqual(out_logits.shape, (5, _NUM_CLASSES))
def testModelShapesWithoutSqueeze(self):
"""Test that turning off `spatial_squeeze` changes the output shape.
Also try setting different values for `dropout_keep_prob` and snt.BatchNorm
`is_training`.
"""
i3d_model = i3d.InceptionI3d(
num_classes=_NUM_CLASSES, spatial_squeeze=False,
final_endpoint='Predictions')
inp = tf.placeholder(tf.float32, [None, 64, _IMAGE_SIZE, _IMAGE_SIZE, 3])
predictions, end_points = i3d_model(
inp, is_training=False, dropout_keep_prob=1.0)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op)
sample_input = np.zeros((5, 64, _IMAGE_SIZE, _IMAGE_SIZE, 3))
out_predictions, out_logits = sess.run(
[predictions, end_points['Logits']], {inp: sample_input})
self.assertEqual(out_predictions.shape, (5, 1, 1, _NUM_CLASSES))
self.assertEqual(out_logits.shape, (5, 1, 1, _NUM_CLASSES))
def testInitErrors(self):
# Invalid `final_endpoint` string.
with self.assertRaises(ValueError):
_ = i3d.InceptionI3d(
num_classes=_NUM_CLASSES, final_endpoint='Conv3d_1a_8x8')
# Dropout keep probability must be in (0, 1].
i3d_model = i3d.InceptionI3d(num_classes=_NUM_CLASSES)
inp = tf.placeholder(tf.float32, [None, 64, _IMAGE_SIZE, _IMAGE_SIZE, 3])
with self.assertRaises(ValueError):
_, _ = i3d_model(inp, is_training=False, dropout_keep_prob=0)
# Height and width dimensions of the input should be _IMAGE_SIZE.
i3d_model = i3d.InceptionI3d(num_classes=_NUM_CLASSES)
inp = tf.placeholder(tf.float32, [None, 64, 10, 10, 3])
with self.assertRaises(ValueError):
_, _ = i3d_model(inp, is_training=False, dropout_keep_prob=0.5)
if __name__ == '__main__':
tf.test.main()
| kinetics-i3d-master | i3d_test.py |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Inception-v1 Inflated 3D ConvNet used for Kinetics CVPR paper.
The model is introduced in:
Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset
Joao Carreira, Andrew Zisserman
https://arxiv.org/pdf/1705.07750v1.pdf.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sonnet as snt
import tensorflow as tf
class Unit3D(snt.AbstractModule):
"""Basic unit containing Conv3D + BatchNorm + non-linearity."""
def __init__(self, output_channels,
kernel_shape=(1, 1, 1),
stride=(1, 1, 1),
activation_fn=tf.nn.relu,
use_batch_norm=True,
use_bias=False,
name='unit_3d'):
"""Initializes Unit3D module."""
super(Unit3D, self).__init__(name=name)
self._output_channels = output_channels
self._kernel_shape = kernel_shape
self._stride = stride
self._use_batch_norm = use_batch_norm
self._activation_fn = activation_fn
self._use_bias = use_bias
def _build(self, inputs, is_training):
"""Connects the module to inputs.
Args:
inputs: Inputs to the Unit3D component.
is_training: whether to use training mode for snt.BatchNorm (boolean).
Returns:
Outputs from the module.
"""
net = snt.Conv3D(output_channels=self._output_channels,
kernel_shape=self._kernel_shape,
stride=self._stride,
padding=snt.SAME,
use_bias=self._use_bias)(inputs)
if self._use_batch_norm:
bn = snt.BatchNorm()
net = bn(net, is_training=is_training, test_local_stats=False)
if self._activation_fn is not None:
net = self._activation_fn(net)
return net
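# A minimal usage sketch (illustrative only, not part of the original module):
# with the default stride (1, 1, 1) and SAME padding, a single Unit3D maps a
# `[batch, frames, height, width, channels]` tensor to the same spatio-temporal
# shape with `output_channels` channels, e.g.
#   video = tf.placeholder(tf.float32, [None, 16, 224, 224, 3])
#   out = Unit3D(output_channels=64, kernel_shape=[3, 3, 3])(
#       video, is_training=False)  # -> [None, 16, 224, 224, 64]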
class InceptionI3d(snt.AbstractModule):
"""Inception-v1 I3D architecture.
The model is introduced in:
Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset
Joao Carreira, Andrew Zisserman
https://arxiv.org/pdf/1705.07750v1.pdf.
See also the Inception architecture, introduced in:
Going deeper with convolutions
Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
http://arxiv.org/pdf/1409.4842v1.pdf.
"""
# Endpoints of the model in order. During construction, all the endpoints up
# to a designated `final_endpoint` are returned in a dictionary as the
# second return value.
VALID_ENDPOINTS = (
'Conv3d_1a_7x7',
'MaxPool3d_2a_3x3',
'Conv3d_2b_1x1',
'Conv3d_2c_3x3',
'MaxPool3d_3a_3x3',
'Mixed_3b',
'Mixed_3c',
'MaxPool3d_4a_3x3',
'Mixed_4b',
'Mixed_4c',
'Mixed_4d',
'Mixed_4e',
'Mixed_4f',
'MaxPool3d_5a_2x2',
'Mixed_5b',
'Mixed_5c',
'Logits',
'Predictions',
)
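  # For example, building with `final_endpoint='Mixed_3c'` returns the
  # 'Mixed_3c' tensor plus a dict containing every endpoint from
  # 'Conv3d_1a_7x7' up to and including 'Mixed_3c', in construction order.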
def __init__(self, num_classes=400, spatial_squeeze=True,
final_endpoint='Logits', name='inception_i3d'):
"""Initializes I3D model instance.
Args:
num_classes: The number of outputs in the logit layer (default 400, which
matches the Kinetics dataset).
spatial_squeeze: Whether to squeeze the spatial dimensions for the logits
before returning (default True).
final_endpoint: The model contains many possible endpoints.
`final_endpoint` specifies the last endpoint for the model to be built
up to. In addition to the output at `final_endpoint`, all the outputs
at endpoints up to `final_endpoint` will also be returned, in a
dictionary. `final_endpoint` must be one of
InceptionI3d.VALID_ENDPOINTS (default 'Logits').
name: A string (optional). The name of this module.
Raises:
ValueError: if `final_endpoint` is not recognized.
"""
if final_endpoint not in self.VALID_ENDPOINTS:
raise ValueError('Unknown final endpoint %s' % final_endpoint)
super(InceptionI3d, self).__init__(name=name)
self._num_classes = num_classes
self._spatial_squeeze = spatial_squeeze
self._final_endpoint = final_endpoint
def _build(self, inputs, is_training, dropout_keep_prob=1.0):
"""Connects the model to inputs.
Args:
inputs: Inputs to the model, which should have dimensions
`batch_size` x `num_frames` x 224 x 224 x `num_channels`.
is_training: whether to use training mode for snt.BatchNorm (boolean).
      dropout_keep_prob: Probability of keeping each unit in the tf.nn.dropout
        layer (float in (0, 1]).
Returns:
A tuple consisting of:
1. Network output at location `self._final_endpoint`.
2. Dictionary containing all endpoints up to `self._final_endpoint`,
indexed by endpoint name.
Raises:
ValueError: if `self._final_endpoint` is not recognized.
"""
if self._final_endpoint not in self.VALID_ENDPOINTS:
raise ValueError('Unknown final endpoint %s' % self._final_endpoint)
net = inputs
end_points = {}
end_point = 'Conv3d_1a_7x7'
net = Unit3D(output_channels=64, kernel_shape=[7, 7, 7],
stride=[2, 2, 2], name=end_point)(net, is_training=is_training)
end_points[end_point] = net
if self._final_endpoint == end_point: return net, end_points
end_point = 'MaxPool3d_2a_3x3'
net = tf.nn.max_pool3d(net, ksize=[1, 1, 3, 3, 1], strides=[1, 1, 2, 2, 1],
padding=snt.SAME, name=end_point)
end_points[end_point] = net
if self._final_endpoint == end_point: return net, end_points
end_point = 'Conv3d_2b_1x1'
net = Unit3D(output_channels=64, kernel_shape=[1, 1, 1],
name=end_point)(net, is_training=is_training)
end_points[end_point] = net
if self._final_endpoint == end_point: return net, end_points
end_point = 'Conv3d_2c_3x3'
net = Unit3D(output_channels=192, kernel_shape=[3, 3, 3],
name=end_point)(net, is_training=is_training)
end_points[end_point] = net
if self._final_endpoint == end_point: return net, end_points
end_point = 'MaxPool3d_3a_3x3'
net = tf.nn.max_pool3d(net, ksize=[1, 1, 3, 3, 1], strides=[1, 1, 2, 2, 1],
padding=snt.SAME, name=end_point)
end_points[end_point] = net
if self._final_endpoint == end_point: return net, end_points
end_point = 'Mixed_3b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = Unit3D(output_channels=64, kernel_shape=[1, 1, 1],
name='Conv3d_0a_1x1')(net, is_training=is_training)
with tf.variable_scope('Branch_1'):
branch_1 = Unit3D(output_channels=96, kernel_shape=[1, 1, 1],
name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_1 = Unit3D(output_channels=128, kernel_shape=[3, 3, 3],
name='Conv3d_0b_3x3')(branch_1,
is_training=is_training)
with tf.variable_scope('Branch_2'):
branch_2 = Unit3D(output_channels=16, kernel_shape=[1, 1, 1],
name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_2 = Unit3D(output_channels=32, kernel_shape=[3, 3, 3],
name='Conv3d_0b_3x3')(branch_2,
is_training=is_training)
with tf.variable_scope('Branch_3'):
branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1],
strides=[1, 1, 1, 1, 1], padding=snt.SAME,
name='MaxPool3d_0a_3x3')
branch_3 = Unit3D(output_channels=32, kernel_shape=[1, 1, 1],
name='Conv3d_0b_1x1')(branch_3,
is_training=is_training)
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
end_points[end_point] = net
if self._final_endpoint == end_point: return net, end_points
end_point = 'Mixed_3c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1],
name='Conv3d_0a_1x1')(net, is_training=is_training)
with tf.variable_scope('Branch_1'):
branch_1 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1],
name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_1 = Unit3D(output_channels=192, kernel_shape=[3, 3, 3],
name='Conv3d_0b_3x3')(branch_1,
is_training=is_training)
with tf.variable_scope('Branch_2'):
branch_2 = Unit3D(output_channels=32, kernel_shape=[1, 1, 1],
name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_2 = Unit3D(output_channels=96, kernel_shape=[3, 3, 3],
name='Conv3d_0b_3x3')(branch_2,
is_training=is_training)
with tf.variable_scope('Branch_3'):
branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1],
strides=[1, 1, 1, 1, 1], padding=snt.SAME,
name='MaxPool3d_0a_3x3')
branch_3 = Unit3D(output_channels=64, kernel_shape=[1, 1, 1],
name='Conv3d_0b_1x1')(branch_3,
is_training=is_training)
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
end_points[end_point] = net
if self._final_endpoint == end_point: return net, end_points
end_point = 'MaxPool3d_4a_3x3'
net = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 2, 2, 2, 1],
padding=snt.SAME, name=end_point)
end_points[end_point] = net
if self._final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = Unit3D(output_channels=192, kernel_shape=[1, 1, 1],
name='Conv3d_0a_1x1')(net, is_training=is_training)
with tf.variable_scope('Branch_1'):
branch_1 = Unit3D(output_channels=96, kernel_shape=[1, 1, 1],
name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_1 = Unit3D(output_channels=208, kernel_shape=[3, 3, 3],
name='Conv3d_0b_3x3')(branch_1,
is_training=is_training)
with tf.variable_scope('Branch_2'):
branch_2 = Unit3D(output_channels=16, kernel_shape=[1, 1, 1],
name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_2 = Unit3D(output_channels=48, kernel_shape=[3, 3, 3],
name='Conv3d_0b_3x3')(branch_2,
is_training=is_training)
with tf.variable_scope('Branch_3'):
branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1],
strides=[1, 1, 1, 1, 1], padding=snt.SAME,
name='MaxPool3d_0a_3x3')
branch_3 = Unit3D(output_channels=64, kernel_shape=[1, 1, 1],
name='Conv3d_0b_1x1')(branch_3,
is_training=is_training)
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
end_points[end_point] = net
if self._final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = Unit3D(output_channels=160, kernel_shape=[1, 1, 1],
name='Conv3d_0a_1x1')(net, is_training=is_training)
with tf.variable_scope('Branch_1'):
branch_1 = Unit3D(output_channels=112, kernel_shape=[1, 1, 1],
name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_1 = Unit3D(output_channels=224, kernel_shape=[3, 3, 3],
name='Conv3d_0b_3x3')(branch_1,
is_training=is_training)
with tf.variable_scope('Branch_2'):
branch_2 = Unit3D(output_channels=24, kernel_shape=[1, 1, 1],
name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_2 = Unit3D(output_channels=64, kernel_shape=[3, 3, 3],
name='Conv3d_0b_3x3')(branch_2,
is_training=is_training)
with tf.variable_scope('Branch_3'):
branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1],
strides=[1, 1, 1, 1, 1], padding=snt.SAME,
name='MaxPool3d_0a_3x3')
branch_3 = Unit3D(output_channels=64, kernel_shape=[1, 1, 1],
name='Conv3d_0b_1x1')(branch_3,
is_training=is_training)
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
end_points[end_point] = net
if self._final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4d'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1],
name='Conv3d_0a_1x1')(net, is_training=is_training)
with tf.variable_scope('Branch_1'):
branch_1 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1],
name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_1 = Unit3D(output_channels=256, kernel_shape=[3, 3, 3],
name='Conv3d_0b_3x3')(branch_1,
is_training=is_training)
with tf.variable_scope('Branch_2'):
branch_2 = Unit3D(output_channels=24, kernel_shape=[1, 1, 1],
name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_2 = Unit3D(output_channels=64, kernel_shape=[3, 3, 3],
name='Conv3d_0b_3x3')(branch_2,
is_training=is_training)
with tf.variable_scope('Branch_3'):
branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1],
strides=[1, 1, 1, 1, 1], padding=snt.SAME,
name='MaxPool3d_0a_3x3')
branch_3 = Unit3D(output_channels=64, kernel_shape=[1, 1, 1],
name='Conv3d_0b_1x1')(branch_3,
is_training=is_training)
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
end_points[end_point] = net
if self._final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4e'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = Unit3D(output_channels=112, kernel_shape=[1, 1, 1],
name='Conv3d_0a_1x1')(net, is_training=is_training)
with tf.variable_scope('Branch_1'):
branch_1 = Unit3D(output_channels=144, kernel_shape=[1, 1, 1],
name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_1 = Unit3D(output_channels=288, kernel_shape=[3, 3, 3],
name='Conv3d_0b_3x3')(branch_1,
is_training=is_training)
with tf.variable_scope('Branch_2'):
branch_2 = Unit3D(output_channels=32, kernel_shape=[1, 1, 1],
name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_2 = Unit3D(output_channels=64, kernel_shape=[3, 3, 3],
name='Conv3d_0b_3x3')(branch_2,
is_training=is_training)
with tf.variable_scope('Branch_3'):
branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1],
strides=[1, 1, 1, 1, 1], padding=snt.SAME,
name='MaxPool3d_0a_3x3')
branch_3 = Unit3D(output_channels=64, kernel_shape=[1, 1, 1],
name='Conv3d_0b_1x1')(branch_3,
is_training=is_training)
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
end_points[end_point] = net
if self._final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4f'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = Unit3D(output_channels=256, kernel_shape=[1, 1, 1],
name='Conv3d_0a_1x1')(net, is_training=is_training)
with tf.variable_scope('Branch_1'):
branch_1 = Unit3D(output_channels=160, kernel_shape=[1, 1, 1],
name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_1 = Unit3D(output_channels=320, kernel_shape=[3, 3, 3],
name='Conv3d_0b_3x3')(branch_1,
is_training=is_training)
with tf.variable_scope('Branch_2'):
branch_2 = Unit3D(output_channels=32, kernel_shape=[1, 1, 1],
name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_2 = Unit3D(output_channels=128, kernel_shape=[3, 3, 3],
name='Conv3d_0b_3x3')(branch_2,
is_training=is_training)
with tf.variable_scope('Branch_3'):
branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1],
strides=[1, 1, 1, 1, 1], padding=snt.SAME,
name='MaxPool3d_0a_3x3')
branch_3 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1],
name='Conv3d_0b_1x1')(branch_3,
is_training=is_training)
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
end_points[end_point] = net
if self._final_endpoint == end_point: return net, end_points
end_point = 'MaxPool3d_5a_2x2'
net = tf.nn.max_pool3d(net, ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1],
padding=snt.SAME, name=end_point)
end_points[end_point] = net
if self._final_endpoint == end_point: return net, end_points
end_point = 'Mixed_5b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = Unit3D(output_channels=256, kernel_shape=[1, 1, 1],
name='Conv3d_0a_1x1')(net, is_training=is_training)
with tf.variable_scope('Branch_1'):
branch_1 = Unit3D(output_channels=160, kernel_shape=[1, 1, 1],
name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_1 = Unit3D(output_channels=320, kernel_shape=[3, 3, 3],
name='Conv3d_0b_3x3')(branch_1,
is_training=is_training)
with tf.variable_scope('Branch_2'):
branch_2 = Unit3D(output_channels=32, kernel_shape=[1, 1, 1],
name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_2 = Unit3D(output_channels=128, kernel_shape=[3, 3, 3],
name='Conv3d_0a_3x3')(branch_2,
is_training=is_training)
with tf.variable_scope('Branch_3'):
branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1],
strides=[1, 1, 1, 1, 1], padding=snt.SAME,
name='MaxPool3d_0a_3x3')
branch_3 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1],
name='Conv3d_0b_1x1')(branch_3,
is_training=is_training)
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
end_points[end_point] = net
if self._final_endpoint == end_point: return net, end_points
end_point = 'Mixed_5c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = Unit3D(output_channels=384, kernel_shape=[1, 1, 1],
name='Conv3d_0a_1x1')(net, is_training=is_training)
with tf.variable_scope('Branch_1'):
branch_1 = Unit3D(output_channels=192, kernel_shape=[1, 1, 1],
name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_1 = Unit3D(output_channels=384, kernel_shape=[3, 3, 3],
name='Conv3d_0b_3x3')(branch_1,
is_training=is_training)
with tf.variable_scope('Branch_2'):
branch_2 = Unit3D(output_channels=48, kernel_shape=[1, 1, 1],
name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_2 = Unit3D(output_channels=128, kernel_shape=[3, 3, 3],
name='Conv3d_0b_3x3')(branch_2,
is_training=is_training)
with tf.variable_scope('Branch_3'):
branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1],
strides=[1, 1, 1, 1, 1], padding=snt.SAME,
name='MaxPool3d_0a_3x3')
branch_3 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1],
name='Conv3d_0b_1x1')(branch_3,
is_training=is_training)
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
end_points[end_point] = net
if self._final_endpoint == end_point: return net, end_points
end_point = 'Logits'
with tf.variable_scope(end_point):
net = tf.nn.avg_pool3d(net, ksize=[1, 2, 7, 7, 1],
strides=[1, 1, 1, 1, 1], padding=snt.VALID)
net = tf.nn.dropout(net, dropout_keep_prob)
logits = Unit3D(output_channels=self._num_classes,
kernel_shape=[1, 1, 1],
activation_fn=None,
use_batch_norm=False,
use_bias=True,
name='Conv3d_0c_1x1')(net, is_training=is_training)
if self._spatial_squeeze:
logits = tf.squeeze(logits, [2, 3], name='SpatialSqueeze')
averaged_logits = tf.reduce_mean(logits, axis=1)
end_points[end_point] = averaged_logits
if self._final_endpoint == end_point: return averaged_logits, end_points
end_point = 'Predictions'
predictions = tf.nn.softmax(averaged_logits)
end_points[end_point] = predictions
return predictions, end_points
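# A minimal usage sketch (illustrative only, not part of the original module).
# It builds the RGB I3D graph up to class probabilities using the TF1-style
# API assumed throughout this file; nothing here runs at import time.
def _example_build_i3d(num_frames=64, image_size=224):
  """Returns (predictions, end_points) for a freshly constructed I3D graph."""
  rgb_input = tf.placeholder(
      tf.float32, shape=(None, num_frames, image_size, image_size, 3))
  model = InceptionI3d(num_classes=400, final_endpoint='Predictions')
  predictions, end_points = model(
      rgb_input, is_training=False, dropout_keep_prob=1.0)
  return predictions, end_points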
| kinetics-i3d-master | i3d.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Install script for MuJoCo MPC."""
import os
import pathlib
import platform
import shutil
import setuptools
from setuptools.command import build_ext
from setuptools.command import build_py
import subprocess
Path = pathlib.Path
class GenerateProtoGrpcCommand(setuptools.Command):
"""Specialized setup command to handle batch proto compilation.
Generates the `batch_pb2{_grpc}.py` files from `batch_proto`. Assumes that
`grpc_tools.protoc` is installed.
"""
description = "Generate `.proto` files to Python protobuf and gRPC files."
user_options = []
def initialize_options(self):
self.build_lib = None
def finalize_options(self):
self.set_undefined_options("build_py", ("build_lib", "build_lib"))
def run(self):
"""Generate `batch.proto` into `batch_pb2{_grpc}.py`.
This function looks more complicated than what it has to be because the
`protoc` generator is very particular in the way it generates the imports
for the generated `batch_pb2_grpc.py` file. The final argument of the
`protoc` call has to be "mujoco_mpc/batch.proto" in order for the import to
become `from mujoco_mpc import [batch_pb2_proto_import]` instead of just
`import [batch_pb2_proto_import]`. The latter would fail because the name is
meant to be relative but python3 interprets it as an absolute import.
"""
# We import here because, if the import is at the top of this file, we
# cannot resolve the dependencies without having `grpcio-tools` installed.
from grpc_tools import protoc # pylint: disable=import-outside-toplevel
batch_proto_filename = "batch.proto"
batch_proto_source_path = Path("..", "grpc", batch_proto_filename).resolve()
assert self.build_lib is not None
build_lib_path = Path(self.build_lib).resolve()
proto_module_relative_path = Path(
"mujoco_mpc", "proto", batch_proto_filename
)
batch_proto_destination_path = Path(
build_lib_path, proto_module_relative_path
)
batch_proto_destination_path.parent.mkdir(parents=True, exist_ok=True)
# Copy `batch_proto_filename` into current source.
shutil.copy(batch_proto_source_path, batch_proto_destination_path)
protoc_command_parts = [
# We use `__file__` as the first argument the same way as is done by
# `protoc` when called as `__main__` here:
# https://github.com/grpc/grpc/blob/21996c37842035661323c71b9e7040345f0915e2/tools/distrib/python/grpcio_tools/grpc_tools/protoc.py#L172-L173.
__file__,
f"-I{build_lib_path}",
f"--python_out={build_lib_path}",
f"--grpc_python_out={build_lib_path}",
str(batch_proto_destination_path),
]
protoc_returncode = protoc.main(protoc_command_parts)
if protoc_returncode != 0:
raise subprocess.CalledProcessError(
returncode=protoc_returncode,
cmd=f"`protoc.main({protoc_command_parts})`",
)
self.spawn([
"touch",
str(batch_proto_destination_path.parent / "__init__.py"),
])
class CopyBatchServerBinaryCommand(setuptools.Command):
"""Specialized setup command to copy `batch_server` next to `batch.py`.
  Assumes that the C++ gRPC `batch_server` binary has been manually built and
  is located in the default `mujoco_mpc/build/bin` folder.
"""
description = "Copy `batch_server` next to `batch.py`."
user_options = []
def initialize_options(self):
self.build_lib = None
def finalize_options(self):
self.set_undefined_options("build_py", ("build_lib", "build_lib"))
def run(self):
self._copy_binary("batch_server")
# self._copy_binary("ui_batch_server")
def _copy_binary(self, binary_name):
source_path = Path(f"../build/bin/{binary_name}")
if not source_path.exists():
raise ValueError(
f"Cannot find `{binary_name}` binary from {source_path}. Please build"
" the `{binary_name}` C++ gRPC service."
)
assert self.build_lib is not None
build_lib_path = Path(self.build_lib).resolve()
destination_path = Path(build_lib_path, "mujoco_mpc", "mjpc", binary_name)
self.announce(f"{source_path.resolve()=}")
self.announce(f"{destination_path.resolve()=}")
destination_path.parent.mkdir(exist_ok=True, parents=True)
shutil.copy(source_path, destination_path)
class CopyTaskAssetsCommand(setuptools.Command):
"""Copies `batch_server` and `ui_batch_server` next to `batch.py`.
Assumes that the C++ gRPC `batch_server` binary has been manually built and
and located in the default `mujoco_mpc/build/bin` folder.
"""
description = (
"Copy task assets over to python source to make them accessible by"
" `Batch`."
)
user_options = []
def initialize_options(self):
self.build_lib = None
def finalize_options(self):
self.set_undefined_options("build_ext", ("build_lib", "build_lib"))
def run(self):
mjpc_tasks_path = Path(__file__).parent.parent / "mjpc" / "tasks"
source_paths = tuple(mjpc_tasks_path.rglob("*.xml"))
relative_source_paths = tuple(
p.relative_to(mjpc_tasks_path) for p in source_paths
)
assert self.build_lib is not None
build_lib_path = Path(self.build_lib).resolve()
destination_dir_path = Path(build_lib_path, "mujoco_mpc", "mjpc", "tasks")
self.announce(
f"Copying assets {relative_source_paths} from"
f" {mjpc_tasks_path} over to {destination_dir_path}."
)
for source_path, relative_source_path in zip(
source_paths, relative_source_paths
):
destination_path = destination_dir_path / relative_source_path
destination_path.parent.mkdir(exist_ok=True, parents=True)
shutil.copy(source_path, destination_path)
class BuildPyCommand(build_py.build_py):
"""Specialized Python builder to handle batch service dependencies.
  During build, this will generate the `batch_pb2{_grpc}.py` files and copy
  the task assets next to `batch.py`; the `batch_server` binary is copied by
  the `build_ext` step.
"""
user_options = build_py.build_py.user_options
def run(self):
self.run_command("generate_proto_grpc")
self.run_command("copy_task_assets")
super().run()
class CMakeExtension(setuptools.Extension):
"""A Python extension that has been prebuilt by CMake.
  We do not want distutils to handle the build process for our extensions, so
  we pass an empty list to the super constructor.
"""
def __init__(self, name):
super().__init__(name, sources=[])
class BuildCMakeExtension(build_ext.build_ext):
"""Uses CMake to build extensions."""
def run(self):
self._configure_and_build_batch_server()
self.run_command("copy_batch_server_binary")
def _configure_and_build_batch_server(self):
"""Check for CMake."""
cmake_command = "cmake"
build_cfg = "Debug"
mujoco_mpc_root = Path(__file__).parent.parent
mujoco_mpc_build_dir = mujoco_mpc_root / "build"
cmake_configure_args = [
"-DCMAKE_EXPORT_COMPILE_COMMANDS:BOOL=TRUE",
f"-DCMAKE_BUILD_TYPE:STRING={build_cfg}",
"-DBUILD_TESTING:BOOL=OFF",
"-DMJPC_BUILD_GRPC_SERVICE:BOOL=ON",
]
if platform.system() == "Darwin" and "ARCHFLAGS" in os.environ:
osx_archs = []
if "-arch x86_64" in os.environ["ARCHFLAGS"]:
osx_archs.append("x86_64")
if "-arch arm64" in os.environ["ARCHFLAGS"]:
osx_archs.append("arm64")
cmake_configure_args.append(
f"-DCMAKE_OSX_ARCHITECTURES={';'.join(osx_archs)}"
)
# TODO(hartikainen): We currently configure the builds into
# `mujoco_mpc/build`. This should use `self.build_{temp,lib}` instead, to
# isolate the Python builds from the C++ builds.
print("Configuring CMake with the following arguments:")
for arg in cmake_configure_args:
print(f" {arg}")
subprocess.check_call(
[
cmake_command,
*cmake_configure_args,
f"-S{mujoco_mpc_root.resolve()}",
f"-B{mujoco_mpc_build_dir.resolve()}",
],
cwd=mujoco_mpc_root,
)
print("Building `batch_server` and `ui_batch_server` with CMake")
subprocess.check_call(
[
cmake_command,
"--build",
str(mujoco_mpc_build_dir.resolve()),
"--target",
"batch_server",
# "ui_batch_server",
f"-j{os.cpu_count()}",
"--config",
build_cfg,
],
cwd=mujoco_mpc_root,
)
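    # The two calls above are roughly equivalent to running (a sketch; paths
    # are illustrative):
    #   cmake -DCMAKE_BUILD_TYPE:STRING=Debug -DMJPC_BUILD_GRPC_SERVICE:BOOL=ON \
    #     -S<mujoco_mpc_root> -B<mujoco_mpc_root>/build
    #   cmake --build <mujoco_mpc_root>/build --target batch_server --config Debug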
setuptools.setup(
name="mujoco_mpc",
version="0.1.0",
author="DeepMind",
author_email="[email protected]",
description="MuJoCo MPC (MJPC)",
url="https://github.com/google-deepmind/mujoco_mpc",
license="MIT",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Scientific/Engineering",
],
packages=setuptools.find_packages(),
python_requires=">=3.7",
install_requires=[
"grpcio-tools",
"grpcio",
],
extras_require={
"test": [
"absl-py",
"mujoco >= 2.3.3",
],
},
ext_modules=[CMakeExtension("batch_server")],
cmdclass={
"build_py": BuildPyCommand,
"build_ext": BuildCMakeExtension,
"generate_proto_grpc": GenerateProtoGrpcCommand,
"copy_batch_server_binary": CopyBatchServerBinaryCommand,
"copy_task_assets": CopyTaskAssetsCommand,
},
package_data={
"": [
"mjpc/batch_server",
# "mjpc/ui_batch_server",
],
},
)
| mujoco_mpc-main | python/setup_batch.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Install script for MuJoCo MPC."""
import os
import pathlib
import platform
import shutil
import setuptools
from setuptools.command import build_ext
from setuptools.command import build_py
import subprocess
Path = pathlib.Path
class GenerateProtoGrpcCommand(setuptools.Command):
"""Specialized setup command to handle agent proto compilation.
Generates the `agent_pb2{_grpc}.py` files from `agent_proto`. Assumes that
`grpc_tools.protoc` is installed.
"""
description = "Generate `.proto` files to Python protobuf and gRPC files."
user_options = []
def initialize_options(self):
self.build_lib = None
def finalize_options(self):
self.set_undefined_options("build_py", ("build_lib", "build_lib"))
def run(self):
"""Generate `agent.proto` into `agent_pb2{_grpc}.py`.
This function looks more complicated than what it has to be because the
`protoc` generator is very particular in the way it generates the imports
for the generated `agent_pb2_grpc.py` file. The final argument of the
`protoc` call has to be "mujoco_mpc/agent.proto" in order for the import to
become `from mujoco_mpc import [agent_pb2_proto_import]` instead of just
`import [agent_pb2_proto_import]`. The latter would fail because the name is
meant to be relative but python3 interprets it as an absolute import.
"""
# We import here because, if the import is at the top of this file, we
# cannot resolve the dependencies without having `grpcio-tools` installed.
from grpc_tools import protoc # pylint: disable=import-outside-toplevel
agent_proto_filename = "agent.proto"
agent_proto_source_path = Path("..", "grpc", agent_proto_filename).resolve()
assert self.build_lib is not None
build_lib_path = Path(self.build_lib).resolve()
proto_module_relative_path = Path("mujoco_mpc", "proto", agent_proto_filename)
agent_proto_destination_path = Path(build_lib_path, proto_module_relative_path)
agent_proto_destination_path.parent.mkdir(parents=True, exist_ok=True)
# Copy `agent_proto_filename` into current source.
shutil.copy(agent_proto_source_path, agent_proto_destination_path)
protoc_command_parts = [
# We use `__file__` as the first argument the same way as is done by
# `protoc` when called as `__main__` here:
# https://github.com/grpc/grpc/blob/21996c37842035661323c71b9e7040345f0915e2/tools/distrib/python/grpcio_tools/grpc_tools/protoc.py#L172-L173.
__file__,
f"-I{build_lib_path}",
f"--python_out={build_lib_path}",
f"--grpc_python_out={build_lib_path}",
str(agent_proto_destination_path),
]
protoc_returncode = protoc.main(protoc_command_parts)
if protoc_returncode != 0:
raise subprocess.CalledProcessError(
returncode=protoc_returncode,
cmd=f"`protoc.main({protoc_command_parts})`",
)
self.spawn(["touch", str(agent_proto_destination_path.parent / "__init__.py")])
class CopyAgentServerBinaryCommand(setuptools.Command):
"""Specialized setup command to copy `agent_server` next to `agent.py`.
  Assumes that the C++ gRPC `agent_server` binary has been manually built and
  is located in the default `mujoco_mpc/build/bin` folder.
"""
description = "Copy `agent_server` next to `agent.py`."
user_options = []
def initialize_options(self):
self.build_lib = None
def finalize_options(self):
self.set_undefined_options("build_py", ("build_lib", "build_lib"))
def run(self):
self._copy_binary("agent_server")
self._copy_binary("ui_agent_server")
def _copy_binary(self, binary_name):
source_path = Path(f"../build/bin/{binary_name}")
if not source_path.exists():
raise ValueError(
f"Cannot find `{binary_name}` binary from {source_path}. Please build"
" the `{binary_name}` C++ gRPC service."
)
assert self.build_lib is not None
build_lib_path = Path(self.build_lib).resolve()
destination_path = Path(build_lib_path, "mujoco_mpc", "mjpc", binary_name)
self.announce(f"{source_path.resolve()=}")
self.announce(f"{destination_path.resolve()=}")
destination_path.parent.mkdir(exist_ok=True, parents=True)
shutil.copy(source_path, destination_path)
class CopyTaskAssetsCommand(setuptools.Command):
"""Copies `agent_server` and `ui_agent_server` next to `agent.py`.
Assumes that the C++ gRPC `agent_server` binary has been manually built and
and located in the default `mujoco_mpc/build/bin` folder.
"""
description = (
"Copy task assets over to python source to make them accessible by" " `Agent`."
)
user_options = []
def initialize_options(self):
self.build_lib = None
def finalize_options(self):
self.set_undefined_options("build_ext", ("build_lib", "build_lib"))
def run(self):
mjpc_tasks_path = Path(__file__).parent.parent / "mjpc" / "tasks"
source_paths = tuple(mjpc_tasks_path.rglob("*.xml"))
relative_source_paths = tuple(p.relative_to(mjpc_tasks_path) for p in source_paths)
assert self.build_lib is not None
build_lib_path = Path(self.build_lib).resolve()
destination_dir_path = Path(build_lib_path, "mujoco_mpc", "mjpc", "tasks")
self.announce(
f"Copying assets {relative_source_paths} from"
f" {mjpc_tasks_path} over to {destination_dir_path}."
)
for source_path, relative_source_path in zip(source_paths, relative_source_paths):
destination_path = destination_dir_path / relative_source_path
destination_path.parent.mkdir(exist_ok=True, parents=True)
shutil.copy(source_path, destination_path)
class BuildPyCommand(build_py.build_py):
"""Specialized Python builder to handle agent service dependencies.
  During build, this will generate the `agent_pb2{_grpc}.py` files and copy
  the task assets next to `agent.py`; the `agent_server` binary is copied by
  the `build_ext` step.
"""
user_options = build_py.build_py.user_options
def run(self):
self.run_command("generate_proto_grpc")
self.run_command("copy_task_assets")
super().run()
class CMakeExtension(setuptools.Extension):
"""A Python extension that has been prebuilt by CMake.
  We do not want distutils to handle the build process for our extensions, so
  we pass an empty list to the super constructor.
"""
def __init__(self, name):
super().__init__(name, sources=[])
class BuildCMakeExtension(build_ext.build_ext):
"""Uses CMake to build extensions."""
def run(self):
self._configure_and_build_agent_server()
self.run_command("copy_agent_server_binary")
def _configure_and_build_agent_server(self):
"""Check for CMake."""
cmake_command = "cmake"
build_cfg = "Debug"
mujoco_mpc_root = Path(__file__).parent.parent
mujoco_mpc_build_dir = mujoco_mpc_root / "build"
cmake_configure_args = [
"-DCMAKE_EXPORT_COMPILE_COMMANDS:BOOL=TRUE",
f"-DCMAKE_BUILD_TYPE:STRING={build_cfg}",
"-DBUILD_TESTING:BOOL=OFF",
"-DMJPC_BUILD_GRPC_SERVICE:BOOL=ON",
]
if platform.system() == "Darwin" and "ARCHFLAGS" in os.environ:
osx_archs = []
if "-arch x86_64" in os.environ["ARCHFLAGS"]:
osx_archs.append("x86_64")
if "-arch arm64" in os.environ["ARCHFLAGS"]:
osx_archs.append("arm64")
cmake_configure_args.append(f"-DCMAKE_OSX_ARCHITECTURES={';'.join(osx_archs)}")
# TODO(hartikainen): We currently configure the builds into
# `mujoco_mpc/build`. This should use `self.build_{temp,lib}` instead, to
# isolate the Python builds from the C++ builds.
print("Configuring CMake with the following arguments:")
for arg in cmake_configure_args:
print(f" {arg}")
subprocess.check_call(
[
cmake_command,
*cmake_configure_args,
f"-S{mujoco_mpc_root.resolve()}",
f"-B{mujoco_mpc_build_dir.resolve()}",
],
cwd=mujoco_mpc_root,
)
print("Building `agent_server` and `ui_agent_server` with CMake")
subprocess.check_call(
[
cmake_command,
"--build",
str(mujoco_mpc_build_dir.resolve()),
"--target",
"agent_server",
"ui_agent_server",
f"-j{os.cpu_count()}",
"--config",
build_cfg,
],
cwd=mujoco_mpc_root,
)
setuptools.setup(
name="mujoco_mpc",
version="0.1.0",
author="DeepMind",
author_email="[email protected]",
description="MuJoCo MPC (MJPC)",
url="https://github.com/google-deepmind/mujoco_mpc",
license="MIT",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Scientific/Engineering",
],
packages=setuptools.find_packages(),
python_requires=">=3.7",
install_requires=[
"grpcio-tools",
"grpcio",
],
extras_require={
"test": [
"absl-py",
"mujoco >= 2.3.3",
],
},
ext_modules=[CMakeExtension("agent_server")],
cmdclass={
"build_py": BuildPyCommand,
"build_ext": BuildCMakeExtension,
"generate_proto_grpc": GenerateProtoGrpcCommand,
"copy_agent_server_binary": CopyAgentServerBinaryCommand,
"copy_task_assets": CopyTaskAssetsCommand,
},
package_data={
"": [
"mjpc/agent_server",
"mjpc/ui_agent_server",
"mjpc/tasks/**/*.xml",
],
},
)
| mujoco_mpc-main | python/setup.py |
"""Dataclass for MJPC task parameters and cost weights."""
import dataclasses
from typing import Optional, Union
@dataclasses.dataclass(frozen=True)
class MjpcParameters:
"""Dataclass to store and set task mode, task parameters and cost weights."""
mode: Optional[str] = None
task_parameters: dict[str, Union[str, float]] = dataclasses.field(
default_factory=dict
)
cost_weights: dict[str, float] = dataclasses.field(default_factory=dict)
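# A minimal usage sketch (illustrative; the mode, parameter and weight names
# below are hypothetical, not taken from any specific MJPC task):
def _example_parameters() -> MjpcParameters:
  return MjpcParameters(
      mode="default_mode",
      task_parameters={"Goal": 1.0},
      cost_weights={"Control": 0.1},
  )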
| mujoco_mpc-main | python/mujoco_mpc/mjpc_parameters.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python interface for interface with Batch."""
import atexit
import os
import pathlib
import socket
import subprocess
import sys
import tempfile
from typing import Literal, Optional
import grpc
import mujoco
import numpy as np
from numpy import typing as npt
# INTERNAL IMPORT
from mujoco_mpc.proto import batch_pb2
from mujoco_mpc.proto import batch_pb2_grpc
def find_free_port() -> int:
"""Find an available TCP port on the system.
This function creates a temporary socket, binds it to an available port
chosen by the operating system, and returns the chosen port number.
Returns:
int: An available TCP port number.
"""
  with socket.socket(family=socket.AF_INET6) as s:
    # SO_REUSEADDR must be set before bind for it to take effect.
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind(("", 0))
    return s.getsockname()[1]
class Batch:
"""`Batch` class to interface with MuJoCo MPC batch estimator.
Attributes:
port:
channel:
stub:
server_process:
"""
def __init__(
self,
model: mujoco.MjModel,
configuration_length: int,
server_binary_path: Optional[str] = None,
send_as: Literal["mjb", "xml"] = "xml",
colab_logging: bool = True,
):
# server
if server_binary_path is None:
binary_name = "batch_server"
server_binary_path = pathlib.Path(__file__).parent / "mjpc" / binary_name
self._colab_logging = colab_logging
self.port = find_free_port()
self.server_process = subprocess.Popen(
[str(server_binary_path), f"--mjpc_port={self.port}"],
stdout=subprocess.PIPE if colab_logging else None,
)
    if colab_logging:
      # `stdout` is only a pipe when `colab_logging` is enabled.
      os.set_blocking(self.server_process.stdout.fileno(), False)
atexit.register(self.server_process.kill)
credentials = grpc.local_channel_credentials(
grpc.LocalConnectionType.LOCAL_TCP
)
self.channel = grpc.secure_channel(f"localhost:{self.port}", credentials)
grpc.channel_ready_future(self.channel).result(timeout=10)
self.stub = batch_pb2_grpc.BatchStub(self.channel)
# initialize
self.init(
model,
configuration_length,
send_as=send_as,
)
def close(self):
self.channel.close()
self.server_process.kill()
self.server_process.wait()
def init(
self,
model: mujoco.MjModel,
configuration_length: int,
send_as: Literal["mjb", "xml"] = "xml",
):
"""Initialize the batch estimator estimation horizon with `configuration_length`.
Args:
model: optional `MjModel` instance, which, if provided, will be used as
the underlying model for planning. If not provided, the default MJPC
task xml will be used.
configuration_length: estimation horizon.
send_as: The serialization format for sending the model over gRPC; "xml".
"""
# setup model
def model_to_mjb(model: mujoco.MjModel) -> bytes:
buffer_size = mujoco.mj_sizeModel(model)
buffer = np.empty(shape=buffer_size, dtype=np.uint8)
mujoco.mj_saveModel(model, None, buffer)
return buffer.tobytes()
def model_to_xml(model: mujoco.MjModel) -> str:
tmp = tempfile.NamedTemporaryFile()
mujoco.mj_saveLastXML(tmp.name, model)
with pathlib.Path(tmp.name).open("rt") as f:
xml_string = f.read()
return xml_string
if model is not None:
if send_as == "mjb":
model_message = batch_pb2.MjModel(mjb=model_to_mjb(model))
else:
model_message = batch_pb2.MjModel(xml=model_to_xml(model))
else:
model_message = None
# initialize request
init_request = batch_pb2.InitRequest(
model=model_message,
configuration_length=configuration_length,
)
# initialize response
self._wait(self.stub.Init.future(init_request))
def data(
self,
index: int,
configuration: Optional[npt.ArrayLike] = [],
velocity: Optional[npt.ArrayLike] = [],
acceleration: Optional[npt.ArrayLike] = [],
time: Optional[npt.ArrayLike] = [],
ctrl: Optional[npt.ArrayLike] = [],
configuration_previous: Optional[npt.ArrayLike] = [],
sensor_measurement: Optional[npt.ArrayLike] = [],
sensor_prediction: Optional[npt.ArrayLike] = [],
sensor_mask: Optional[npt.ArrayLike] = [],
force_measurement: Optional[npt.ArrayLike] = [],
force_prediction: Optional[npt.ArrayLike] = [],
parameters: Optional[npt.ArrayLike] = [],
parameters_previous: Optional[npt.ArrayLike] = [],
) -> dict[str, np.ndarray]:
# assemble inputs
inputs = batch_pb2.Data(
configuration=configuration,
velocity=velocity,
acceleration=acceleration,
time=time,
ctrl=ctrl,
configuration_previous=configuration_previous,
sensor_measurement=sensor_measurement,
sensor_prediction=sensor_prediction,
sensor_mask=sensor_mask,
force_measurement=force_measurement,
force_prediction=force_prediction,
parameters=parameters,
parameters_previous=parameters_previous,
)
# data request
request = batch_pb2.DataRequest(data=inputs, index=index)
# data response
data = self._wait(self.stub.Data.future(request)).data
# return all data
return {
"configuration": np.array(data.configuration),
"velocity": np.array(data.velocity),
"acceleration": np.array(data.acceleration),
"time": np.array(data.time),
"ctrl": np.array(data.ctrl),
"configuration_previous": np.array(data.configuration_previous),
"sensor_measurement": np.array(data.sensor_measurement),
"sensor_prediction": np.array(data.sensor_prediction),
"sensor_mask": np.array(data.sensor_mask),
"force_measurement": np.array(data.force_measurement),
"force_prediction": np.array(data.force_prediction),
"parameters": np.array(data.parameters),
"parameters_previous": np.array(data.parameters_previous),
}
def settings(
self,
configuration_length: Optional[int] = None,
prior_flag: Optional[bool] = None,
sensor_flag: Optional[bool] = None,
force_flag: Optional[bool] = None,
max_search_iterations: Optional[int] = None,
max_smoother_iterations: Optional[int] = None,
gradient_tolerance: Optional[float] = None,
verbose_iteration: Optional[bool] = None,
verbose_optimize: Optional[bool] = None,
verbose_cost: Optional[bool] = None,
verbose_prior: Optional[bool] = None,
search_type: Optional[int] = None,
step_scaling: Optional[float] = None,
regularization_initial: Optional[float] = None,
regularization_scaling: Optional[float] = None,
time_scaling_force: Optional[bool] = None,
time_scaling_sensor: Optional[bool] = None,
search_direction_tolerance: Optional[float] = None,
cost_tolerance: Optional[float] = None,
assemble_prior_jacobian: Optional[bool] = None,
assemble_sensor_jacobian: Optional[bool] = None,
assemble_force_jacobian: Optional[bool] = None,
assemble_sensor_norm_hessian: Optional[bool] = None,
assemble_force_norm_hessian: Optional[bool] = None,
first_step_position_sensors: Optional[bool] = None,
last_step_position_sensors: Optional[bool] = None,
last_step_velocity_sensors: Optional[bool] = None,
  ) -> dict[str, int | bool | float]:
# assemble settings
inputs = batch_pb2.Settings(
configuration_length=configuration_length,
prior_flag=prior_flag,
sensor_flag=sensor_flag,
force_flag=force_flag,
max_search_iterations=max_search_iterations,
max_smoother_iterations=max_smoother_iterations,
gradient_tolerance=gradient_tolerance,
verbose_iteration=verbose_iteration,
verbose_optimize=verbose_optimize,
verbose_cost=verbose_cost,
verbose_prior=verbose_prior,
search_type=search_type,
step_scaling=step_scaling,
regularization_initial=regularization_initial,
regularization_scaling=regularization_scaling,
time_scaling_force=time_scaling_force,
time_scaling_sensor=time_scaling_sensor,
search_direction_tolerance=search_direction_tolerance,
cost_tolerance=cost_tolerance,
assemble_prior_jacobian=assemble_prior_jacobian,
assemble_sensor_jacobian=assemble_sensor_jacobian,
assemble_force_jacobian=assemble_force_jacobian,
assemble_sensor_norm_hessian=assemble_sensor_norm_hessian,
assemble_force_norm_hessian=assemble_force_norm_hessian,
first_step_position_sensors=first_step_position_sensors,
last_step_position_sensors=last_step_position_sensors,
last_step_velocity_sensors=last_step_velocity_sensors,
)
# settings request
request = batch_pb2.SettingsRequest(
settings=inputs,
)
# settings response
settings = self._wait(self.stub.Settings.future(request)).settings
# return all settings
return {
"configuration_length": settings.configuration_length,
"prior_flag": settings.prior_flag,
"sensor_flag": settings.sensor_flag,
"force_flag": settings.force_flag,
"max_search_iterations": settings.max_search_iterations,
"max_smoother_iterations": settings.max_smoother_iterations,
"gradient_tolerance": settings.gradient_tolerance,
"verbose_iteration": settings.verbose_iteration,
"verbose_optimize": settings.verbose_optimize,
"verbose_cost": settings.verbose_cost,
"verbose_prior": settings.verbose_prior,
"search_type": settings.search_type,
"step_scaling": settings.step_scaling,
"regularization_initial": settings.regularization_initial,
"regularization_scaling": settings.regularization_scaling,
"time_scaling_force": settings.time_scaling_force,
"time_scaling_sensor": settings.time_scaling_sensor,
"search_direction_tolerance": settings.search_direction_tolerance,
"cost_tolerance": settings.cost_tolerance,
"assemble_prior_jacobian": settings.assemble_prior_jacobian,
"assemble_sensor_jacobian": settings.assemble_sensor_jacobian,
"assemble_force_jacobian": settings.assemble_force_jacobian,
"assemble_sensor_norm_hessian": settings.assemble_sensor_norm_hessian,
"assemble_force_norm_hessian": settings.assemble_force_norm_hessian,
"first_step_position_sensors": settings.first_step_position_sensors,
"last_step_position_sensors": settings.last_step_position_sensors,
"last_step_velocity_sensors": settings.last_step_velocity_sensors,
}
def noise(
self,
process: Optional[npt.ArrayLike] = [],
sensor: Optional[npt.ArrayLike] = [],
parameter: Optional[npt.ArrayLike] = [],
) -> dict[str, np.ndarray]:
# assemble input noise
inputs = batch_pb2.Noise(
process=process,
sensor=sensor,
parameter=parameter,
)
# noise request
request = batch_pb2.NoiseRequest(noise=inputs)
# noise response
noise = self._wait(self.stub.Noise.future(request)).noise
# return noise
return {
"process": np.array(noise.process),
"sensor": np.array(noise.sensor),
"parameter": np.array(noise.parameter),
}
def norm(
self,
sensor_type: Optional[npt.ArrayLike] = [],
sensor_parameters: Optional[npt.ArrayLike] = [],
) -> dict[str, np.ndarray]:
# assemble input norm data
inputs = batch_pb2.Norm(
sensor_type=sensor_type,
sensor_parameters=sensor_parameters,
)
# norm request
request = batch_pb2.NormRequest(norm=inputs)
# norm response
norm = self._wait(self.stub.Norms.future(request)).norm
# return all norm data
return {
"sensor_type": norm.sensor_type,
"sensor_parameters": np.array(norm.sensor_parameters),
}
def cost(
self,
derivatives: Optional[bool] = False,
internals: Optional[bool] = False,
) -> dict[str, float | np.ndarray | int | list]:
# cost request
request = batch_pb2.CostRequest(
derivatives=derivatives, internals=internals
)
# cost response
cost = self._wait(self.stub.Cost.future(request))
# return all costs
return {
"total": cost.total,
"prior": cost.prior,
"sensor": cost.sensor,
"force": cost.force,
"initial": cost.initial,
"gradient": np.array(cost.gradient) if derivatives else [],
"hessian": np.array(cost.hessian).reshape(cost.nvar, cost.nvar)
if derivatives
else [],
"residual_prior": np.array(cost.residual_prior) if internals else [],
"residual_sensor": np.array(cost.residual_sensor) if internals else [],
"residual_force": np.array(cost.residual_force) if internals else [],
"jacobian_prior": np.array(cost.jacobian_prior).reshape(
cost.nvar, cost.nvar
)
if internals
else [],
"jacobian_sensor": np.array(cost.jacobian_sensor).reshape(
cost.nsensor, cost.nvar
)
if internals
else [],
"jacobian_force": np.array(cost.jacobian_force).reshape(
cost.nforce, cost.nvar
)
if internals
else [],
"norm_gradient_sensor": np.array(cost.norm_gradient_sensor)
if internals
else [],
"norm_gradient_force": np.array(cost.norm_gradient_force)
if internals
else [],
"prior_matrix": np.array(cost.prior_matrix).reshape(
cost.nvar, cost.nvar
)
if internals
else [],
"norm_hessian_sensor": np.array(cost.norm_hessian_sensor).reshape(
cost.nsensor, cost.nsensor
)
if internals
else [],
"norm_hessian_force": np.array(cost.norm_hessian_force).reshape(
cost.nforce, cost.nforce
)
if internals
else [],
"nvar": cost.nvar,
"nsensor": cost.nsensor,
"nforce": cost.nforce,
}
def status(self) -> dict[str, int]:
# status request
request = batch_pb2.StatusRequest()
# status response
status = self._wait(self.stub.Status.future(request)).status
# return all status
return {
"search_iterations": status.search_iterations,
"smoother_iterations": status.smoother_iterations,
"step_size": status.step_size,
"regularization": status.regularization,
"gradient_norm": status.gradient_norm,
"search_direction_norm": status.search_direction_norm,
"solve_status": status.solve_status,
"cost_difference": status.cost_difference,
"improvement": status.improvement,
"expected": status.expected,
"reduction_ratio": status.reduction_ratio,
}
def shift(self, shift: int) -> int:
# shift request
request = batch_pb2.ShiftRequest(shift=shift)
# return head (for testing)
return self._wait(self.stub.Shift.future(request)).head
def reset(self):
# reset request
request = batch_pb2.ResetRequest()
# reset response
self._wait(self.stub.Reset.future(request))
def optimize(self):
# optimize request
request = batch_pb2.OptimizeRequest()
# optimize response
self._wait(self.stub.Optimize.future(request))
def prior_weights(
self, weights: Optional[npt.ArrayLike] = None
) -> np.ndarray:
# prior request
request = batch_pb2.PriorWeightsRequest(
weights=weights.flatten() if weights is not None else None
)
# prior response
response = self._wait(self.stub.PriorWeights.future(request))
# reshape prior to (dimension, dimension)
mat = np.array(response.weights).reshape(
response.dimension, response.dimension
)
# return prior matrix
return mat
def sensor_info(self) -> dict[str, int]:
# info request
request = batch_pb2.SensorInfoRequest()
# info response
response = self._wait(self.stub.SensorInfo.future(request))
# return info
return {
"start_index": response.start_index,
"num_measurements": response.num_measurements,
"dim_measurements": response.dim_measurements,
}
def measurements_from_sensordata(self, data: npt.ArrayLike) -> np.ndarray:
# get sensor info
info = self.sensor_info()
# return measurements from sensor data
index = info["start_index"]
dim = info["dim_measurements"]
return data[index:(index + dim)]
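    # Worked example (illustrative): with start_index=2 and dim_measurements=3,
    # a sensordata vector [a, b, c, d, e, f] yields the slice [c, d, e].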
def print_cost(self):
# get costs
cost = self.cost()
# print
print("cost:")
print(" [total] = ", cost["total"])
print(" prior = ", cost["prior"])
print(" sensor = ", cost["sensor"])
print(" force = ", cost["force"])
print(" (initial = ", cost["initial"], ")")
def print_status(self):
# get status
status = self.status()
# print
print("status:")
print(" search iterations = ", status["search_iterations"])
print(" smoother iterations = ", status["smoother_iterations"])
print(" step size = ", status["step_size"])
print(" regularization = ", status["regularization"])
print(" gradient norm = ", status["gradient_norm"])
def status_code(code):
if code == 0:
return "UNSOLVED"
elif code == 1:
return "SEARCH_FAILURE"
elif code == 2:
return "MAX_ITERATIONS_FAILURE"
elif code == 3:
return "SMALL_DIRECTION_FAILURE"
elif code == 4:
return "MAX_REGULARIZATION_FAILURE"
elif code == 5:
return "COST_DIFFERENCE_FAILURE"
elif code == 6:
return "EXPECTED_DECREASE_FAILURE"
elif code == 7:
return "SOLVED"
else:
return "CODE_ERROR"
print("- solve status = ", status_code(status["solve_status"]))
def _wait(self, future):
"""Waits for the future to complete, while printing out subprocess stdout."""
if self._colab_logging:
while True:
line = self.server_process.stdout.readline()
if line:
sys.stdout.write(line.decode("utf-8"))
if future.done():
break
return future.result()
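# A minimal end-to-end usage sketch (illustrative only; the XML path and data
# values are assumptions, not part of this module). It shows the typical call
# sequence: construct, write data per time index, optimize, then read back the
# estimated configurations.
def _example_batch_usage(model_xml_path: str, horizon: int = 3):
  model = mujoco.MjModel.from_xml_path(model_xml_path)
  batch = Batch(model=model, configuration_length=horizon)
  try:
    for t in range(horizon):
      batch.data(
          t,
          configuration=model.qpos0,
          time=[t * model.opt.timestep],
      )
    batch.optimize()
    return [batch.data(t)["configuration"] for t in range(horizon)]
  finally:
    batch.close()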
| mujoco_mpc-main | python/mujoco_mpc/batch.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| mujoco_mpc-main | python/mujoco_mpc/__init__.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from absl.testing import absltest
from absl.testing import parameterized
import grpc
import mujoco
from mujoco_mpc import agent as agent_lib
import numpy as np
import pathlib
def get_observation(model, data):
del model
return np.concatenate([data.qpos, data.qvel])
def environment_step(model, data, action):
data.ctrl[:] = action
mujoco.mj_step(model, data)
return get_observation(model, data)
def environment_reset(model, data):
mujoco.mj_resetData(model, data)
return get_observation(model, data)
class AgentTest(parameterized.TestCase):
def test_set_task_parameters(self):
model_path = (
pathlib.Path(__file__).parent.parent.parent
/ "mjpc/tasks/cartpole/task.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
with agent_lib.Agent(task_id="Cartpole", model=model) as agent:
agent.set_task_parameters({"Goal": 13})
self.assertEqual(agent.get_task_parameters()["Goal"], 13)
def test_set_subprocess_working_dir(self):
model_path = (
pathlib.Path(__file__).parent.parent.parent
/ "mjpc/tasks/cartpole/task.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
cwd = "$$$INVALID_PATH$$$"
with self.assertRaises(FileNotFoundError):
agent_lib.Agent(
task_id="Cartpole", model=model, subprocess_kwargs={"cwd": cwd}
)
cwd = os.getcwd()
with agent_lib.Agent(
task_id="Cartpole", model=model, subprocess_kwargs={"cwd": cwd}
) as agent:
agent.set_task_parameters({"Goal": 13})
self.assertEqual(agent.get_task_parameters()["Goal"], 13)
def test_step_env_with_planner(self):
model_path = (
pathlib.Path(__file__).parent.parent.parent
/ "mjpc/tasks/particle/task_timevarying.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
data = mujoco.MjData(model)
with agent_lib.Agent(task_id="Particle", model=model) as agent:
actions = []
observations = [environment_reset(model, data)]
num_steps = 10
for _ in range(num_steps):
agent.set_state(
time=data.time,
qpos=data.qpos,
qvel=data.qvel,
act=data.act,
mocap_pos=data.mocap_pos,
mocap_quat=data.mocap_quat,
userdata=data.userdata,
)
agent.planner_step()
actions.append(agent.get_action())
observations.append(environment_step(model, data, actions[-1]))
observations = np.array(observations)
actions = np.array(actions)
self.assertFalse((observations == 0).all())
self.assertFalse((actions == 0).all())
@parameterized.parameters({"nominal": False}, {"nominal": True})
def test_action_averaging_doesnt_change_state(self, nominal):
    # when calling get_action with action averaging, the Agent needs to roll
    # out physics internally, but the API must not mutate the agent's state
model_path = (
pathlib.Path(__file__).parent.parent.parent
/ "mjpc/tasks/cartpole/task.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
data = mujoco.MjData(model)
control_timestep = model.opt.timestep * 5
with agent_lib.Agent(task_id="Cartpole", model=model) as agent:
agent.set_task_parameters({"Goal": 13})
agent.reset()
environment_reset(model, data)
agent.set_state(
time=data.time,
qpos=data.qpos,
qvel=data.qvel,
act=data.act,
mocap_pos=data.mocap_pos,
mocap_quat=data.mocap_quat,
userdata=data.userdata,
)
agent.get_action(averaging_duration=control_timestep, nominal_action=nominal)
state_after = agent.get_state()
self.assertEqual(data.time, state_after.time)
np.testing.assert_allclose(data.qpos, state_after.qpos)
np.testing.assert_allclose(data.qvel, state_after.qvel)
np.testing.assert_allclose(data.act, state_after.act)
np.testing.assert_allclose(data.userdata, state_after.userdata)
def test_action_averaging_improves_control(self):
    # try controlling the cartpole task at 1/10th of the physics frequency,
    # once with simple action repeats and once with action averaging;
    # expect action averaging to be slightly better
model_path = (
pathlib.Path(__file__).parent.parent.parent
/ "mjpc/tasks/cartpole/task.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
data = mujoco.MjData(model)
repeats = 10
control_timestep = model.opt.timestep * repeats
def get_action_simple(agent):
return agent.get_action()
def get_action_averaging(agent):
return agent.get_action(averaging_duration=control_timestep)
def run_episode(agent, get_action):
agent.set_task_parameters({"Goal": 13})
agent.reset()
environment_reset(model, data)
num_steps = 10
total_cost = 0.0
for _ in range(num_steps):
agent.set_state(
time=data.time,
qpos=data.qpos,
qvel=data.qvel,
act=data.act,
mocap_pos=data.mocap_pos,
mocap_quat=data.mocap_quat,
userdata=data.userdata,
)
agent.planner_step()
action = get_action(agent)
for _ in range(repeats):
environment_step(model, data, action)
total_cost += agent.get_total_cost()
return total_cost
with agent_lib.Agent(task_id="Cartpole", model=model) as agent:
averaging_cost = run_episode(agent, get_action_averaging)
repeat_cost = run_episode(agent, get_action_simple)
self.assertLess(averaging_cost, repeat_cost)
# averaging actions should be better but not amazingly so.
self.assertLess(
np.abs(averaging_cost - repeat_cost) / repeat_cost,
0.1,
"Difference between costs is too large.",
)
def test_stepping_on_agent_side(self):
"""Test an alternative way of stepping the physics, on the agent side."""
model_path = (
pathlib.Path(__file__).parent.parent.parent
/ "mjpc/tasks/cartpole/task.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
data = mujoco.MjData(model)
with agent_lib.Agent(task_id="Cartpole", model=model) as agent:
agent.set_task_parameter("Goal", -1.0)
num_steps = 10
observations = []
for _ in range(num_steps):
agent.planner_step()
agent.step()
state = agent.get_state()
data.time = state.time
data.qpos = state.qpos
data.qvel = state.qvel
data.act = state.act
data.mocap_pos = np.array(state.mocap_pos).reshape(data.mocap_pos.shape)
data.mocap_quat = np.array(state.mocap_quat).reshape(data.mocap_quat.shape)
data.userdata = np.array(state.userdata).reshape(data.userdata.shape)
observations.append(get_observation(model, data))
self.assertNotEqual(agent.get_state().time, 0)
observations = np.array(observations)
self.assertFalse((observations == 0).all())
def test_set_cost_weights(self):
model_path = (
pathlib.Path(__file__).parent.parent.parent
/ "mjpc/tasks/cartpole/task.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
# by default, planner would produce a non-zero action
with agent_lib.Agent(task_id="Cartpole", model=model) as agent:
agent.set_task_parameter("Goal", -1.0)
agent.planner_step()
action = agent.get_action()
self.assertFalse(np.allclose(action, 0))
# setting all costs to 0 apart from control should end up with a zero
# action
agent.reset()
agent.set_task_parameter("Goal", -1.0)
agent.set_cost_weights(
{"Vertical": 0, "Velocity": 0, "Centered": 0, "Control": 1}
)
agent.planner_step()
action = agent.get_action()
np.testing.assert_allclose(action, 0)
def test_get_cost_weights(self):
model_path = (
pathlib.Path(__file__).parent.parent.parent
/ "mjpc/tasks/cartpole/task.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
# by default, planner would produce a non-zero action
with agent_lib.Agent(task_id="Cartpole", model=model) as agent:
agent.set_task_parameter("Goal", -1.0)
agent.planner_step()
cost = agent.get_total_cost()
self.assertNotEqual(cost, 0)
agent.reset()
agent.set_task_parameter("Goal", -1.0)
agent.set_cost_weights(
{"Vertical": 1, "Velocity": 0, "Centered": 1, "Control": 0}
)
for _ in range(10):
agent.planner_step()
agent.step()
agent.set_task_parameter("Goal", 1.0)
agent.set_cost_weights(
{"Vertical": 1, "Velocity": 1, "Centered": 1, "Control": 1}
)
self.assertEqual(
agent.get_cost_weights(),
{"Vertical": 1, "Velocity": 1, "Centered": 1, "Control": 1},
)
agent.set_state(qpos=[0, 0.5], qvel=[1, 1])
terms_dict = agent.get_cost_term_values()
terms = list(terms_dict.values())
self.assertFalse(np.any(np.isclose(terms, 0, rtol=0, atol=1e-4)))
def test_set_state_with_lists(self):
model_path = (
pathlib.Path(__file__).parent.parent.parent
/ "mjpc/tasks/particle/task_timevarying.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
data = mujoco.MjData(model)
with agent_lib.Agent(task_id="Particle", model=model) as agent:
agent.set_state(
time=data.time,
qpos=list(data.qpos),
qvel=list(data.qvel),
act=list(data.act),
mocap_pos=list(data.mocap_pos.flatten()),
mocap_quat=list(data.mocap_quat.flatten()),
userdata=list(data.userdata),
)
agent.planner_step()
def test_get_set_default_mode(self):
model_path = (
pathlib.Path(__file__).parent.parent.parent
/ "mjpc/tasks/cartpole/task.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
with agent_lib.Agent(task_id="Cartpole", model=model) as agent:
agent.set_mode("default_mode")
self.assertEqual(agent.get_mode(), "default_mode")
@absltest.skip("asset import issue")
def test_get_set_mode(self):
model_path = (
pathlib.Path(__file__).parent.parent.parent
/ "mjpc/tasks/quadruped/task_flat.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
with agent_lib.Agent(task_id="Quadruped Flat", model=model) as agent:
agent.set_mode("Walk")
self.assertEqual(agent.get_mode(), "Walk")
@absltest.skip("asset import issue")
def test_set_mode_error(self):
model_path = (
pathlib.Path(__file__).parent.parent.parent
/ "mjpc/tasks/quadruped/task_flat.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
with agent_lib.Agent(task_id="Quadruped Flat", model=model) as agent:
self.assertRaises(grpc.RpcError, lambda: agent.set_mode("Run"))
if __name__ == "__main__":
absltest.main()
| mujoco_mpc-main | python/mujoco_mpc/agent_test.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A test for agent.py that brings up a UI. Can only be run locally."""
import time
from absl.testing import absltest
import grpc
import mujoco
from mujoco_mpc import agent as agent_lib
import numpy as np
import pathlib
def get_observation(model, data):
del model
return np.concatenate([data.qpos, data.qvel])
def environment_step(model, data, action):
data.ctrl[:] = action
mujoco.mj_step(model, data)
return get_observation(model, data)
def environment_reset(model, data):
mujoco.mj_resetData(model, data)
return get_observation(model, data)
class UiAgentTest(absltest.TestCase):
def test_stepping_on_agent_side(self):
"""Test an alternative way of stepping the physics, on the agent side."""
model_path = (
pathlib.Path(__file__).parent.parent.parent / "mjpc/tasks/cartpole/task.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
data = mujoco.MjData(model)
with self.get_agent(task_id="Cartpole", model=model) as agent:
agent.set_task_parameter("Goal", -1.0)
num_steps = 10
observations = []
for _ in range(num_steps):
agent.planner_step()
time.sleep(0.1)
agent.step()
state = agent.get_state()
data.time = state.time
data.qpos = state.qpos
data.qvel = state.qvel
data.act = state.act
data.mocap_pos = np.array(state.mocap_pos).reshape(data.mocap_pos.shape)
data.mocap_quat = np.array(state.mocap_quat).reshape(data.mocap_quat.shape)
data.userdata = np.array(state.userdata).reshape(data.userdata.shape)
observations.append(get_observation(model, data))
self.assertNotEqual(agent.get_state().time, 0)
observations = np.array(observations)
self.assertFalse((observations == 0).all())
def test_set_cost_weights(self):
model_path = (
pathlib.Path(__file__).parent.parent.parent / "mjpc/tasks/cartpole/task.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
# by default, planner would produce a non-zero action
with self.get_agent(task_id="Cartpole", model=model) as agent:
agent.set_task_parameter("Goal", -1.0)
agent.planner_step()
# wait so a planning cycle definitely finishes
# TODO(nimrod): make sure planner_step waits for a planning step
time.sleep(0.5)
action = agent.get_action()
self.assertFalse(np.allclose(action, 0))
# setting all costs to 0 apart from control should end up with a zero
# action
agent.reset()
agent.set_task_parameter("Goal", -1.0)
agent.set_cost_weights(
{"Vertical": 0, "Velocity": 0, "Centered": 0, "Control": 1}
)
# wait so a planning cycle definitely finishes
# TODO(nimrod): make sure planner_step waits for a planning step
time.sleep(0.5)
action = agent.get_action()
np.testing.assert_allclose(0, action, rtol=1, atol=1e-7)
def test_get_cost_weights(self):
model_path = (
pathlib.Path(__file__).parent.parent.parent / "mjpc/tasks/cartpole/task.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
# by default, planner would produce a non-zero action
with self.get_agent(task_id="Cartpole", model=model) as agent:
agent.set_task_parameter("Goal", -1.0)
agent.planner_step()
cost = agent.get_total_cost()
self.assertNotEqual(cost, 0)
agent.reset()
agent.set_task_parameter("Goal", -1.0)
agent.set_cost_weights(
{"Vertical": 1, "Velocity": 0, "Centered": 1, "Control": 0}
)
for _ in range(10):
agent.planner_step()
agent.step()
agent.set_task_parameter("Goal", 1.0)
agent.set_cost_weights(
{"Vertical": 1, "Velocity": 1, "Centered": 1, "Control": 1}
)
agent.set_state(qpos=[0, 0.5], qvel=[1, 1])
terms_dict = agent.get_cost_term_values()
terms = list(terms_dict.values())
self.assertFalse(np.any(np.isclose(terms, 0, rtol=0, atol=1e-6)))
def test_set_state_with_lists(self):
model_path = (
pathlib.Path(__file__).parent.parent.parent
/ "mjpc/tasks/particle/task_timevarying.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
data = mujoco.MjData(model)
with self.get_agent(task_id="Particle", model=model) as agent:
agent.set_state(
time=data.time,
qpos=list(data.qpos),
qvel=list(data.qvel),
act=list(data.act),
mocap_pos=list(data.mocap_pos.flatten()),
mocap_quat=list(data.mocap_quat.flatten()),
userdata=list(data.userdata),
)
agent.planner_step()
def test_set_get_mode(self):
model_path = (
pathlib.Path(__file__).parent.parent.parent / "mjpc/tasks/cartpole/task.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
with self.get_agent(task_id="Cartpole", model=model) as agent:
agent.set_mode("default_mode")
self.assertEqual(agent.get_mode(), "default_mode")
@absltest.skip("asset import issue")
def test_get_set_mode(self):
model_path = (
pathlib.Path(__file__).parent.parent.parent
/ "mjpc/tasks/quadruped/task_flat.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
with self.get_agent(task_id="Quadruped Flat", model=model) as agent:
agent.set_mode("Walk")
self.assertEqual(agent.get_mode(), "Walk")
@absltest.skip("asset import issue")
def test_set_mode_error(self):
model_path = (
pathlib.Path(__file__).parent.parent.parent
/ "mjpc/tasks/quadruped/task_flat.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
with self.get_agent(task_id="Quadruped Flat", model=model) as agent:
self.assertRaises(grpc.RpcError, lambda: agent.set_mode("Run"))
def get_agent(self, **kwargs) -> agent_lib.Agent:
return agent_lib.Agent(
server_binary_path=pathlib.Path(agent_lib.__file__).parent
/ "mjpc"
/ "ui_agent_server",
**kwargs
)
if __name__ == "__main__":
absltest.main()
| mujoco_mpc-main | python/mujoco_mpc/ui_agent_test.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python interface for the to interface with MuJoCo MPC agents."""
import atexit
import contextlib
import pathlib
import socket
import subprocess
import tempfile
from typing import Any, Literal, Mapping, Optional, Sequence
import grpc
import mujoco
from mujoco_mpc import mjpc_parameters
import numpy as np
from numpy import typing as npt
# INTERNAL IMPORT
from mujoco_mpc.proto import agent_pb2
from mujoco_mpc.proto import agent_pb2_grpc
def find_free_port() -> int:
"""Find an available TCP port on the system.
This function creates a temporary socket, binds it to an available port
chosen by the operating system, and returns the chosen port number.
Returns:
int: An available TCP port number.
"""
with socket.socket(family=socket.AF_INET6) as s:
s.bind(("", 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
class Agent(contextlib.AbstractContextManager):
"""`Agent` class to interface with MuJoCo MPC agents.
Attributes:
    task_id: identifier of the MJPC task, e.g. "Cartpole".
    model: optional `mujoco.MjModel` used by the planner.
    port: TCP port the agent server listens on.
    channel: gRPC channel connected to the agent server.
    stub: gRPC stub for the Agent service.
    server_process: handle to the spawned agent server subprocess.
"""
def __init__(
self,
task_id: str,
model: Optional[mujoco.MjModel] = None,
server_binary_path: Optional[str] = None,
extra_flags: Sequence[str] = (),
real_time_speed: float = 1.0,
subprocess_kwargs: Optional[Mapping[str, Any]] = None,
):
self.task_id = task_id
self.model = model
if server_binary_path is None:
binary_name = "agent_server"
server_binary_path = pathlib.Path(__file__).parent / "mjpc" / binary_name
self.port = find_free_port()
self.server_process = subprocess.Popen(
[str(server_binary_path), f"--mjpc_port={self.port}"]
+ list(extra_flags),
**(subprocess_kwargs or {}),
)
atexit.register(self.server_process.kill)
credentials = grpc.local_channel_credentials(grpc.LocalConnectionType.LOCAL_TCP)
self.channel = grpc.secure_channel(f"localhost:{self.port}", credentials)
grpc.channel_ready_future(self.channel).result(timeout=30)
self.stub = agent_pb2_grpc.AgentStub(self.channel)
self.init(
task_id,
model,
send_as="mjb",
real_time_speed=real_time_speed,
)
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
self.channel.close()
self.server_process.kill()
self.server_process.wait()
def init(
self,
task_id: str,
model: Optional[mujoco.MjModel] = None,
send_as: Literal["mjb", "xml"] = "xml",
real_time_speed: float = 1.0,
):
"""Initialize the agent for task `task_id`.
Args:
task_id: the identifier for the MuJoCo MPC task, for example "Cartpole" or
"Humanoid Track".
model: optional `MjModel` instance, which, if provided, will be used as
the underlying model for planning. If not provided, the default MJPC
task xml will be used.
send_as: The serialization format for sending the model over gRPC. Either
"mjb" or "xml".
real_time_speed: ratio of running speed to wall clock, from 0 to 1. Only
affects async (UI) binaries, and not ones where planning is
synchronous.
"""
def model_to_mjb(model: mujoco.MjModel) -> bytes:
buffer_size = mujoco.mj_sizeModel(model)
buffer = np.empty(shape=buffer_size, dtype=np.uint8)
mujoco.mj_saveModel(model, None, buffer)
return buffer.tobytes()
def model_to_xml(model: mujoco.MjModel) -> str:
tmp = tempfile.NamedTemporaryFile()
mujoco.mj_saveLastXML(tmp.name, model)
with pathlib.Path(tmp.name).open("rt") as f:
xml_string = f.read()
return xml_string
if model is not None:
if send_as == "mjb":
model_message = agent_pb2.MjModel(mjb=model_to_mjb(model))
else:
model_message = agent_pb2.MjModel(xml=model_to_xml(model))
else:
model_message = None
init_request = agent_pb2.InitRequest(
task_id=task_id, model=model_message, real_time_speed=real_time_speed
)
self.stub.Init(init_request)
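  # Illustrative sketch of the arguments documented above: the constructor
  # already calls `init` with send_as="mjb"; the direct call below is a
  # hypothetical re-initialization shown only to demonstrate the signature.
  #
  #   agent.init("Cartpole", model, send_as="xml", real_time_speed=0.5)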
def set_state(
self,
time: Optional[float] = None,
qpos: Optional[npt.ArrayLike] = None,
qvel: Optional[npt.ArrayLike] = None,
act: Optional[npt.ArrayLike] = None,
mocap_pos: Optional[npt.ArrayLike] = None,
mocap_quat: Optional[npt.ArrayLike] = None,
userdata: Optional[npt.ArrayLike] = None,
):
"""Set `Agent`'s MuJoCo `data` state.
Args:
time: `data.time`, i.e. the simulation time.
qpos: `data.qpos`.
qvel: `data.qvel`.
act: `data.act`.
mocap_pos: `data.mocap_pos`.
mocap_quat: `data.mocap_quat`.
userdata: `data.userdata`.
"""
# if mocap_pos is an ndarray rather than a list, flatten it
if hasattr(mocap_pos, "flatten"):
mocap_pos = mocap_pos.flatten()
if hasattr(mocap_quat, "flatten"):
mocap_quat = mocap_quat.flatten()
state = agent_pb2.State(
time=time if time is not None else None,
qpos=qpos if qpos is not None else [],
qvel=qvel if qvel is not None else [],
act=act if act is not None else [],
mocap_pos=mocap_pos if mocap_pos is not None else [],
mocap_quat=mocap_quat if mocap_quat is not None else [],
userdata=userdata if userdata is not None else [],
)
set_state_request = agent_pb2.SetStateRequest(state=state)
self.stub.SetState(set_state_request)
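  # Illustrative sketch (mirrors agent_test.py): push the current MuJoCo state
  # into the agent before planning; `data` is assumed to be a `mujoco.MjData`.
  #
  #   agent.set_state(
  #       time=data.time, qpos=data.qpos, qvel=data.qvel, act=data.act,
  #       mocap_pos=data.mocap_pos, mocap_quat=data.mocap_quat,
  #       userdata=data.userdata,
  #   )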
def get_state(self) -> agent_pb2.State:
return self.stub.GetState(agent_pb2.GetStateRequest()).state
def get_action(
self,
time: Optional[float] = None,
averaging_duration: float = 0,
nominal_action: bool = False,
) -> np.ndarray:
"""Return latest `action` from the `Agent`'s planner.
Args:
time: `data.time`, i.e. the simulation time.
averaging_duration: the duration over which actions should be averaged
(e.g. the control timestep).
nominal_action: if True, don't apply feedback terms in the policy
Returns:
action: `Agent`'s planner's latest action.
"""
get_action_request = agent_pb2.GetActionRequest(
time=time,
averaging_duration=averaging_duration,
nominal_action=nominal_action,
)
get_action_response = self.stub.GetAction(get_action_request)
return np.array(get_action_response.action)
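  # Illustrative sketch (mirrors agent_test.py, where `model` is the task's
  # `mujoco.MjModel`): average the policy over one control timestep, or query
  # the nominal (feedback-free) action.
  #
  #   control_timestep = model.opt.timestep * 5
  #   action = agent.get_action(averaging_duration=control_timestep)
  #   nominal = agent.get_action(nominal_action=True)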
def get_total_cost(self) -> float:
terms = self.stub.GetCostValuesAndWeights(
agent_pb2.GetCostValuesAndWeightsRequest()
)
total_cost = 0
for _, value_weight in terms.values_weights.items():
total_cost += value_weight.weight * value_weight.value
return total_cost
def get_cost_term_values(self) -> dict[str, float]:
terms = self.stub.GetCostValuesAndWeights(
agent_pb2.GetCostValuesAndWeightsRequest()
)
return {
name: value_weight.value for name, value_weight in terms.values_weights.items()
}
def planner_step(self):
"""Send a planner request."""
planner_step_request = agent_pb2.PlannerStepRequest()
self.stub.PlannerStep(planner_step_request)
def step(self):
"""Step the physics on the agent side."""
self.stub.Step(agent_pb2.StepRequest())
def reset(self):
"""Reset the `Agent`'s data, settings, planner, and states."""
reset_request = agent_pb2.ResetRequest()
self.stub.Reset(reset_request)
def set_task_parameter(self, name: str, value: float):
"""Set the `Agent`'s task parameters.
Args:
name: the name to identify the parameter.
value: value to to set the parameter to.
"""
self.set_task_parameters({name: value})
def set_task_parameters(self, parameters: dict[str, float | str]):
"""Sets the `Agent`'s task parameters.
Args:
parameters: a map from parameter name to value. string values will be
treated as "selection" values, i.e. parameters with names that start
with "residual_select_" in the XML.
"""
request = agent_pb2.SetTaskParametersRequest()
for name, value in parameters.items():
if isinstance(value, str):
request.parameters[name].selection = value
else:
request.parameters[name].numeric = value
self.stub.SetTaskParameters(request)
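  # Illustrative sketch: numeric parameters as used in agent_test.py; the
  # string form targets "residual_select_" parameters and the name/value below
  # are hypothetical.
  #
  #   agent.set_task_parameters({"Goal": 13})
  #   agent.set_task_parameters({"residual_select_Gait": "Trot"})  # hypothetical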
def get_task_parameters(self) -> dict[str, float | str]:
"""Returns the agent's task parameters."""
response = self.stub.GetTaskParameters(agent_pb2.GetTaskParametersRequest())
result = {}
for name, value in response.parameters.items():
if value.selection:
result[name] = value.selection
else:
result[name] = value.numeric
return result
def set_cost_weights(
self, weights: dict[str, float], reset_to_defaults: bool = False
):
"""Sets the agent's cost weights by name.
Args:
      weights: a map from cost term name to weight value.
      reset_to_defaults: if true, cost weights will be reset to the task
        defaults before applying the map.
"""
request = agent_pb2.SetCostWeightsRequest(
cost_weights=weights, reset_to_defaults=reset_to_defaults
)
self.stub.SetCostWeights(request)
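  # Illustrative sketch (mirrors agent_test.py): zero out every cost term
  # except the control penalty; pass reset_to_defaults=True to restore the
  # task's default weights before applying a new map.
  #
  #   agent.set_cost_weights(
  #       {"Vertical": 0, "Velocity": 0, "Centered": 0, "Control": 1}
  #   )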
def get_cost_weights(self) -> dict[str, float]:
"""Returns the agent's cost weights."""
terms = self.stub.GetCostValuesAndWeights(
agent_pb2.GetCostValuesAndWeightsRequest()
)
return {
name: value_weight.weight for name, value_weight in terms.values_weights.items()
}
def get_mode(self) -> str:
return self.stub.GetMode(agent_pb2.GetModeRequest()).mode
def set_mode(self, mode: str):
request = agent_pb2.SetModeRequest(mode=mode)
self.stub.SetMode(request)
def set_parameters(self, parameters: mjpc_parameters.MjpcParameters):
# TODO(nimrod): Add a single RPC that does this
if parameters.mode is not None:
self.set_mode(parameters.mode)
if parameters.task_parameters:
self.set_task_parameters(parameters.task_parameters)
if parameters.cost_weights:
self.set_cost_weights(parameters.cost_weights)
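# End-to-end usage sketch (condensed from agent_test.py); `model` and `data`
# are assumed to be a loaded `mujoco.MjModel` / `mujoco.MjData` for an MJPC
# task such as Cartpole.
#
#   with Agent(task_id="Cartpole", model=model) as agent:
#     agent.set_task_parameter("Goal", -1.0)
#     for _ in range(10):
#       agent.set_state(time=data.time, qpos=data.qpos, qvel=data.qvel,
#                       act=data.act, mocap_pos=data.mocap_pos,
#                       mocap_quat=data.mocap_quat, userdata=data.userdata)
#       agent.planner_step()
#       data.ctrl[:] = agent.get_action()
#       mujoco.mj_step(model, data)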
| mujoco_mpc-main | python/mujoco_mpc/agent.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from absl.testing import absltest
import mujoco
from mujoco_mpc import batch as batch_lib
import numpy as np
import pathlib
class BatchTest(absltest.TestCase):
def test_data(self):
# load model
model_path = (
pathlib.Path(__file__).parent.parent.parent
/ "mjpc/test/testdata/estimator/particle/task.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
# initialize
configuration_length = 5
batch = batch_lib.Batch(
model=model, configuration_length=configuration_length
)
# time index
index = 0
## configuration
# set
configuration = np.random.rand(model.nq)
data = batch.data(index, configuration=configuration)
# test that input and output match
self.assertLess(
np.linalg.norm(configuration - data["configuration"]), 1.0e-5
)
## velocity
# set
velocity = np.random.rand(model.nv)
data = batch.data(index, velocity=velocity)
# test that input and output match
self.assertLess(np.linalg.norm(velocity - data["velocity"]), 1.0e-5)
## acceleration
# set
acceleration = np.random.rand(model.nv)
data = batch.data(index, acceleration=acceleration)
# test that input and output match
self.assertLess(np.linalg.norm(acceleration - data["acceleration"]), 1.0e-5)
## time
# set
time = np.random.rand(1)
data = batch.data(index, time=time)
# test that input and output match
self.assertLess(np.linalg.norm(time - data["time"]), 1.0e-5)
## ctrl
# set
ctrl = np.random.rand(model.nu)
data = batch.data(index, ctrl=ctrl)
# test that input and output match
self.assertLess(np.linalg.norm(ctrl - data["ctrl"]), 1.0e-5)
## configuration prev
# set
configuration_previous = np.random.rand(model.nq)
data = batch.data(index, configuration_previous=configuration_previous)
# test that input and output match
self.assertLess(
np.linalg.norm(configuration_previous - data["configuration_previous"]),
1.0e-5,
)
## sensor measurement
# set
sensor_measurement = np.random.rand(model.nsensordata)
data = batch.data(index, sensor_measurement=sensor_measurement)
# test that input and output match
self.assertLess(
np.linalg.norm(sensor_measurement - data["sensor_measurement"]),
1.0e-5,
)
## sensor prediction
# set
sensor_prediction = np.random.rand(model.nsensordata)
data = batch.data(index, sensor_prediction=sensor_prediction)
# test that input and output match
self.assertLess(
np.linalg.norm(sensor_prediction - data["sensor_prediction"]),
1.0e-5,
)
## sensor mask
# set
sensor_mask = np.array([1, 0, 1, 0], dtype=int)
data = batch.data(index, sensor_mask=sensor_mask)
# test that input and output match
self.assertLess(np.linalg.norm(sensor_mask - data["sensor_mask"]), 1.0e-5)
## force measurement
# set
force_measurement = np.random.rand(model.nv)
data = batch.data(index, force_measurement=force_measurement)
# test that input and output match
self.assertLess(
np.linalg.norm(force_measurement - data["force_measurement"]),
1.0e-5,
)
## force prediction
# set
force_prediction = np.random.rand(model.nv)
data = batch.data(index, force_prediction=force_prediction)
# test that input and output match
self.assertLess(
np.linalg.norm(force_prediction - data["force_prediction"]), 1.0e-5
)
def test_settings(self):
# load model
model_path = (
pathlib.Path(__file__).parent.parent.parent
/ "mjpc/test/testdata/estimator/particle/task.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
# initialize
configuration_length = 15
batch = batch_lib.Batch(
model=model, configuration_length=configuration_length
)
# initial configuration length
settings = batch.settings()
self.assertTrue(configuration_length == settings["configuration_length"])
# get/set configuration length
in_configuration_length = 7
settings = batch.settings(configuration_length=in_configuration_length)
self.assertTrue(in_configuration_length == settings["configuration_length"])
# get/set prior flag
in_prior_flag = False
settings = batch.settings(prior_flag=in_prior_flag)
self.assertTrue(in_prior_flag == settings["prior_flag"])
# get/set sensor flag
in_sensor_flag = False
settings = batch.settings(sensor_flag=in_sensor_flag)
self.assertTrue(in_sensor_flag == settings["sensor_flag"])
# get/set force flag
in_force_flag = False
settings = batch.settings(force_flag=in_force_flag)
self.assertTrue(in_force_flag == settings["force_flag"])
# get/set search iterations
in_search_iterations = 25
settings = batch.settings(max_search_iterations=in_search_iterations)
self.assertTrue(in_search_iterations == settings["max_search_iterations"])
# get/set smoother iterations
in_smoother_iterations = 25
settings = batch.settings(max_smoother_iterations=in_smoother_iterations)
self.assertTrue(
in_smoother_iterations == settings["max_smoother_iterations"]
)
# get/set gradient tolerance
gradient_tolerance = 1.23456
settings = batch.settings(gradient_tolerance=gradient_tolerance)
self.assertLess(
np.abs(gradient_tolerance - settings["gradient_tolerance"]), 1.0e-6
)
# get/set verbose iteration
verbose_iteration = True
settings = batch.settings(verbose_iteration=verbose_iteration)
self.assertTrue(verbose_iteration == settings["verbose_iteration"])
# get/set verbose optimize
verbose_optimize = True
settings = batch.settings(verbose_optimize=verbose_optimize)
self.assertTrue(verbose_optimize == settings["verbose_optimize"])
# get/set verbose cost
verbose_cost = True
settings = batch.settings(verbose_cost=verbose_cost)
self.assertTrue(verbose_cost == settings["verbose_cost"])
# get/set verbose prior
verbose_prior = True
settings = batch.settings(verbose_prior=verbose_prior)
self.assertTrue(verbose_prior == settings["verbose_prior"])
# get/set search type
in_search_type = 0
settings = batch.settings(search_type=in_search_type)
self.assertTrue(in_search_type == settings["search_type"])
# get/set step scaling
in_step_scaling = 2.5
settings = batch.settings(step_scaling=in_step_scaling)
self.assertLess(np.abs(in_step_scaling - settings["step_scaling"]), 1.0e-4)
# get/set regularization initial
in_regularization_initial = 3.0e1
settings = batch.settings(regularization_initial=in_regularization_initial)
self.assertLess(
np.abs(in_regularization_initial - settings["regularization_initial"]),
1.0e-4,
)
# get/set regularization scaling
in_regularization_scaling = 7.1
settings = batch.settings(regularization_scaling=in_regularization_scaling)
self.assertLess(
np.abs(in_regularization_scaling - settings["regularization_scaling"]),
1.0e-4,
)
# get/set search direction tolerance
search_direction_tolerance = 3.3
settings = batch.settings(
search_direction_tolerance=search_direction_tolerance
)
self.assertLess(
np.abs(
search_direction_tolerance - settings["search_direction_tolerance"]
),
1.0e-5,
)
# get/set cost tolerance
cost_tolerance = 1.0e-3
settings = batch.settings(cost_tolerance=cost_tolerance)
self.assertLess(np.abs(cost_tolerance - settings["cost_tolerance"]), 1.0e-5)
# get/set assemble prior Jacobian
assemble_prior_jacobian = True
settings = batch.settings(assemble_prior_jacobian=assemble_prior_jacobian)
self.assertTrue(
assemble_prior_jacobian == settings["assemble_prior_jacobian"]
)
# get/set assemble sensor Jacobian
assemble_sensor_jacobian = True
settings = batch.settings(assemble_sensor_jacobian=assemble_sensor_jacobian)
self.assertTrue(
assemble_sensor_jacobian == settings["assemble_sensor_jacobian"]
)
# get/set assemble force Jacobian
assemble_force_jacobian = True
settings = batch.settings(assemble_force_jacobian=assemble_force_jacobian)
self.assertTrue(
assemble_force_jacobian == settings["assemble_force_jacobian"]
)
# get/set assemble sensor norm Hessian
assemble_sensor_norm_hessian = True
settings = batch.settings(
assemble_sensor_norm_hessian=assemble_sensor_norm_hessian
)
self.assertTrue(
assemble_sensor_norm_hessian == settings["assemble_sensor_norm_hessian"]
)
# get/set assemble force norm Hessian
assemble_force_norm_hessian = True
settings = batch.settings(
assemble_force_norm_hessian=assemble_force_norm_hessian
)
self.assertTrue(
assemble_force_norm_hessian == settings["assemble_force_norm_hessian"]
)
# get/set first step position sensors
first_step_position_sensors = True
settings = batch.settings(
first_step_position_sensors=first_step_position_sensors
)
self.assertTrue(
first_step_position_sensors == settings["first_step_position_sensors"]
)
# get/set last step position sensors
last_step_position_sensors = True
settings = batch.settings(
last_step_position_sensors=last_step_position_sensors
)
self.assertTrue(
last_step_position_sensors == settings["last_step_position_sensors"]
)
# get/set last step velocity sensors
last_step_velocity_sensors = True
settings = batch.settings(
last_step_velocity_sensors=last_step_velocity_sensors
)
self.assertTrue(
last_step_velocity_sensors == settings["last_step_velocity_sensors"]
)
def test_costs(self):
# load model
model_path = (
pathlib.Path(__file__).parent.parent.parent
/ "mjpc/test/testdata/estimator/particle/task.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
# initialize
configuration_length = 5
batch = batch_lib.Batch(
model=model, configuration_length=configuration_length
)
# cost
cost = batch.cost(derivatives=True, internals=True)
self.assertLess(np.abs(cost["total"] - 0.0), 1.0e-5)
# cost prior
self.assertLess(np.abs(cost["prior"] - 0.0), 1.0e-5)
# cost sensor
self.assertLess(np.abs(cost["sensor"] - 0.0), 1.0e-5)
# cost force
self.assertLess(np.abs(cost["force"] - 0.0), 1.0e-5)
# cost initial
self.assertLess(np.abs(cost["initial"] - 0.0), 1.0e-5)
# derivatives
nvar = model.nv * configuration_length
nsensor = model.nsensordata * (configuration_length - 1)
nforce = model.nv * (configuration_length - 2)
self.assertTrue(nvar == cost["nvar"])
self.assertTrue(nsensor == cost["nsensor"])
self.assertTrue(nforce == cost["nforce"])
self.assertTrue(cost["gradient"].size == nvar)
self.assertTrue(cost["hessian"].shape == (nvar, nvar))
self.assertTrue(cost["residual_prior"].size == nvar)
self.assertTrue(cost["residual_sensor"].size == nsensor)
self.assertTrue(cost["residual_force"].size == nforce)
self.assertTrue(cost["jacobian_prior"].shape == (nvar, nvar))
self.assertTrue(cost["jacobian_sensor"].shape == (nsensor, nvar))
self.assertTrue(cost["jacobian_force"].shape == (nforce, nvar))
self.assertTrue(cost["norm_gradient_sensor"].size == nsensor)
self.assertTrue(cost["norm_gradient_force"].size == nforce)
self.assertTrue(cost["prior_matrix"].shape == (nvar, nvar))
self.assertTrue(cost["norm_hessian_sensor"].shape == (nsensor, nsensor))
self.assertTrue(cost["norm_hessian_force"].shape == (nforce, nforce))
def test_noise(self):
# load model
model_path = (
pathlib.Path(__file__).parent.parent.parent
/ "mjpc/test/testdata/estimator/particle/task.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
# initialize
configuration_length = 5
batch = batch_lib.Batch(
model=model, configuration_length=configuration_length
)
## process
in_process = np.random.rand(model.nv)
noise = batch.noise(process=in_process)
self.assertLess(np.linalg.norm(in_process - noise["process"]), 1.0e-5)
## sensor
in_sensor = np.random.rand(model.nsensor)
noise = batch.noise(sensor=in_sensor)
self.assertLess(np.linalg.norm(in_sensor - noise["sensor"]), 1.0e-5)
def test_shift(self):
# load model
model_path = (
pathlib.Path(__file__).parent.parent.parent
/ "mjpc/test/testdata/estimator/particle/task.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
# initialize
configuration_length = 5
batch = batch_lib.Batch(
model=model, configuration_length=configuration_length
)
# no shift
head = batch.shift(0)
self.assertTrue(head == 0)
# shift
shift = 1
head = batch.shift(shift)
self.assertTrue(head == 1)
shift = 2
head = batch.shift(shift)
self.assertTrue(head == 3)
def test_reset(self):
# load model
model_path = (
pathlib.Path(__file__).parent.parent.parent
/ "mjpc/test/testdata/estimator/particle/task.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
# initialize
configuration_length = 5
batch = batch_lib.Batch(
model=model, configuration_length=configuration_length
)
# set
index = 1
configuration = np.random.rand(model.nq)
sensor_measurement = np.random.rand(model.nsensordata)
data = batch.data(
index,
configuration=configuration,
sensor_measurement=sensor_measurement,
)
# check that elements are set
self.assertLess(0, np.linalg.norm(data["configuration"]))
self.assertLess(0, np.linalg.norm(data["sensor_measurement"]))
# reset
batch.reset()
# get data
data = batch.data(index)
# check that elements are reset to zero
self.assertLess(np.linalg.norm(data["configuration"]), 1.0e-5)
self.assertLess(np.linalg.norm(data["sensor_measurement"]), 1.0e-5)
def test_optimize(self):
# load model
model_path = (
pathlib.Path(__file__).parent.parent.parent
/ "mjpc/test/testdata/estimator/particle/task.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
# initialize
configuration_length = 5
batch = batch_lib.Batch(
model=model, configuration_length=configuration_length
)
# TODO(taylor): setup
# optimize
# batch.optimize()
def test_status(self):
# load model
model_path = (
pathlib.Path(__file__).parent.parent.parent
/ "mjpc/test/testdata/estimator/particle/task.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
# initialize
configuration_length = 5
batch = batch_lib.Batch(
model=model, configuration_length=configuration_length
)
# TODO(taylor): setup
status = batch.status()
# search iterations
self.assertTrue(status["search_iterations"] == 0)
# smoother iterations
self.assertTrue(status["smoother_iterations"] == 0)
# step size
self.assertLess(np.abs(status["step_size"] - 1.0), 1.0e-5)
# # regularization
# self.assertTrue(
# np.abs(
# status["regularization"]
# - batch.settings()["regularization_initial"]
# ),
# 1.0e-6,
# )
# gradient norm
self.assertLess(np.abs(status["gradient_norm"]), 1.0e-5)
# search direction norm
self.assertLess(np.abs(status["search_direction_norm"]), 1.0e-5)
# solve status
self.assertTrue(status["solve_status"] == 0)
# cost difference
self.assertLess(np.abs(status["cost_difference"]), 1.0e-5)
def test_prior_weights(self):
# load model
model_path = (
pathlib.Path(__file__).parent.parent.parent
/ "mjpc/test/testdata/estimator/particle/task.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
# initialize
configuration_length = 5
batch = batch_lib.Batch(
model=model, configuration_length=configuration_length
)
# dimension
dim = configuration_length * model.nv
# get uninitialized (zero) matrix
prior0 = batch.prior_weights()
# test
self.assertTrue(prior0.shape == (dim, dim))
self.assertTrue(not prior0.any())
# identity
in_weights = np.eye(dim)
out_prior = batch.prior_weights(weights=in_weights)
# test
self.assertLess(np.linalg.norm(in_weights - out_prior), 1.0e-4)
def test_norm(self):
# load model
model_path = (
pathlib.Path(__file__).parent.parent.parent
/ "mjpc/test/testdata/estimator/particle/task.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
# initialize
configuration_length = 5
batch = batch_lib.Batch(
model=model, configuration_length=configuration_length
)
# get norm data
data = batch.norm()
# test norm types
self.assertTrue((data["sensor_type"] == np.zeros(model.nsensor)).all())
    # test norm parameters
self.assertTrue(not data["sensor_parameters"].any())
# set norm data
sensor_type = np.array([1, 2, 3, 4])
sensor_parameters = np.random.rand(3 * model.nsensor)
data = batch.norm(
sensor_type=sensor_type,
sensor_parameters=sensor_parameters,
)
# test
self.assertTrue((sensor_type == data["sensor_type"]).all())
self.assertLess(
np.linalg.norm(sensor_parameters - data["sensor_parameters"]),
1.0e-5,
)
def test_sensor_info(self):
# load model
model_path = (
pathlib.Path(__file__).parent.parent.parent
/ "mjpc/test/testdata/estimator/particle/task.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
# initialize
configuration_length = 5
batch = batch_lib.Batch(
model=model, configuration_length=configuration_length
)
# get sensor info
info = batch.sensor_info()
# test
self.assertTrue(info["start_index"] == 0)
self.assertTrue(info["num_measurements"] == 4)
self.assertTrue(info["dim_measurements"] == 4)
def test_parameters(self):
# load model
model_path = (
pathlib.Path(__file__).parent.parent.parent
/ "mjpc/test/testdata/estimator/particle/task1D_framepos.xml"
)
model = mujoco.MjModel.from_xml_path(str(model_path))
# initialize
configuration_length = 3
batch = batch_lib.Batch(
model=model, configuration_length=configuration_length
)
# random parameters
parameters = np.random.normal(size=6, scale=1.0e-1)
# set / get data
data = batch.data(0, parameters=parameters)
# test
self.assertLess(np.linalg.norm(data["parameters"] - parameters), 1.0e-5)
# noise
noise = np.random.normal(size=6, scale=1.0)
data = batch.noise(parameter=noise)
# test
self.assertLess(np.linalg.norm(data["parameter"] - noise), 1.0e-5)
if __name__ == "__main__":
absltest.main()
| mujoco_mpc-main | python/mujoco_mpc/batch_test.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mujoco
import matplotlib.pyplot as plt
import mediapy as media
import numpy as np
# set current directory to mjpc/python/mujoco_mpc
from mujoco_mpc import batch as batch_lib
# %%
# 1D Particle Model
xml = """
<mujoco model="Particle1D">
<visual>
<headlight ambient=".4 .4 .4" diffuse=".8 .8 .8" specular="0.1 0.1 0.1"/>
<map znear=".01"/>
<quality shadowsize="2048"/>
</visual>
<asset>
<texture name="skybox" type="skybox" builtin="gradient" rgb1="0 0 0" rgb2="0 0 0"
width="800" height="800" mark="random" markrgb="0 0 0"/>
</asset>
<asset>
<texture name="grid" type="2d" builtin="checker" rgb1=".1 .2 .3" rgb2=".2 .3 .4" width="300" height="300" mark="edge" markrgb=".2 .3 .4"/>
<material name="grid" texture="grid" texrepeat="1 1" texuniform="true" reflectance=".2"/>
<material name="self" rgba=".7 .5 .3 1"/>
</asset>
<option timestep="0.001" />
<worldbody>
<light name="light" pos="0 0 1"/>
<geom name="ground" type="plane" pos="0 0 0" size=".3 .3 .1" material="grid" />
<body name="pointmass" pos="0 0 0">
<joint name="root_z" type="slide" damping="0" pos="0 0 0" axis="0 0 1" />
<geom name="pointmass" type="sphere" size=".01" material="self" mass="1.0"/>
</body>
</worldbody>
<actuator>
<motor name="z_motor" joint="root_z"/>
</actuator>
<sensor>
<jointpos name="joint_z" joint="root_z" />
</sensor>
</mujoco>"""
model = mujoco.MjModel.from_xml_string(xml)
data = mujoco.MjData(model)
renderer = mujoco.Renderer(model)
# %%
## rollout
np.random.seed(0)
# simulation horizon
T = 1000
# trajectories
qpos = np.zeros((model.nq, T))
qvel = np.zeros((model.nv, T))
qacc = np.zeros((model.nv, T))
ctrl = np.zeros((model.nu, T))
qfrc = np.zeros((model.nv, T))
sensor = np.zeros((model.nsensordata, T))
noisy_sensor = np.zeros((model.nsensordata, T))
time = np.zeros(T)
# set initial state
mujoco.mj_resetData(model, data)
data.qpos[0] = 0.025
# frames
frames = []
FPS = 1.0 / model.opt.timestep
# simulate
for t in range(T):
# set ctrl
data.ctrl = np.zeros(model.nu)
# forward dynamics
mujoco.mj_forward(model, data)
# cache
qpos[:, t] = data.qpos
qvel[:, t] = data.qvel
qacc[:, t] = data.qacc
ctrl[:, t] = data.ctrl
qfrc[:, t] = data.qfrc_actuator
sensor[:, t] = data.sensordata
time[t] = data.time
# noisy sensors
noisy_sensor[:, t] = sensor[:, t] + np.random.normal(
scale=1.0e-3, size=model.nsensordata
)
  # integrate with Euler
mujoco.mj_Euler(model, data)
# render and save frames
# renderer.update_scene(data)
# pixels = renderer.render()
# frames.append(pixels)
# display video.
# SLOWDOWN = 0.5
# media.show_video(frames, fps=SLOWDOWN * FPS)
# %%
# plot position
fig = plt.figure()
# position (sensor)
plt.plot(time, noisy_sensor[0, :], label="sensor", ls="--", color="cyan")
# position (simulation)
plt.plot(time, qpos[0, :], label="simulation", color="black")
plt.legend()
plt.xlabel("Time (s)")
plt.ylabel("Position")
# plot velocity
fig = plt.figure()
# velocity (simulation)
plt.plot(time, qvel[0, :], label="simulation", color="black")
plt.legend()
plt.xlabel("Time (s)")
plt.ylabel("Velocity")
# %%
# estimator model
model_estimator = mujoco.MjModel.from_xml_string(xml)
# batch estimator
configuration_length = T
estimator = batch_lib.Batch(
model=model_estimator, configuration_length=configuration_length
)
# %%
# configuration initialization
qinit = np.zeros((model.nq, configuration_length))
# set data in estimator
for t in range(configuration_length):
# constant initialization
qinit[:, t] = qpos[:, 0] + np.random.normal(scale=1.0e-3, size=model.nq)
# set data
estimator.data(
t,
configuration=qinit[:, t],
ctrl=ctrl[:, t],
sensor_measurement=noisy_sensor[:, t],
force_measurement=qfrc[:, t],
time=np.array([time[t]]),
)
# %%
# set noise std
estimator.noise(
process=np.full(model.nv, 1.0), sensor=np.full(model.nsensor, 5.0e-1)
)
# set settings
estimator.settings(
sensor_flag=True,
force_flag=True,
max_smoother_iterations=100,
max_search_iterations=1000,
regularization_initial=1.0e-12,
gradient_tolerance=1.0e-6,
search_direction_tolerance=1.0e-6,
cost_tolerance=1.0e-6,
)
# optimize
estimator.optimize()
# costs
estimator.print_cost()
# status
estimator.print_status()
# %%
# get estimator solution
q_est = np.zeros((model_estimator.nq, configuration_length))
v_est = np.zeros((model_estimator.nv, configuration_length))
s_est = np.zeros((model_estimator.nsensordata, configuration_length))
t_est = np.zeros(configuration_length)
for t in range(configuration_length):
data_ = estimator.data(t)
q_est[:, t] = data_["configuration"]
v_est[:, t] = data_["velocity"]
s_est[:, t] = data_["sensor_prediction"]
t_est[t] = data_["time"]
# plot position
fig = plt.figure()
# position
plt.plot(time, qpos[0, :], label="simulation", color="black")
plt.plot(time, qinit[0, :], label="initialization", color="orange")
plt.plot(t_est, q_est[0, :], label="estimation", ls="--", color="magenta")
plt.legend()
plt.xlabel("Time (s)")
plt.ylabel("Position")
# plot velocity
fig = plt.figure()
# velocity (simulation)
plt.plot(time, qvel[0, :], label="simulation", color="black")
# velocity (estimator)
plt.plot(t_est[1:], v_est[0, 1:], label="estimation", ls="--", color="magenta")
plt.legend()
plt.xlabel("Time (s)")
plt.ylabel("Velocity")
| mujoco_mpc-main | python/mujoco_mpc/demos/batch/particle_smoother.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as plt
import mediapy as media
import mujoco
import numpy as np
# set current directory to mjpc/python/mujoco_mpc
from mujoco_mpc import batch as batch_lib
# %matplotlib inline
# cart-pole model
xml = """
<mujoco model="Cartpole">
<visual>
<headlight ambient=".4 .4 .4" diffuse=".8 .8 .8" specular="0.1 0.1 0.1"/>
<map znear=".01"/>
<quality shadowsize="2048"/>
<global elevation="-15" offwidth="1920" offheight="1080"/>
</visual>
<asset>
<texture name="blue_grid" type="2d" builtin="checker" rgb1=".02 .14 .44" rgb2=".27 .55 1" width="300" height="300" mark="edge" markrgb="1 1 1"/>
<material name="blue_grid" texture="blue_grid" texrepeat="1 1" texuniform="true" reflectance=".2"/>
<texture name="grey_grid" type="2d" builtin="checker" rgb1=".26 .26 .26" rgb2=".6 .6 .6" width="300" height="300" mark="edge" markrgb="1 1 1"/>
<material name="grey_grid" texture="grey_grid" texrepeat="1 1" texuniform="true" reflectance=".2"/>
<texture name="skybox" type="skybox" builtin="gradient" rgb1=".66 .79 1" rgb2=".9 .91 .93" width="800" height="800"/>
<material name="self" rgba=".7 .5 .3 1"/>
<material name="self_default" rgba=".7 .5 .3 1"/>
<material name="self_highlight" rgba="0 .5 .3 1"/>
<material name="effector" rgba=".7 .4 .2 1"/>
<material name="effector_default" rgba=".7 .4 .2 1"/>
<material name="effector_highlight" rgba="0 .5 .3 1"/>
<material name="decoration" rgba=".2 .6 .3 1"/>
<material name="eye" rgba="0 .2 1 1"/>
<material name="target" rgba=".6 .3 .3 1"/>
<material name="target_default" rgba=".6 .3 .3 1"/>
<material name="target_highlight" rgba=".6 .3 .3 .4"/>
<material name="site" rgba=".5 .5 .5 .3"/>
</asset>
<option timestep="0.01">
<flag contact="disable"/>
</option>
<default>
<default class="pole">
<joint type="hinge" axis="0 1 0" damping="2e-6"/>
<geom type="capsule" fromto="0 0 0 0 0 1" size="0.045" material="self" mass=".1"/>
</default>
</default>
<worldbody>
<light name="light" pos="0 0 6"/>
<camera name="fixed" pos="0 -4 1" zaxis="0 -1 0"/>
<camera name="lookatcart" mode="targetbody" target="cart" pos="0 -2 2"/>
<geom name="floor" pos="0 0 -.05" size="4 4 .2" type="plane" material="blue_grid"/>
<geom name="rail1" type="capsule" pos="0 .07 1" zaxis="1 0 0" size="0.02 2" material="decoration" />
<geom name="rail2" type="capsule" pos="0 -.07 1" zaxis="1 0 0" size="0.02 2" material="decoration" />
<body name="cart" pos="0 0 1">
<joint name="slider" type="slide" limited="true" axis="1 0 0" range="-1.8 1.8" solreflimit=".08 1" solimplimit="0 0.95 0.001" damping="1.0e-4"/>
<geom name="cart" type="box" size="0.2 0.15 0.1" material="self" mass="1"/>
<body name="pole_1" childclass="pole">
<joint name="hinge_1" damping="1.0e-4"/>
<geom name="pole_1"/>
<site name="tip" pos="0 0 1"/>
</body>
</body>
</worldbody>
<actuator>
<motor name="slide" joint="slider" gear="10" ctrllimited="true" ctrlrange="-1 1" />
</actuator>
<sensor>
<jointpos name="slider" joint="slider"/>
<jointpos name="hinge_1" joint="hinge_1"/>
</sensor>
</mujoco>
"""
model = mujoco.MjModel.from_xml_string(xml)
data = mujoco.MjData(model)
renderer = mujoco.Renderer(model, height=768, width=1366)
# linear interpolation initialization
T = 500
q0 = np.array([0.0, np.pi])
qT = np.array([0.0, 0.0])
# compute linear interpolation
qinterp = np.zeros((model.nq, T))
for t in range(T):
# slope
slope = (qT - q0) / T
# interpolation
qinterp[:, t] = q0 + t * slope
# time
time = [t * model.opt.timestep for t in range(T)]
# plot position
fig = plt.figure()
# arm position
plt.plot(time, qinterp[0, :], label="q0 interpolation", color="orange")
plt.plot(time, qinterp[1, :], label="q1 interpolation", color="cyan")
plt.legend()
plt.xlabel("Time (s)")
plt.ylabel("Configuration")
# estimator model
model_estimator = mujoco.MjModel.from_xml_string(xml)
# batch estimator
configuration_length = T
estimator = batch_lib.Batch(
model=model_estimator,
configuration_length=configuration_length,
)
# set data
for t in range(configuration_length):
# unpack
qt = np.zeros(model.nq)
st = np.zeros(model.nsensordata)
mt = np.zeros(model.nsensor)
ft = np.zeros(model.nv)
tt = np.array([t * model.opt.timestep])
# set initial state
if t == 0 or t == 1:
qt = q0
st = q0
mt = np.array([1, 1], dtype=int)
# set goal
elif t >= configuration_length - 2:
qt = qT
st = qT
mt = np.array([1, 1], dtype=int)
# initialize qpos
else:
qt = qinterp[:, t]
mt = np.array([0, 0], dtype=int)
# set data
data_ = estimator.data(
t,
configuration=qt,
sensor_measurement=st,
sensor_mask=mt,
force_measurement=ft,
time=tt,
)
# set std^2
estimator.noise(process=np.array([1.0e-2, 1.0e-8]), sensor=np.array([1.0, 1.0]))
# set settings
estimator.settings(
sensor_flag=True,
force_flag=True,
max_smoother_iterations=1000,
max_search_iterations=1000,
regularization_initial=1.0e-12,
gradient_tolerance=1.0e-6,
search_direction_tolerance=1.0e-6,
cost_tolerance=1.0e-6,
first_step_position_sensors=True,
last_step_position_sensors=True,
last_step_velocity_sensors=True,
)
# optimize
estimator.optimize()
# costs
estimator.print_cost()
# status
estimator.print_status()
# get estimation trajectories
q_est = np.zeros((model_estimator.nq, configuration_length))
v_est = np.zeros((model_estimator.nv, configuration_length))
s_est = np.zeros((model_estimator.nsensordata, configuration_length))
f_est = np.zeros((model_estimator.nv, configuration_length))
t_est = np.zeros(configuration_length)
for t in range(configuration_length):
data_ = estimator.data(t)
q_est[:, t] = data_["configuration"]
v_est[:, t] = data_["velocity"]
s_est[:, t] = data_["sensor_prediction"]
f_est[:, t] = data_["force_prediction"]
t_est[t] = data_["time"]
# plot position
fig = plt.figure()
plt.plot(
time, qinterp[0, :], label="q0 (interpolation)", ls="--", color="orange"
)
plt.plot(time, qinterp[1, :], label="q1 (interpolation)", ls="--", color="cyan")
plt.plot(
t_est - model.opt.timestep,
q_est[0, :],
label="q0 (optimized)",
color="orange",
)
plt.plot(
t_est - model.opt.timestep,
q_est[1, :],
label="q1 (optimized)",
color="cyan",
)
plt.legend()
plt.xlabel("Time (s)")
plt.ylabel("Configuration")
# plot velocity
fig = plt.figure()
# velocity
plt.plot(
t_est[1:] - model.opt.timestep,
v_est[0, 1:],
label="v0 (optimized)",
color="orange",
)
plt.plot(
t_est[1:] - model.opt.timestep,
v_est[1, 1:],
label="v1 (optimized)",
color="cyan",
)
# plt.legend()
plt.xlabel("Time (s)")
plt.ylabel("Velocity")
# frames optimized
frames_opt = []
# simulate
for t in range(configuration_length - 1):
# set configuration
data.qpos = q_est[:, t]
data.qvel = v_est[:, t]
mujoco.mj_forward(model, data)
# render and save frames
renderer.update_scene(data)
pixels = renderer.render()
frames_opt.append(pixels)
# display video
media.show_video(frames_opt, fps=1.0 / model.opt.timestep, loop=False)
# forces
fig = plt.figure()
plt.plot(t_est[1:-1], f_est[0, 1:-1], color="orange", label="slider")
plt.plot(
t_est[1:-1], f_est[1, 1:-1], color="cyan", label="hinge (magic force)"
) # should be ~0
plt.legend()
plt.xlabel("Time (s)")
plt.ylabel("Forces")
# qfrc verification with forward dynamics
mujoco.mj_resetData(model, data)
data.qpos = q_est[:, 1]
data.qvel = v_est[:, 1]
Qpos = np.zeros((model.nq, T))
for t in range(1, T - 1):
data.qfrc_applied = f_est[:, t]
mujoco.mj_step(model, data)
Qpos[:, t] = data.qpos
# plot position
fig = plt.figure()
plt.plot(
time[1:],
Qpos[0, 1:],
label="q0 (forward simulation)",
ls="--",
color="orange",
)
plt.plot(
time[1:],
Qpos[1, 1:],
label="q1 (forward simulation)",
ls="--",
color="cyan",
)
plt.plot(
t_est - model.opt.timestep,
q_est[0, :],
label="q0 (optimized)",
color="orange",
)
plt.plot(
t_est - model.opt.timestep,
q_est[1, :],
label="q1 (optimized)",
color="cyan",
)
plt.legend()
plt.xlabel("Time (s)")
plt.ylabel("Configuration")
| mujoco_mpc-main | python/mujoco_mpc/demos/batch/cartpole_trajopt.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mujoco
import numpy as np
import matplotlib.pyplot as plt
import mediapy as media
# set current directory to mjpc/python/mujoco_mpc
from mujoco_mpc import batch as batch_lib
# %%
# 2D Particle Model
xml = """
<mujoco model="Particle">
<visual>
<headlight ambient=".4 .4 .4" diffuse=".8 .8 .8" specular="0.1 0.1 0.1"/>
<map znear=".01"/>
<quality shadowsize="2048"/>
<global elevation="-15"/>
</visual>
<asset>
<texture name="blue_grid" type="2d" builtin="checker" rgb1=".02 .14 .44" rgb2=".27 .55 1" width="300" height="300" mark="edge" markrgb="1 1 1"/>
<material name="blue_grid" texture="blue_grid" texrepeat="1 1" texuniform="true" reflectance=".2"/>
<texture name="skybox" type="skybox" builtin="gradient" rgb1=".66 .79 1" rgb2=".9 .91 .93" width="800" height="800"/>
<material name="self" rgba=".7 .5 .3 1"/>
<material name="decoration" rgba=".2 .6 .3 1"/>
</asset>
<option timestep="0.01"></option>
<default>
<joint type="hinge" axis="0 0 1" limited="true" range="-.29 .29" damping="1"/>
<motor gear=".1" ctrlrange="-1 1" ctrllimited="true"/>
</default>
<worldbody>
<light name="light" pos="0 0 1"/>
<camera name="fixed" pos="0 0 .75" quat="1 0 0 0"/>
<geom name="ground" type="plane" pos="0 0 0" size=".3 .3 .1" material="blue_grid"/>
<geom name="wall_x" type="plane" pos="-.3 0 .02" zaxis="1 0 0" size=".02 .3 .02" material="decoration"/>
<geom name="wall_y" type="plane" pos="0 -.3 .02" zaxis="0 1 0" size=".3 .02 .02" material="decoration"/>
<geom name="wall_neg_x" type="plane" pos=".3 0 .02" zaxis="-1 0 0" size=".02 .3 .02" material="decoration"/>
<geom name="wall_neg_y" type="plane" pos="0 .3 .02" zaxis="0 -1 0" size=".3 .02 .02" material="decoration"/>
<body name="pointmass" pos="0 0 .01">
<camera name="cam0" pos="0 -0.3 0.3" xyaxes="1 0 0 0 0.7 0.7"/>
<joint name="root_x" type="slide" pos="0 0 0" axis="1 0 0" />
<joint name="root_y" type="slide" pos="0 0 0" axis="0 1 0" />
<geom name="pointmass" type="sphere" size=".01" material="self" mass=".3"/>
<site name="tip" pos="0 0 0" size="0.01"/>
</body>
</worldbody>
<actuator>
<motor name="x_motor" joint="root_x" gear="1" ctrllimited="true" ctrlrange="-1 1"/>
<motor name="y_motor" joint="root_y" gear="1" ctrllimited="true" ctrlrange="-1 1"/>
</actuator>
<sensor>
<jointpos name="x" joint="root_x" />
<jointpos name="y" joint="root_y" />
</sensor>
</mujoco>
"""
model = mujoco.MjModel.from_xml_string(xml)
data = mujoco.MjData(model)
renderer = mujoco.Renderer(model)
# %%
# initialization
T = 100
q0 = np.array([-0.25, -0.25])
qM = np.array([-0.25, 0.25])
qN = np.array([0.25, -0.25])
qT = np.array([0.25, 0.25])
# compute linear interpolation
qinterp = np.zeros((model.nq, T))
for t in range(T):
# slope
slope = (qT - q0) / T
# interpolation
qinterp[:, t] = q0 + t * slope
# time
time = [t * model.opt.timestep for t in range(T)]
# %%
# plot position
fig = plt.figure()
# arm position
plt.plot(qinterp[0, :], qinterp[1, :], label="interpolation", color="black")
plt.plot(q0[0], q0[1], color="magenta", label="waypoint", marker="o")
plt.plot(qM[0], qM[1], color="magenta", marker="o")
plt.plot(qN[0], qN[1], color="magenta", marker="o")
plt.plot(qT[0], qT[1], color="magenta", marker="o")
plt.legend()
plt.xlabel("X")
plt.ylabel("Y")
# %%
# estimator model
model_estimator = mujoco.MjModel.from_xml_string(xml)
# batch estimator
configuration_length = T + 2
estimator = batch_lib.Batch(
model=model_estimator,
configuration_length=configuration_length,
)
# %%
# set data
for t in range(configuration_length):
# unpack
qt = np.zeros(model.nq)
st = np.zeros(model.nsensordata)
mt = np.zeros(model.nsensor)
ft = np.zeros(model.nv)
ct = np.zeros(model.nu)
tt = np.array([t * model.opt.timestep])
# set initial state
if t == 0 or t == 1:
qt = q0
st = q0
mt = np.array([1, 1])
# set goal
elif t >= configuration_length - 2:
qt = qT
st = qT
mt = np.array([1, 1])
# set waypoint
elif t == 25:
st = qM
mt = np.array([1, 1])
# set waypoint
elif t == 75:
st = qN
mt = np.array([1, 1])
# initialize qpos
else:
qt = qinterp[:, t - 1]
mt = np.array([0, 0])
# set data
data_ = estimator.data(
t,
configuration=qt,
ctrl=ct,
sensor_measurement=st,
sensor_mask=mt,
force_measurement=ft,
time=tt,
)
# %%
# set std
estimator.noise(process=np.array([1000.0, 1000.0]), sensor=np.array([1.0, 1.0]))
# set settings
estimator.settings(
sensor_flag=True,
force_flag=True,
max_smoother_iterations=1000,
max_search_iterations=1000,
regularization_initial=1.0e-12,
gradient_tolerance=1.0e-6,
search_direction_tolerance=1.0e-6,
cost_tolerance=1.0e-6,
first_step_position_sensors=True,
last_step_position_sensors=True,
last_step_velocity_sensors=True,
)
# optimize
estimator.optimize()
# costs
estimator.print_cost()
# status
estimator.print_status()
# %%
# get estimated trajectories
q_est = np.zeros((model_estimator.nq, configuration_length))
v_est = np.zeros((model_estimator.nv, configuration_length))
s_est = np.zeros((model_estimator.nsensordata, configuration_length))
f_est = np.zeros((model_estimator.nv, configuration_length))
t_est = np.zeros(configuration_length)
for t in range(configuration_length):
data_ = estimator.data(t)
q_est[:, t] = data_["configuration"]
v_est[:, t] = data_["velocity"]
s_est[:, t] = data_["sensor_prediction"]
f_est[:, t] = data_["force_prediction"]
t_est[t] = data_["time"]
# %%
# plot position
fig = plt.figure()
plt.plot(qinterp[0, :], qinterp[1, :], label="interpolation", color="black")
plt.plot(q_est[0, :], q_est[1, :], label="direct trajopt", color="orange")
plt.plot(q0[0], q0[1], color="magenta", label="waypoint", marker="o")
plt.plot(qM[0], qM[1], color="magenta", marker="o")
plt.plot(qN[0], qN[1], color="magenta", marker="o")
plt.plot(qT[0], qT[1], color="magenta", marker="o")
plt.legend()
plt.xlabel("X")
plt.ylabel("Y")
# plot velocity
fig = plt.figure()
# velocity
plt.plot(t_est[1:] - model.opt.timestep, v_est[0, 1:], label="v0", color="cyan")
plt.plot(
t_est[1:] - model.opt.timestep, v_est[1, 1:], label="v1", color="orange"
)
plt.legend()
plt.xlabel("Time (s)")
plt.ylabel("Velocity")
# %%
# frames optimized
frames_opt = []
# simulate
for t in range(configuration_length - 1):
# get solution from estimator
data_ = estimator.data(t)
# set configuration
data.qpos = q_est[:, t]
data.qvel = v_est[:, t]
mujoco.mj_forward(model, data)
# render and save frames
renderer.update_scene(data)
pixels = renderer.render()
frames_opt.append(pixels)
# display video
# media.show_video(frames_opt, fps=1.0 / model.opt.timestep, loop=False)
| mujoco_mpc-main | python/mujoco_mpc/demos/batch/particle_trajopt.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as plt
import mediapy as media
import mujoco
import numpy as np
from mujoco_mpc import batch as batch_lib
# %matplotlib inline
# more legible printing from numpy
np.set_printoptions(precision=6, suppress=True, linewidth=100)
# load model
xml = """
<mujoco>
<option timestep="0.005"/>
<default>
<geom solimp="0 0.95 0.001"/>
</default>
<visual>
<headlight ambient=".4 .4 .4" diffuse=".8 .8 .8" specular="0.1 0.1 0.1"/>
<map znear=".01"/>
<quality shadowsize="2048"/>
<global offheight="1024" offwidth="1024"/>
</visual>
<asset>
<texture name="skybox" type="skybox" builtin="gradient" rgb1="0 0 0" rgb2="0 0 0"
width="800" height="800" mark="random" markrgb="0 0 0"/>
</asset>
<asset>
<texture name="grid" type="2d" builtin="checker" rgb1=".1 .2 .3" rgb2=".2 .3 .4" width="300" height="300" mark="edge" markrgb=".2 .3 .4"/>
<material name="grid" texture="grid" texrepeat="1 1" texuniform="true" reflectance=".2"/>
<material name="self" rgba=".7 .5 .3 1"/>
</asset>
<worldbody>
<light diffuse=".5 .5 .5" pos="0 0 3" dir="0 0 -1"/>
<camera pos="-0.079 -0.587 0.400" xyaxes="0.951 -0.310 0.000 0.133 0.410 0.902"/>
<geom name="ground" type="plane" pos="0 0 0" size=".3 .3 .1" material="grid"/>
<geom size=".07" pos="-.03 0.03 0"/>
<body name="m1" mocap="true" pos="0.1 0.1 0.1">
<geom type="sphere" size="0.0075" contype="0" conaffinity="0" rgba="1 0 0 0.5"/>
</body>
<body name="m2" mocap="true" pos="0.1 0.1 0.1">
<geom type="sphere" size="0.0075" contype="0" conaffinity="0" rgba="1 0 0 0.5"/>
</body>
<body name="m3" mocap="true" pos="0.1 0.1 0.1">
<geom type="sphere" size="0.0075" contype="0" conaffinity="0" rgba="1 0 0 0.5"/>
</body>
<body name="m4" mocap="true" pos="0.1 0.1 0.1">
<geom type="sphere" size="0.0075" contype="0" conaffinity="0" rgba="1 0 0 0.5"/>
</body>
<body name="m5" mocap="true" pos="0.1 0.1 0.1">
<geom type="sphere" size="0.0075" contype="0" conaffinity="0" rgba="1 0 0 0.5"/>
</body>
<body name="m6" mocap="true" pos="0.1 0.1 0.1">
<geom type="sphere" size="0.0075" contype="0" conaffinity="0" rgba="1 0 0 0.5"/>
</body>
<body name="m7" mocap="true" pos="0.1 0.1 0.1">
<geom type="sphere" size="0.0075" contype="0" conaffinity="0" rgba="1 0 0 0.5"/>
</body>
<body name="m8" mocap="true" pos="0.1 0.1 0.1">
<geom type="sphere" size="0.0075" contype="0" conaffinity="0" rgba="1 0 0 0.5"/>
</body>
<body name="root" pos="0 0 0.25">
<joint type="free"/>
<geom type="box" size=".05 .05 .05" material="self" mass="1.0"/>
<site name="corner1" type="sphere" size="0.05" rgba="1 0 0 0" pos=".05 .05 .05"/>
<site name="corner2" type="sphere" size="0.05" rgba="1 0 0 0" pos="-.05 .05 .05"/>
<site name="corner3" type="sphere" size="0.05" rgba="1 0 0 0" pos=".05 -.05 .05"/>
<site name="corner4" type="sphere" size="0.05" rgba="1 0 0 0" pos=".05 .05 -.05"/>
<site name="corner5" type="sphere" size="0.05" rgba="1 0 0 0" pos="-.05 -.05 .05"/>
<site name="corner6" type="sphere" size="0.05" rgba="1 0 0 0" pos=".05 -.05 -.05"/>
<site name="corner7" type="sphere" size="0.05" rgba="1 0 0 0" pos="-.05 .05 -.05"/>
<site name="corner8" type="sphere" size="0.05" rgba="1 0 0 0" pos="-.05 -.05 -.05"/>
</body>
</worldbody>
<sensor>
<!-- corner positions -->
<framepos name="corner_position1" objtype="site" objname="corner1"/>
<framepos name="corner_position2" objtype="site" objname="corner2"/>
<framepos name="corner_position3" objtype="site" objname="corner3"/>
<framepos name="corner_position4" objtype="site" objname="corner4"/>
<framepos name="corner_position5" objtype="site" objname="corner5"/>
<framepos name="corner_position6" objtype="site" objname="corner6"/>
<framepos name="corner_position7" objtype="site" objname="corner7"/>
<framepos name="corner_position8" objtype="site" objname="corner8"/>
</sensor>
</mujoco>
"""
model = mujoco.MjModel.from_xml_string(xml)
data = mujoco.MjData(model)
renderer = mujoco.Renderer(model, height=720, width=720)
# enable joint visualization option:
scene_option = mujoco.MjvOption()
# random seed
np.random.seed(0)
# rollout
T = 200
# trajectories
qpos = np.zeros((model.nq, T))
qvel = np.zeros((model.nv, T))
qacc = np.zeros((model.nv, T))
ctrl = np.zeros((model.nu, T))
qfrc = np.zeros((model.nv, T))
sensor = np.zeros((model.nsensordata, T))
noisy_sensor = np.zeros((model.nsensordata, T))
time = np.zeros(T)
# rollout
mujoco.mj_resetData(model, data)
# frames
frames = []
FPS = 1.0 / model.opt.timestep
# simulate
for t in range(T):
# forward evaluation
mujoco.mj_forward(model, data)
# cache
qpos[:, t] = data.qpos
qvel[:, t] = data.qvel
qacc[:, t] = data.qacc
ctrl[:, t] = data.ctrl
qfrc[:, t] = data.qfrc_actuator
sensor[:, t] = data.sensordata
time[t] = data.time
# noisy sensor
noisy_sensor[:, t] = sensor[:, t] + np.random.normal(
scale=2.5e-3, size=model.nsensordata
)
# set mocap
for i in range(8):
data.mocap_pos[i, :] = noisy_sensor[(3 * i) : (3 * (i + 1)), t]
# Euler
mujoco.mj_Euler(model, data)
# render and save frames
renderer.update_scene(data, camera=0, scene_option=scene_option)
pixels = renderer.render()
frames.append(pixels)
# display video
media.show_video(frames, fps=0.25 * FPS)
## noisy sensor measurements
# corner sensor
sensor_index = 7
# plot
fig = plt.figure()
plt.plot(
time, sensor[0 + 3 * sensor_index, :], color="blue", label="x (simulation)"
)
plt.plot(
time,
noisy_sensor[0 + 3 * sensor_index, :],
ls="--",
color="blue",
label="x (sensor)",
)
plt.plot(
time, sensor[1 + 3 * sensor_index, :], color="red", label="y (simulation)"
)
plt.plot(
time,
noisy_sensor[1 + 3 * sensor_index, :],
ls="--",
color="red",
label="y (sensor)",
)
plt.plot(
time, sensor[2 + 3 * sensor_index, :], color="green", label="z (simulation)"
)
plt.plot(
time,
noisy_sensor[2 + 3 * sensor_index, :],
ls="--",
color="green",
label="z (sensor)",
)
plt.legend()
plt.xlabel("Time (s)")
plt.ylabel("Position")
# initialize estimator
configuration_length = T
estimator = batch_lib.Batch(
model=model,
configuration_length=configuration_length,
)
# random seed
np.random.seed(0)
# initialization
qinit = np.zeros((model.nq, configuration_length))
for t in range(configuration_length):
# perturbed initial configuration
q = np.array(qpos[:, t])
dq = np.hstack([
np.random.normal(size=3, scale=1.0e-2),
np.random.normal(size=3, scale=1.0e-1),
])
mujoco.mj_integratePos(model, q, dq, 1.0)
qinit[:, t] = q
# set data
for t in range(configuration_length):
# unpack
qt = qinit[:, t]
st = noisy_sensor[:, t]
ft = qfrc[:, t]
# set data
data_ = estimator.data(
t, configuration=qt, sensor_measurement=st, force_measurement=ft
)
# set weight
estimator.noise(
sensor=np.full(model.nsensor, 1.0e-1), process=np.full(model.nv, 1.0e-6)
)
# set settings
estimator.settings(
sensor_flag=True,
force_flag=True,
max_smoother_iterations=1000,
max_search_iterations=1000,
regularization_initial=1.0e-6,
gradient_tolerance=1.0e-5,
cost_tolerance=1.0e-16,
last_step_position_sensors=True,
last_step_velocity_sensors=True,
)
# optimize
estimator.optimize()
# costs
estimator.print_cost()
# status
estimator.print_status()
# get optimized trajectories
qopt = np.zeros((model.nq, configuration_length))
vopt = np.zeros((model.nv, configuration_length))
aopt = np.zeros((model.nv, configuration_length))
sopt = np.zeros((model.nsensordata, configuration_length))
for t in range(configuration_length):
data_ = estimator.data(t)
qopt[:, t] = data_["configuration"]
sopt[:, t] = data_["sensor_prediction"]
if t == 0:
continue
vopt[:, t] = data_["velocity"]
if t == configuration_length - 1:
continue
aopt[:, t] = data_["acceleration"]
## configuration plots
# plot
fig = plt.figure()
# configuration
plt.plot(time, qpos[0, :], label="x (simulation)", color="blue")
plt.plot(time, qpos[1, :], label="y (simulation)", color="red")
plt.plot(time, qpos[2, :], label="z (simulation)", color="green")
# initialized configurations
plt.plot(time, qinit[0, :], ls=":", label="x (initialization)", color="blue")
plt.plot(time, qinit[1, :], ls=":", label="y (initialization)", color="red")
plt.plot(time, qinit[2, :], ls=":", label="z (initialization)", color="green")
# optimized configurations
plt.plot(time, qopt[0, :], ls="--", label="x (optimized)", color="blue")
plt.plot(time, qopt[1, :], ls="--", label="y (optimized)", color="red")
plt.plot(time, qopt[2, :], ls="--", label="z (optimized)", color="green")
plt.legend()
plt.xlabel("Time (s)")
plt.ylabel("Configuration")
## orientation plots
# plot
fig = plt.figure()
# simulation
plt.plot(time, qpos[3, :], label="q0 (simulation)", color="blue")
plt.plot(time, qpos[4, :], label="q1 (simulation)", color="red")
plt.plot(time, qpos[5, :], label="q2 (simulation)", color="green")
plt.plot(time, qpos[6, :], label="q3 (simulation)", color="orange")
# initialized configurations
plt.plot(time, qinit[3, :], ls=":", label="q0 (initialized)", color="blue")
plt.plot(time, qinit[4, :], ls=":", label="q1 (initialized)", color="red")
plt.plot(time, qinit[5, :], ls=":", label="q2 (initialized)", color="green")
plt.plot(time, qinit[6, :], ls=":", label="q3 (initialized)", color="orange")
# optimized configurations
plt.plot(time, qopt[3, :], ls="--", label="q0 (optimized)", color="blue")
plt.plot(time, qopt[4, :], ls="--", label="q1 (optimized)", color="red")
plt.plot(time, qopt[5, :], ls="--", label="q2 (optimized)", color="green")
plt.plot(time, qopt[6, :], ls="--", label="q3 (optimized)", color="orange")
plt.legend()
plt.xlabel("Time (s)")
plt.ylabel("Orientation")
## velocity plots
# plot
fig = plt.figure()
# configuration
plt.plot(time, qvel[0, :], label="vx (simulation)", color="blue")
plt.plot(time, qvel[1, :], label="vy (simulation)", color="red")
plt.plot(time, qvel[2, :], label="vz (simulation)", color="green")
# optimized configurations
plt.plot(
time[1:configuration_length],
vopt[0, 1:configuration_length],
ls="--",
label="vx (optimized)",
color="blue",
)
plt.plot(
time[1:configuration_length],
vopt[1, 1:configuration_length],
ls="--",
label="vz (optimized)",
color="red",
)
plt.plot(
time[1:configuration_length],
vopt[2, 1:configuration_length],
ls="--",
label="vr (optimized)",
color="green",
)
plt.legend()
plt.xlabel("Time (s)")
plt.ylabel("Velocity")
## acceleration plots
# plot
fig = plt.figure()
# configuration
plt.plot(time, qacc[0, :], label="ax (simulation)", color="blue")
plt.plot(time, qacc[1, :], label="ay (simulation)", color="red")
plt.plot(time, qacc[2, :], label="az (simulation)", color="green")
# optimized configurations
plt.plot(
time[1 : configuration_length - 1],
aopt[0, 1 : configuration_length - 1],
ls="--",
label="ax (optimized)",
color="blue",
)
plt.plot(
time[1 : configuration_length - 1],
aopt[1, 1 : configuration_length - 1],
ls="--",
label="az (optimized)",
color="red",
)
plt.plot(
time[1 : configuration_length - 1],
aopt[2, 1 : configuration_length - 1],
ls="--",
label="ar (optimized)",
color="green",
)
plt.legend()
plt.xlabel("Time (s)")
plt.ylabel("Acceleration")
# frames optimized
frames_opt = frames.copy()
# simulate
for t in range(configuration_length - 1):
# initialization
data.qpos = qinit[:, t]
mujoco.mj_differentiatePos(
model, data.qvel, model.opt.timestep, qinit[:, t], qinit[:, t + 1]
)
# forward
mujoco.mj_forward(model, data)
# set mocap
for i in range(8):
data.mocap_pos[i, :] = sopt[(i * 3) : ((i + 1) * 3), t + 1]
# render and save frames
renderer.update_scene(data, camera=0, scene_option=scene_option)
pixels_init = renderer.render()
# set configuration
data.qpos = qopt[:, t + 1]
data.qvel = vopt[:, t + 1]
# forward
mujoco.mj_forward(model, data)
# don't visualize mocap markers for the optimized render
data.mocap_pos = np.zeros((8, 3))
# render and save frames
renderer.update_scene(data, camera=0, scene_option=scene_option)
pixels = renderer.render()
frames_opt[t] = np.hstack((frames_opt[t], pixels_init, pixels))
# display video
media.show_video(
frames_opt[: configuration_length - 2], fps=0.2 * FPS, loop=False
)
print("simulation | initialization | optimized")
| mujoco_mpc-main | python/mujoco_mpc/demos/batch/box_drop_smoother.py |
"""Merge the robotiq and panda models."""
import sys
merge_filename = sys.argv[1]
panda_filename = merge_filename.replace('panda_robotiq.xml',
'panda_nohand.xml')
robotiq_filename = merge_filename.replace('panda_robotiq.xml',
'2f85.xml')
with open(panda_filename) as panda_file:
panda = panda_file.read()
with open(robotiq_filename) as robotiq_file:
robotiq = robotiq_file.read()
# insert defaults
default_begin_index = robotiq.index('<default>') # include default tag
last_default_index = robotiq.rindex('</default>')
defaults = robotiq[default_begin_index: last_default_index]
panda = panda.replace('<default>', defaults)
# insert assets
asset_begin_index = robotiq.index('<asset>') # include asset tag
asset_close_index = robotiq.index('</asset>', asset_begin_index)
assets = robotiq[asset_begin_index:asset_close_index]
panda = panda.replace('<asset>', assets)
# attach model
worldbody_index = robotiq.index('<worldbody>') + len('<worldbody>')
close_worldbody_index = robotiq.index('</worldbody>', worldbody_index)
robotiq_body = robotiq[worldbody_index:close_worldbody_index]
panda = panda.replace('<site name="attachment_site"/>', robotiq_body)
# insert bottom: contact, tendon, equality
contact_begin_index = robotiq.index('</worldbody>') # include closing tag
equality_close_index = robotiq.index(
'</equality>', contact_begin_index) + len('</equality>')
bottom = robotiq[contact_begin_index:equality_close_index]
panda = panda.replace('</worldbody>', bottom)
# add gravity compensation to all bodies
panda = panda.replace('<body ', '<body gravcomp="1" ')
# eliminate contact with the target
panda = panda.replace('priority="1"',
'priority="1" contype="6" conaffinity="5"')
panda = panda.replace(
'<geom type="mesh" group="3"/>',
'<geom type="mesh" group="3" contype="2" conaffinity="1"/>')
# add cartesian actuators
cartesian_actuators = '''
<actuator>
<general name="x" site="pinch" refsite="pedestal" ctrlrange="-.5 .5" ctrllimited="true" gainprm="1000" biasprm="0 -1000 -200" biastype="affine" gear="1 0 0 0 0 0"/>
<general name="y" site="pinch" refsite="pedestal" ctrlrange="-.5 .5" ctrllimited="true" gainprm="1000" biasprm="0 -1000 -200" biastype="affine" gear="0 1 0 0 0 0"/>
<general name="z" site="pinch" refsite="pedestal" ctrlrange="-.5 .5" ctrllimited="true" gainprm="1000" biasprm="300 -1000 -200" biastype="affine" gear="0 0 1 0 0 0"/>
<general name="rx" site="pinch" refsite="world" ctrlrange="-.5 .5" ctrllimited="true" gainprm="100" biasprm="0 -100 -20" biastype="affine" gear="0 0 0 1 0 0"/>
<general name="ry" site="pinch" refsite="world" ctrlrange="-.5 .5" ctrllimited="true" gainprm="100" biasprm="0 -100 -20" biastype="affine" gear="0 0 0 0 1 0"/>
<general name="rz" site="pinch" refsite="world" ctrlrange="-1.5 1.5" ctrllimited="true" gainprm="10" biasprm="0 -10 -2" biastype="affine" gear="0 0 0 0 0 1"/>
<position name="fingers" ctrllimited="true" forcelimited="true" ctrlrange="0 1" forcerange="-5 5" kp="40" tendon="split"/>
</actuator>
'''
actuator_begin_index = panda.index('<actuator>')
actuator_close_index = panda.index(
'</actuator>', actuator_begin_index) + len('</actuator>')
actuators = panda[actuator_begin_index:actuator_close_index]
panda = panda.replace(actuators, cartesian_actuators)
# remove panda keyframe
keyframe_begin_index = panda.index('<keyframe>') # keep tag (for removal)
keyframe_close_index = panda.index('</keyframe>') + len('</keyframe>')
panda = panda.replace(panda[keyframe_begin_index:keyframe_close_index], '')
with open(merge_filename, 'w') as merged_file:
merged_file.write(panda)
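# Usage note (inferred from the sys.argv handling above): the script takes the
# merged output path as its only argument, e.g.
#   python merge_panda_robotiq.py /path/to/panda_robotiq.xml
# It expects panda_nohand.xml and 2f85.xml in the same directory, since their
# paths are derived by string replacement on the output filename.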
| mujoco_mpc-main | mjpc/tasks/manipulation/merge_panda_robotiq.py |
"""
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""Parses boolean expressions (as strings) to produce "calculation graph".
## Propositional logic
Given an expression like "a|((~a)&(b|c))", the function `parse` will output a
list of "op types" (which is variable names, or logical operands), and the list
of indices (relative to current op) that feed in to each operand. This is done
in an order that allows computation of the boolean expression. For example, the
above expression becomes:
(position): OPS: INPUTS:
0 a []
1 a []
2 ~ [-1]
3 b []
4 c []
5 | [-2, -1]
6 & [-4, -1]
7 | [-7, -1]
## First-order logic
The above is also extended to first-order logic, with relations, "for all", and
"exists". For example, 'f(x, y)' is a relation, and 'all x . (f(x) -> g(x))' is
a "for all" formula.
Unary and binary relations are currently supported in this model. A binary
relation f(x, y) is parsed as a ternary op, with op-type "R2", and arguments
[f, x, y].
For all "all x . k" is parsed as a binary op, with op-type "A" and arguments
[x, k]. Similarly for "exists x . k".
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import pyparsing
class Language(object):
"""Contains information on a language (e.g., propn. or first-order logic).
This is used by `TreeNet` to learn how to interpret the symbols, and how many
"variable" embeddings to create
"""
def __init__(self, arities, predicates, constants, variables):
"""Initializes a `Language`.
Args:
arities: `OrderedDict` containing a mapping from op symbols to arities.
predicates: List of strings, containing valid predicate (or relation)
symbols.
constants: List of strings, containing valid FOL constant symbols.
variables: List of strings, containing valid FOL variable symbols. Note
that "propositional variables" should be specified as (zero arity)
predicates.
"""
assert isinstance(arities, collections.OrderedDict)
self._arities = arities
self._predicates = predicates
self._constants = constants
self._variables = variables
self._max_arity = max(arities.values())
self._ops = arities.keys()
self._symbols = arities.keys() + predicates + constants + variables
@property
def arities(self):
return self._arities
@property
def predicates(self):
return self._predicates
@property
def constants(self):
return self._constants
@property
def variables(self):
return self._variables
def arity(self, op):
if op in self._predicates or op in self._constants or op in self._variables:
return 0
else:
return self._arities[op]
@property
def max_arity(self):
return self._max_arity
@property
def ops(self):
return self._ops
@property
def symbols(self):
"""Returns ops and variables."""
return self._symbols
# These strings are used in internal representations of the ops when parsed, and
# are stored in sstables when parsing the text data, and then cross-referenced
# when understanding the ops in the TreeNet code. (E.g., important to be able to
# distinguish unary and binary ops.) I.e., if these get changed, then data
# generation likely has to be done again.
#
# This isn't the same as the input operations allowed - there can be a many-to-
# one mapping in this case. E.g., both /\ and & are recognised for AND.
IDENTITY_SYMBOL = ''
NEGATION_SYMBOL = '~'
AND_SYMBOL = '&'
OR_SYMBOL = '|'
XOR_SYMBOL = '^'
IMPLIES_SYMBOL = '>'
FOR_ALL_SYMBOL = 'A'
EXISTS_SYMBOL = 'E'
RELATION_SYMBOL = 'R{}' # formatted for arity of relation.
FALSE_SYMBOL = 'F'
TRUE_SYMBOL = 'T'
def propositional_language(num_variables=26):
"""Makes a propositional logic language."""
predicates = [chr(ord('a') + i) for i in xrange(num_variables)]
return Language(
collections.OrderedDict([
(IDENTITY_SYMBOL, 0),
(NEGATION_SYMBOL, 1),
(AND_SYMBOL, 2),
(OR_SYMBOL, 2),
(XOR_SYMBOL, 2),
(IMPLIES_SYMBOL, 2),
]),
predicates=predicates,
constants=[],
variables=[],
)
FOL_MAX_RELATION_ARITY = 2
def fol_language():
"""Makes a first-order logic language.
This has:
* Predicate symbols p1, ..., p9, q1, ..., r9.
* Constant symbols a1, ..., a9, b1, ..., c9.
* Variable symbols x1, ..., x9, y1, ..., z9.
Returns:
Instance of `Language`.
"""
def make_symbols(start):
"""E.g., if start='a', then returns ['a1', ..., 'a9', 'b1', ..., 'c9']."""
return [chr(ord(start) + i) + str(n)
for i in range(0, 3)
for n in range(1, 10)]
return Language(
collections.OrderedDict([
(IDENTITY_SYMBOL, 0),
(NEGATION_SYMBOL, 1),
(AND_SYMBOL, 2),
(OR_SYMBOL, 2),
(XOR_SYMBOL, 2),
(IMPLIES_SYMBOL, 2),
(FOR_ALL_SYMBOL, 2),
(EXISTS_SYMBOL, 2),
(RELATION_SYMBOL.format(1), 2), # unary-relation
(RELATION_SYMBOL.format(2), 3), # binary-relation
]),
predicates=make_symbols('p'),
constants=make_symbols('a'),
variables=make_symbols('x'),
)
# Makes parsing a lot faster:
pyparsing.ParserElement.enablePackrat()
class _SubExpression(
collections.namedtuple('_SubExpression', ('ops', 'inputs'))):
"""Contains a parsed boolean expression.
Attributes:
ops: List of types, which is variable names or operators. For example,
['a', 'b', '~', '&'].
inputs: List of list of input indices relative to the current index (i.e.,
they are negative numbers).
"""
class ParseResult(
collections.namedtuple('ParseResult', ('expression', 'ops', 'inputs'))):
"""Final parse output.
This is like `SubExpression`, but with a couple of extra useful fields. It is
used when generating datasets, as the fields it contains are in a suitable
format for writing to SSTables.
Attributes:
expression: List of ops (including variable letters and brackets) in the
original expression order.
ops: List of ops (including variable letters) for calculating the boolean
expression.
inputs: List of list of input indices relative to the current index (i.e.,
they are negative numbers).
"""
class ExpressionData(
collections.namedtuple('ExpressionData', ('expression', 'ops', 'inputs'))):
"""Similar to `ParseResult`, but for batches of TF tensors from datasets.
Attributes:
expression: String tensor with shape `[batch_size, max_expression_length]`.
ops: String tensor with shape `[batch_size, max_ops_length]`.
inputs: Tensor with shape `[batch_size, max_ops_length, max_arity]`.
"""
def _concat_subexpressions(*expressions):
"""Concatenates the types and input indices of the expressions."""
ops = []
inputs = []
for expression in expressions:
ops += expression.ops
inputs += expression.inputs
return _SubExpression(ops, inputs)
def _ensure_subexpression(expression_or_variable):
if isinstance(expression_or_variable, _SubExpression):
return expression_or_variable
return _SubExpression([expression_or_variable], [[]])
class Parser(object):
"""Parser for tree-like expressions."""
def __init__(self, language):
"""Initializes a `Parser` instance.
Args:
language: Instance of `Language`. Used to determine the different
predicate / constant / variable symbols appearing.
"""
self._language = language
predicate_symbol = pyparsing.oneOf(
language.predicates + [FALSE_SYMBOL, TRUE_SYMBOL])
constant_symbol = pyparsing.oneOf(language.constants)
variable_symbol = pyparsing.oneOf(language.variables)
left_par = pyparsing.Literal('(').suppress()
right_par = pyparsing.Literal(')').suppress()
formula = pyparsing.Forward()
relation_expressions = self._relation_expressions(
predicate_symbol, pyparsing.Or([constant_symbol, variable_symbol]))
formula_without_op = pyparsing.Forward()
negated_formula_without_op = (
pyparsing.Literal('~').suppress() + formula_without_op)
negated_formula_without_op.setParseAction(
lambda args: self._op(args, NEGATION_SYMBOL))
formula_without_op <<= pyparsing.MatchFirst(
[left_par + formula + right_par] + relation_expressions
+ [negated_formula_without_op])
binary_expressions = self._binary_expressions(
formula_without_op, formula)
negation = pyparsing.Literal('~').suppress() + formula
negation.setParseAction(lambda args: self._op(args, NEGATION_SYMBOL))
for_all = (pyparsing.Literal('all').suppress() + variable_symbol
+ pyparsing.Literal('.').suppress() + formula)
for_all.setParseAction(lambda args: self._op(args, FOR_ALL_SYMBOL))
exists = (pyparsing.Literal('exists').suppress() + variable_symbol
+ pyparsing.Literal('.').suppress() + formula)
exists.setParseAction(lambda args: self._op(args, EXISTS_SYMBOL))
formula <<= pyparsing.MatchFirst(
binary_expressions + [negation] + [for_all, exists, formula_without_op])
self._expression = formula
def _relation_expressions(self, predicate_symbol,
variable_or_constant_symbol):
"""Returns list of `pyparsing.Expression` matching relations."""
expressions = []
# Relations of various arities.
for arity in xrange(1, FOL_MAX_RELATION_ARITY + 1):
expression = predicate_symbol + pyparsing.Literal('(').suppress()
for i in xrange(arity):
if i > 0:
expression += pyparsing.Literal(',').suppress()
expression += variable_or_constant_symbol
expression += pyparsing.Literal(')').suppress()
relation_symbol = RELATION_SYMBOL.format(arity)
expression.setParseAction(functools.partial(self._op, op=relation_symbol))
expressions.append(expression)
# Also match a nullary relation without arguments
expressions.append(predicate_symbol)
return expressions
def _binary_expressions(self, left_formula, right_formula):
"""Returns list of `pyparsing.Expression` for various binary ops."""
binary_op_symbols = {
AND_SYMBOL: '& /\\',
OR_SYMBOL: '| \\/',
IMPLIES_SYMBOL: '> ->',
XOR_SYMBOL: '^',
}
expressions = []
for binary_op, op_symbols in binary_op_symbols.iteritems():
op = left_formula + pyparsing.oneOf(op_symbols).suppress() + right_formula
op.setParseAction(functools.partial(self._op, op=binary_op))
expressions.append(op)
return expressions
def _op(self, parse_args, op):
"""Returns a new `SubExpression` from the op and parse args.
Args:
parse_args: List of parse args, which should be instances of
`_SubExpression` or strings representing symbols.
op: String representing the op, e.g., &, A (for all), etc.
Returns:
Instance of `_SubExpression`.
"""
parse_args = [_ensure_subexpression(arg) for arg in parse_args]
arity = len(parse_args)
indices = []
for i in xrange(arity):
if i == 0:
indices = [-1]
else:
indices = [indices[0] - len(parse_args[-i].ops)] + indices
new = _SubExpression([op], [indices])
all_expressions = parse_args + [new]
return _concat_subexpressions(*all_expressions)
def _clean_expression(self, expression):
r"""Cleans up the expression string to use canonical ops, no spaces.
E.g., "all x. (y \\/ z)" will become "Ax.(y|z)" (as a list).
Args:
expression: String.
Returns:
List of characters containing the ops and variable letters in the order
they occur in `string`.
Raises:
ValueError: If the string contains an unrecognised symbol.
"""
map_ = collections.OrderedDict([
('exists', EXISTS_SYMBOL),
('all', FOR_ALL_SYMBOL),
('\\/', OR_SYMBOL),
('/\\', AND_SYMBOL),
('->', IMPLIES_SYMBOL),
('>>', IMPLIES_SYMBOL),
('~', NEGATION_SYMBOL),
('&', AND_SYMBOL),
('|', OR_SYMBOL),
('^', XOR_SYMBOL),
('>', IMPLIES_SYMBOL),
('T', TRUE_SYMBOL),
('F', FALSE_SYMBOL),
('(', '('),
(')', ')'),
(',', ','),
('.', '.'),
(' ', None),
])
for c in (self._language.predicates + self._language.constants
+ self._language.variables):
map_[c] = c
keyword_lengths = sorted(set(len(keyword) for keyword in map_.iterkeys()),
reverse=True)
result = []
i = 0
while i < len(expression):
found = False
for keyword_length in keyword_lengths:
if i + keyword_length <= len(expression):
extracted = expression[i:i + keyword_length]
if extracted in map_:
conversion = map_[extracted]
if conversion is not None:
result.append(conversion)
i += keyword_length
found = True
break
if not found:
raise ValueError('Unable to clean {} at position {}'
.format(expression, i+1))
return result
@property
def language(self):
"""Returns `Language` used by this parser."""
return self._language
def parse(self, expression):
"""Parses the expression, extracting ops and indices.
Args:
expression: The expression as a string.
Returns:
Instance of `ParseResult`.
"""
try:
parsed = self._expression.parseString(expression)[0]
except (pyparsing.ParseException, RuntimeError) as e:
print('Unable to parse: {0}'.format(expression))
raise e
parsed = _ensure_subexpression(parsed)
clean_expression = self._clean_expression(expression)
return ParseResult(
expression=[op.encode('ascii') for op in clean_expression],
ops=[op.encode('ascii') for op in parsed.ops],
inputs=parsed.inputs,
)
| logical-entailment-dataset-master | parser.py |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for L2L problems."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange
import tensorflow as tf
from nose_parameterized import parameterized
import problems
class SimpleTest(tf.test.TestCase):
"""Tests simple problem."""
def testShape(self):
problem = problems.simple()
f = problem()
self.assertEqual(f.get_shape().as_list(), [])
def testVariables(self):
problem = problems.simple()
problem()
variables = tf.trainable_variables()
self.assertEqual(len(variables), 1)
self.assertEqual(variables[0].get_shape().as_list(), [])
@parameterized.expand([(-1,), (0,), (1,), (10,)])
def testValues(self, value):
problem = problems.simple()
f = problem()
with self.test_session() as sess:
output = sess.run(f, feed_dict={"x:0": value})
self.assertEqual(output, value**2)
class SimpleMultiOptimizerTest(tf.test.TestCase):
"""Tests multi-optimizer simple problem."""
def testShape(self):
num_dims = 3
problem = problems.simple_multi_optimizer(num_dims=num_dims)
f = problem()
self.assertEqual(f.get_shape().as_list(), [])
def testVariables(self):
num_dims = 3
problem = problems.simple_multi_optimizer(num_dims=num_dims)
problem()
variables = tf.trainable_variables()
self.assertEqual(len(variables), num_dims)
for v in variables:
self.assertEqual(v.get_shape().as_list(), [])
@parameterized.expand([(-1,), (0,), (1,), (10,)])
def testValues(self, value):
problem = problems.simple_multi_optimizer(num_dims=1)
f = problem()
with self.test_session() as sess:
output = sess.run(f, feed_dict={"x_0:0": value})
self.assertEqual(output, value**2)
class QuadraticTest(tf.test.TestCase):
"""Tests Quadratic problem."""
def testShape(self):
problem = problems.quadratic()
f = problem()
self.assertEqual(f.get_shape().as_list(), [])
def testVariables(self):
batch_size = 5
num_dims = 3
problem = problems.quadratic(batch_size=batch_size, num_dims=num_dims)
problem()
variables = tf.trainable_variables()
self.assertEqual(len(variables), 1)
self.assertEqual(variables[0].get_shape().as_list(), [batch_size, num_dims])
@parameterized.expand([(-1,), (0,), (1,), (10,)])
def testValues(self, value):
problem = problems.quadratic(batch_size=1, num_dims=1)
f = problem()
w = 2.0
y = 3.0
with self.test_session() as sess:
output = sess.run(f, feed_dict={"x:0": [[value]],
"w:0": [[[w]]],
"y:0": [[y]]})
self.assertEqual(output, ((w * value) - y)**2)
class EnsembleTest(tf.test.TestCase):
"""Tests Ensemble problem."""
def testShape(self):
num_dims = 3
problem_defs = [{"name": "simple", "options": {}} for _ in xrange(num_dims)]
ensemble = problems.ensemble(problem_defs)
f = ensemble()
self.assertEqual(f.get_shape().as_list(), [])
def testVariables(self):
num_dims = 3
problem_defs = [{"name": "simple", "options": {}} for _ in xrange(num_dims)]
ensemble = problems.ensemble(problem_defs)
ensemble()
variables = tf.trainable_variables()
self.assertEqual(len(variables), num_dims)
for v in variables:
self.assertEqual(v.get_shape().as_list(), [])
@parameterized.expand([(-1,), (0,), (1,), (10,)])
def testValues(self, value):
num_dims = 1
weight = 0.5
problem_defs = [{"name": "simple", "options": {}} for _ in xrange(num_dims)]
ensemble = problems.ensemble(problem_defs, weights=[weight])
f = ensemble()
with self.test_session() as sess:
output = sess.run(f, feed_dict={"problem_0/x:0": value})
self.assertEqual(output, weight * value**2)
if __name__ == "__main__":
tf.test.main()
| learning-to-learn-master | problems_test.py |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Learning 2 Learn preprocessing modules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import sonnet as snt
import tensorflow as tf
class Clamp(snt.AbstractModule):
def __init__(self, min_value=None, max_value=None, name="clamp"):
super(Clamp, self).__init__(name=name)
self._min = min_value
self._max = max_value
def _build(self, inputs):
output = inputs
if self._min is not None:
output = tf.maximum(output, self._min)
if self._max is not None:
output = tf.minimum(output, self._max)
return output
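# Example: Clamp(min_value=-1.0, max_value=1.0) clamps elementwise, so the
# inputs [-2.0, 0.5, 3.0] become [-1.0, 0.5, 1.0].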
class LogAndSign(snt.AbstractModule):
"""Log and sign preprocessing.
As described in https://arxiv.org/pdf/1606.04474v1.pdf (Appendix A).
"""
def __init__(self, k, name="preprocess_log"):
super(LogAndSign, self).__init__(name=name)
self._k = k
def _build(self, gradients):
"""Connects the LogAndSign module into the graph.
Args:
gradients: `Tensor` of gradients with shape `[d_1, ..., d_n]`.
Returns:
`Tensor` with shape `[d_1, ..., d_n-1, 2 * d_n]`. The first `d_n` elements
along the nth dimension correspond to the log output and the remaining
`d_n` elements to the sign output.
"""
eps = np.finfo(gradients.dtype.as_numpy_dtype).eps
ndims = gradients.get_shape().ndims
log = tf.log(tf.abs(gradients) + eps)
clamped_log = Clamp(min_value=-1.0)(log / self._k) # pylint: disable=not-callable
sign = Clamp(min_value=-1.0, max_value=1.0)(gradients * np.exp(self._k)) # pylint: disable=not-callable
return tf.concat([clamped_log, sign], ndims - 1)
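# A minimal usage sketch (assumes a TF1/Sonnet graph session, as used elsewhere
# in this repo): preprocess a gradient tensor before feeding it to the optimizer
# RNN. For gradients of shape [d, 1], the output has shape [d, 2]: the scaled,
# clamped log-magnitude followed by the clamped sign term.
#   grads = tf.constant([[1e-3], [-10.0]])
#   preprocessed = LogAndSign(k=5)(grads)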
| learning-to-learn-master | preprocess.py |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Learning 2 Learn utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from timeit import default_timer as timer
import numpy as np
from six.moves import xrange
import problems
def run_epoch(sess, cost_op, ops, reset, num_unrolls):
"""Runs one optimization epoch."""
start = timer()
sess.run(reset)
for _ in xrange(num_unrolls):
cost = sess.run([cost_op] + ops)[0]
return timer() - start, cost
def print_stats(header, total_error, total_time, n):
"""Prints experiment statistics."""
print(header)
print("Log Mean Final Error: {:.2f}".format(np.log10(total_error / n)))
print("Mean epoch time: {:.2f} s".format(total_time / n))
def get_net_path(name, path):
return None if path is None else os.path.join(path, name + ".l2l")
def get_default_net_config(name, path):
return {
"net": "CoordinateWiseDeepLSTM",
"net_options": {
"layers": (20, 20),
"preprocess_name": "LogAndSign",
"preprocess_options": {"k": 5},
"scale": 0.01,
},
"net_path": get_net_path(name, path)
}
def get_config(problem_name, path=None):
"""Returns problem configuration."""
if problem_name == "simple":
problem = problems.simple()
net_config = {"cw": {
"net": "CoordinateWiseDeepLSTM",
"net_options": {"layers": (), "initializer": "zeros"},
"net_path": get_net_path("cw", path)
}}
net_assignments = None
elif problem_name == "simple-multi":
problem = problems.simple_multi_optimizer()
net_config = {
"cw": {
"net": "CoordinateWiseDeepLSTM",
"net_options": {"layers": (), "initializer": "zeros"},
"net_path": get_net_path("cw", path)
},
"adam": {
"net": "Adam",
"net_options": {"learning_rate": 0.1}
}
}
net_assignments = [("cw", ["x_0"]), ("adam", ["x_1"])]
elif problem_name == "quadratic":
problem = problems.quadratic(batch_size=128, num_dims=10)
net_config = {"cw": {
"net": "CoordinateWiseDeepLSTM",
"net_options": {"layers": (20, 20)},
"net_path": get_net_path("cw", path)
}}
net_assignments = None
elif problem_name == "mnist":
mode = "train" if path is None else "test"
problem = problems.mnist(layers=(20,), mode=mode)
net_config = {"cw": get_default_net_config("cw", path)}
net_assignments = None
elif problem_name == "cifar":
mode = "train" if path is None else "test"
problem = problems.cifar10("cifar10",
conv_channels=(16, 16, 16),
linear_layers=(32,),
mode=mode)
net_config = {"cw": get_default_net_config("cw", path)}
net_assignments = None
elif problem_name == "cifar-multi":
mode = "train" if path is None else "test"
problem = problems.cifar10("cifar10",
conv_channels=(16, 16, 16),
linear_layers=(32,),
mode=mode)
net_config = {
"conv": get_default_net_config("conv", path),
"fc": get_default_net_config("fc", path)
}
conv_vars = ["conv_net_2d/conv_2d_{}/w".format(i) for i in xrange(3)]
fc_vars = ["conv_net_2d/conv_2d_{}/b".format(i) for i in xrange(3)]
fc_vars += ["conv_net_2d/batch_norm_{}/beta".format(i) for i in xrange(3)]
fc_vars += ["mlp/linear_{}/w".format(i) for i in xrange(2)]
fc_vars += ["mlp/linear_{}/b".format(i) for i in xrange(2)]
fc_vars += ["mlp/batch_norm/beta"]
net_assignments = [("conv", conv_vars), ("fc", fc_vars)]
else:
raise ValueError("{} is not a valid problem".format(problem_name))
return problem, net_config, net_assignments
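# A minimal usage sketch (hypothetical call site): the training/evaluation
# scripts consume the triple returned here roughly as
#   problem, net_config, net_assignments = util.get_config("quadratic")
#   optimizer = meta.MetaOptimizer(**net_config)
#   minimize_ops = optimizer.meta_minimize(
#       problem, num_unrolls, net_assignments=net_assignments)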
| learning-to-learn-master | util.py |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for L2L meta-optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from nose_parameterized import parameterized
import numpy as np
from six.moves import xrange
import sonnet as snt
import tensorflow as tf
import meta
import problems
def train(sess, minimize_ops, num_epochs, num_unrolls):
"""L2L training."""
step, update, reset, loss_last, x_last = minimize_ops
for _ in xrange(num_epochs):
sess.run(reset)
for _ in xrange(num_unrolls):
cost, final_x, unused_1, unused_2 = sess.run([loss_last, x_last,
update, step])
return cost, final_x
class L2LTest(tf.test.TestCase):
"""Tests L2L meta-optimizer."""
def testResults(self):
"""Tests reproducibility of Torch results."""
problem = problems.simple()
optimizer = meta.MetaOptimizer(net=dict(
net="CoordinateWiseDeepLSTM",
net_options={
"layers": (),
"initializer": "zeros"
}))
minimize_ops = optimizer.meta_minimize(problem, 5)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
cost, final_x = train(sess, minimize_ops, 1, 2)
# Torch results
torch_cost = 0.7325327
torch_final_x = 0.8559
self.assertAlmostEqual(cost, torch_cost, places=4)
self.assertAlmostEqual(final_x[0], torch_final_x, places=4)
@parameterized.expand([
# Shared optimizer.
(
None,
{
"net": {
"net": "CoordinateWiseDeepLSTM",
"net_options": {"layers": (1, 1,)}
}
}
),
# Explicit sharing.
(
[("net", ["x_0", "x_1"])],
{
"net": {
"net": "CoordinateWiseDeepLSTM",
"net_options": {"layers": (1,)}
}
}
),
# Different optimizers.
(
[("net1", ["x_0"]), ("net2", ["x_1"])],
{
"net1": {
"net": "CoordinateWiseDeepLSTM",
"net_options": {"layers": (1,)}
},
"net2": {"net": "Adam"}
}
),
# Different optimizers for the same variable.
(
[("net1", ["x_0"]), ("net2", ["x_0"])],
{
"net1": {
"net": "CoordinateWiseDeepLSTM",
"net_options": {"layers": (1,)}
},
"net2": {
"net": "CoordinateWiseDeepLSTM",
"net_options": {"layers": (1,)}
}
}
),
])
def testMultiOptimizer(self, net_assignments, net_config):
"""Tests different variable->net mappings in multi-optimizer problem."""
problem = problems.simple_multi_optimizer(num_dims=2)
optimizer = meta.MetaOptimizer(**net_config)
minimize_ops = optimizer.meta_minimize(problem, 3,
net_assignments=net_assignments)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
train(sess, minimize_ops, 1, 2)
def testSecondDerivatives(self):
"""Tests second derivatives for simple problem."""
problem = problems.simple()
optimizer = meta.MetaOptimizer(net=dict(
net="CoordinateWiseDeepLSTM",
net_options={"layers": ()}))
minimize_ops = optimizer.meta_minimize(problem, 3,
second_derivatives=True)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
train(sess, minimize_ops, 1, 2)
def testConvolutional(self):
"""Tests L2L applied to problem with convolutions."""
kernel_shape = 4
def convolutional_problem():
conv = snt.Conv2D(output_channels=1,
kernel_shape=kernel_shape,
stride=1,
name="conv")
output = conv(tf.random_normal((100, 100, 3, 10)))
return tf.reduce_sum(output)
net_config = {
"conv": {
"net": "KernelDeepLSTM",
"net_options": {
"kernel_shape": [kernel_shape] * 2,
"layers": (5,)
},
},
}
optimizer = meta.MetaOptimizer(**net_config)
minimize_ops = optimizer.meta_minimize(
convolutional_problem, 3,
net_assignments=[("conv", ["conv/w"])]
)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
train(sess, minimize_ops, 1, 2)
def testWhileLoopProblem(self):
"""Tests L2L applied to problem with while loop."""
def while_loop_problem():
x = tf.get_variable("x", shape=[], initializer=tf.ones_initializer())
# Strange way of squaring the variable.
_, x_squared = tf.while_loop(
cond=lambda t, _: t < 1,
body=lambda t, x: (t + 1, x * x),
loop_vars=(0, x),
name="loop")
return x_squared
optimizer = meta.MetaOptimizer(net=dict(
net="CoordinateWiseDeepLSTM",
net_options={"layers": ()}))
minimize_ops = optimizer.meta_minimize(while_loop_problem, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
train(sess, minimize_ops, 1, 2)
def testSaveAndLoad(self):
"""Tests saving and loading a meta-optimizer."""
layers = (2, 3)
net_options = {"layers": layers, "initializer": "zeros"}
num_unrolls = 2
num_epochs = 1
problem = problems.simple()
# Original optimizer.
with tf.Graph().as_default() as g1:
optimizer = meta.MetaOptimizer(net=dict(
net="CoordinateWiseDeepLSTM",
net_options=net_options))
minimize_ops = optimizer.meta_minimize(problem, 3)
with self.test_session(graph=g1) as sess:
sess.run(tf.global_variables_initializer())
train(sess, minimize_ops, 1, 2)
# Save optimizer.
tmp_dir = tempfile.mkdtemp()
save_result = optimizer.save(sess, path=tmp_dir)
net_path = next(iter(save_result))
# Retrain original optimizer.
cost, x = train(sess, minimize_ops, num_unrolls, num_epochs)
# Load optimizer and retrain in a new session.
with tf.Graph().as_default() as g2:
optimizer = meta.MetaOptimizer(net=dict(
net="CoordinateWiseDeepLSTM",
net_options=net_options,
net_path=net_path))
minimize_ops = optimizer.meta_minimize(problem, 3)
with self.test_session(graph=g2) as sess:
sess.run(tf.global_variables_initializer())
cost_loaded, x_loaded = train(sess, minimize_ops, num_unrolls, num_epochs)
# The last cost should be the same.
self.assertAlmostEqual(cost, cost_loaded, places=3)
self.assertAlmostEqual(x[0], x_loaded[0], places=3)
# Cleanup.
os.remove(net_path)
os.rmdir(tmp_dir)
if __name__ == "__main__":
tf.test.main()
| learning-to-learn-master | meta_test.py |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for L2L TensorFlow implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange
import tensorflow as tf
import meta
import problems
def train(sess, minimize_ops, num_epochs, num_unrolls):
"""L2L training."""
step, update, reset, loss_last, x_last = minimize_ops
for _ in xrange(num_epochs):
sess.run(reset)
for _ in xrange(num_unrolls):
cost, final_x, unused_1, unused_2 = sess.run([loss_last, x_last,
update, step])
return cost, final_x
class L2LTest(tf.test.TestCase):
"""Tests L2L TensorFlow implementation."""
def testSimple(self):
"""Tests L2L applied to simple problem."""
problem = problems.simple()
optimizer = meta.MetaOptimizer(net=dict(
net="CoordinateWiseDeepLSTM",
net_options={
"layers": (),
# Initializing the network to zeros makes learning more stable.
"initializer": "zeros"
}))
minimize_ops = optimizer.meta_minimize(problem, 20, learning_rate=1e-2)
# L2L should solve the simple problem in less than 500 epochs.
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
cost, _ = train(sess, minimize_ops, 500, 5)
self.assertLess(cost, 1e-5)
if __name__ == "__main__":
tf.test.main()
| learning-to-learn-master | convergence_test.py |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for L2L preprocessors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import preprocess
class ClampTest(tf.test.TestCase):
"""Tests Clamp module."""
def testShape(self):
shape = [2, 3]
inputs = tf.random_normal(shape)
clamp = preprocess.Clamp(min_value=-1.0, max_value=1.0)
output = clamp(inputs)
self.assertEqual(output.get_shape().as_list(), shape)
def testMin(self):
shape = [100]
inputs = tf.random_normal(shape)
clamp = preprocess.Clamp(min_value=0.0)
output = clamp(inputs)
with self.test_session() as sess:
output_np = sess.run(output)
self.assertTrue(np.all(np.greater_equal(output_np, np.zeros(shape))))
def testMax(self):
shape = [100]
inputs = tf.random_normal(shape)
clamp = preprocess.Clamp(max_value=0.0)
output = clamp(inputs)
with self.test_session() as sess:
output_np = sess.run(output)
self.assertTrue(np.all(np.less_equal(output_np, np.zeros(shape))))
def testMinAndMax(self):
shape = [100]
inputs = tf.random_normal(shape)
clamp = preprocess.Clamp(min_value=0.0, max_value=0.0)
output = clamp(inputs)
with self.test_session() as sess:
output_np = sess.run(output)
self.assertAllEqual(output_np, np.zeros(shape))
class LogAndSignTest(tf.test.TestCase):
"""Tests LogAndSign module."""
def testShape(self):
shape = [2, 3]
inputs = tf.random_normal(shape)
module = preprocess.LogAndSign(k=1)
output = module(inputs)
self.assertEqual(output.get_shape().as_list(), shape[:-1] + [shape[-1] * 2])
def testLogWithOnes(self):
shape = [1]
inputs = tf.ones(shape)
module = preprocess.LogAndSign(k=10)
output = module(inputs)
with self.test_session() as sess:
output_np = sess.run(output)
log_np = output_np[0]
self.assertAlmostEqual(log_np, 0.0)
def testSign(self):
shape = [2, 1]
inputs = tf.random_normal(shape)
module = preprocess.LogAndSign(k=1)
output = module(inputs)
with self.test_session() as sess:
inputs_np, output_np = sess.run([inputs, output])
sign_np = output_np[:, 1:]
self.assertAllEqual(np.sign(sign_np), np.sign(inputs_np))
if __name__ == "__main__":
tf.test.main()
| learning-to-learn-master | preprocess_test.py |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Learning 2 Learn meta-optimizer networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import sys
import dill as pickle
import numpy as np
import six
import sonnet as snt
import tensorflow as tf
import preprocess
def factory(net, net_options=(), net_path=None):
"""Network factory."""
net_class = getattr(sys.modules[__name__], net)
net_options = dict(net_options)
if net_path:
with open(net_path, "rb") as f:
net_options["initializer"] = pickle.load(f)
return net_class(**net_options)
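# Usage sketch (illustrative comment, not part of the original module); the
# options below mirror the default MetaOptimizer config in meta.py:
#   net = factory("CoordinateWiseDeepLSTM",
#                 net_options={"layers": (20, 20),
#                              "preprocess_name": "LogAndSign",
#                              "preprocess_options": {"k": 5},
#                              "scale": 0.01})
# If net_path is given, the pickled variables stored there are passed on as the
# "initializer" option of the constructed network.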
def save(network, sess, filename=None):
"""Save the variables contained by a network to disk."""
to_save = collections.defaultdict(dict)
variables = snt.get_variables_in_module(network)
for v in variables:
split = v.name.split(":")[0].split("/")
module_name = split[-2]
variable_name = split[-1]
to_save[module_name][variable_name] = v.eval(sess)
if filename:
with open(filename, "wb") as f:
pickle.dump(to_save, f)
return to_save
@six.add_metaclass(abc.ABCMeta)
class Network(snt.RNNCore):
"""Base class for meta-optimizer networks."""
@abc.abstractmethod
def initial_state_for_inputs(self, inputs, **kwargs):
"""Initial state given inputs."""
pass
def _convert_to_initializer(initializer):
"""Returns a TensorFlow initializer.
* Corresponding TensorFlow initializer when the argument is a string (e.g.
"zeros" -> `tf.zeros_initializer`).
* `tf.constant_initializer` when the argument is a `numpy` `array`.
* Identity when the argument is a TensorFlow initializer.
Args:
initializer: `string`, `numpy` `array` or TensorFlow initializer.
Returns:
TensorFlow initializer.
"""
if isinstance(initializer, str):
return getattr(tf, initializer + "_initializer")(dtype=tf.float32)
elif isinstance(initializer, np.ndarray):
return tf.constant_initializer(initializer)
else:
return initializer
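# Illustrative conversions (comment only, not part of the original module):
#   _convert_to_initializer("zeros")          -> tf.zeros_initializer(dtype=tf.float32)
#   _convert_to_initializer(np.array([0.0]))  -> tf.constant_initializer(array)
#   _convert_to_initializer(tf.ones_initializer())  -> returned unchanged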
def _get_initializers(initializers, fields):
"""Produces a nn initialization `dict` (see Linear docs for a example).
Grabs initializers for relevant fields if the first argument is a `dict` or
reuses the same initializer for all fields otherwise. All initializers are
processed using `_convert_to_initializer`.
Args:
initializers: Initializer or <variable, initializer> dictionary.
fields: Fields nn is expecting for module initialization.
Returns:
nn initialization dictionary.
"""
result = {}
for f in fields:
if isinstance(initializers, dict):
if f in initializers:
# Variable-specific initializer.
result[f] = _convert_to_initializer(initializers[f])
else:
        # Common initializer for all variables.
result[f] = _convert_to_initializer(initializers)
return result
def _get_layer_initializers(initializers, layer_name, fields):
"""Produces a nn initialization dictionary for a layer.
Calls `_get_initializers using initializers[layer_name]` if `layer_name` is a
valid key or using initializers otherwise (reuses initializers between
layers).
Args:
initializers: Initializer, <variable, initializer> dictionary,
<layer, initializer> dictionary.
layer_name: Layer name.
fields: Fields nn is expecting for module initialization.
Returns:
nn initialization dictionary.
"""
# No initializers specified.
if initializers is None:
return None
# Layer-specific initializer.
if isinstance(initializers, dict) and layer_name in initializers:
return _get_initializers(initializers[layer_name], fields)
return _get_initializers(initializers, fields)
class StandardDeepLSTM(Network):
"""LSTM layers with a Linear layer on top."""
def __init__(self, output_size, layers, preprocess_name="identity",
preprocess_options=None, scale=1.0, initializer=None,
name="deep_lstm"):
"""Creates an instance of `StandardDeepLSTM`.
Args:
output_size: Output sizes of the final linear layer.
layers: Output sizes of LSTM layers.
preprocess_name: Gradient preprocessing class name (in `l2l.preprocess` or
tf modules). Default is `tf.identity`.
preprocess_options: Gradient preprocessing options.
scale: Gradient scaling (default is 1.0).
initializer: Variable initializer for linear layer. See `snt.Linear` and
`snt.LSTM` docs for more info. This parameter can be a string (e.g.
"zeros" will be converted to tf.zeros_initializer).
name: Module name.
"""
super(StandardDeepLSTM, self).__init__(name=name)
self._output_size = output_size
self._scale = scale
if hasattr(preprocess, preprocess_name):
preprocess_class = getattr(preprocess, preprocess_name)
self._preprocess = preprocess_class(**preprocess_options)
else:
self._preprocess = getattr(tf, preprocess_name)
with tf.variable_scope(self._template.variable_scope):
self._cores = []
for i, size in enumerate(layers, start=1):
name = "lstm_{}".format(i)
init = _get_layer_initializers(initializer, name,
("w_gates", "b_gates"))
self._cores.append(snt.LSTM(size, name=name, initializers=init))
self._rnn = snt.DeepRNN(self._cores, skip_connections=False,
name="deep_rnn")
init = _get_layer_initializers(initializer, "linear", ("w", "b"))
self._linear = snt.Linear(output_size, name="linear", initializers=init)
def _build(self, inputs, prev_state):
"""Connects the `StandardDeepLSTM` module into the graph.
Args:
inputs: 2D `Tensor` ([batch_size, input_size]).
prev_state: `DeepRNN` state.
Returns:
`Tensor` shaped as `inputs`.
"""
# Adds preprocessing dimension and preprocess.
inputs = self._preprocess(tf.expand_dims(inputs, -1))
# Incorporates preprocessing into data dimension.
inputs = tf.reshape(inputs, [inputs.get_shape().as_list()[0], -1])
output, next_state = self._rnn(inputs, prev_state)
return self._linear(output) * self._scale, next_state
def initial_state_for_inputs(self, inputs, **kwargs):
batch_size = inputs.get_shape().as_list()[0]
return self._rnn.initial_state(batch_size, **kwargs)
class CoordinateWiseDeepLSTM(StandardDeepLSTM):
"""Coordinate-wise `DeepLSTM`."""
def __init__(self, name="cw_deep_lstm", **kwargs):
"""Creates an instance of `CoordinateWiseDeepLSTM`.
Args:
name: Module name.
**kwargs: Additional `DeepLSTM` args.
"""
super(CoordinateWiseDeepLSTM, self).__init__(1, name=name, **kwargs)
def _reshape_inputs(self, inputs):
return tf.reshape(inputs, [-1, 1])
def _build(self, inputs, prev_state):
"""Connects the CoordinateWiseDeepLSTM module into the graph.
Args:
inputs: Arbitrarily shaped `Tensor`.
prev_state: `DeepRNN` state.
Returns:
`Tensor` shaped as `inputs`.
"""
input_shape = inputs.get_shape().as_list()
reshaped_inputs = self._reshape_inputs(inputs)
build_fn = super(CoordinateWiseDeepLSTM, self)._build
output, next_state = build_fn(reshaped_inputs, prev_state)
# Recover original shape.
return tf.reshape(output, input_shape), next_state
def initial_state_for_inputs(self, inputs, **kwargs):
reshaped_inputs = self._reshape_inputs(inputs)
return super(CoordinateWiseDeepLSTM, self).initial_state_for_inputs(
reshaped_inputs, **kwargs)
class KernelDeepLSTM(StandardDeepLSTM):
"""`DeepLSTM` for convolutional filters.
The inputs are assumed to be shaped as convolutional filters with an extra
preprocessing dimension ([kernel_w, kernel_h, n_input_channels,
n_output_channels]).
"""
def __init__(self, kernel_shape, name="kernel_deep_lstm", **kwargs):
"""Creates an instance of `KernelDeepLSTM`.
Args:
kernel_shape: Kernel shape (2D `tuple`).
name: Module name.
**kwargs: Additional `DeepLSTM` args.
"""
self._kernel_shape = kernel_shape
output_size = np.prod(kernel_shape)
super(KernelDeepLSTM, self).__init__(output_size, name=name, **kwargs)
def _reshape_inputs(self, inputs):
transposed_inputs = tf.transpose(inputs, perm=[2, 3, 0, 1])
return tf.reshape(transposed_inputs, [-1] + self._kernel_shape)
def _build(self, inputs, prev_state):
"""Connects the KernelDeepLSTM module into the graph.
Args:
inputs: 4D `Tensor` (convolutional filter).
prev_state: `DeepRNN` state.
Returns:
`Tensor` shaped as `inputs`.
"""
input_shape = inputs.get_shape().as_list()
reshaped_inputs = self._reshape_inputs(inputs)
build_fn = super(KernelDeepLSTM, self)._build
output, next_state = build_fn(reshaped_inputs, prev_state)
transposed_output = tf.transpose(output, [1, 0])
# Recover original shape.
return tf.reshape(transposed_output, input_shape), next_state
def initial_state_for_inputs(self, inputs, **kwargs):
"""Batch size given inputs."""
reshaped_inputs = self._reshape_inputs(inputs)
return super(KernelDeepLSTM, self).initial_state_for_inputs(
reshaped_inputs, **kwargs)
class Sgd(Network):
"""Identity network which acts like SGD."""
def __init__(self, learning_rate=0.001, name="sgd"):
"""Creates an instance of the Identity optimizer network.
Args:
learning_rate: constant learning rate to use.
name: Module name.
"""
super(Sgd, self).__init__(name=name)
self._learning_rate = learning_rate
def _build(self, inputs, _):
return -self._learning_rate * inputs, []
def initial_state_for_inputs(self, inputs, **kwargs):
return []
def _update_adam_estimate(estimate, value, b):
return (b * estimate) + ((1 - b) * value)
def _debias_adam_estimate(estimate, b, t):
return estimate / (1 - tf.pow(b, t))
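# The two helpers above implement the standard Adam moment estimates used by
# Adam._build below (sketch of the maths, comment only):
#   m_t = b1 * m_{t-1} + (1 - b1) * g_t,      m_hat = m_t / (1 - b1^t)
#   v_t = b2 * v_{t-1} + (1 - b2) * g_t^2,    v_hat = v_t / (1 - b2^t)
#   update = -learning_rate * m_hat / (sqrt(v_hat) + epsilon)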
class Adam(Network):
"""Adam algorithm (https://arxiv.org/pdf/1412.6980v8.pdf)."""
def __init__(self, learning_rate=1e-3, beta1=0.9, beta2=0.999, epsilon=1e-8,
name="adam"):
"""Creates an instance of Adam."""
super(Adam, self).__init__(name=name)
self._learning_rate = learning_rate
self._beta1 = beta1
self._beta2 = beta2
self._epsilon = epsilon
def _build(self, g, prev_state):
"""Connects the Adam module into the graph."""
b1 = self._beta1
b2 = self._beta2
g_shape = g.get_shape().as_list()
g = tf.reshape(g, (-1, 1))
t, m, v = prev_state
t_next = t + 1
m_next = _update_adam_estimate(m, g, b1)
m_hat = _debias_adam_estimate(m_next, b1, t_next)
v_next = _update_adam_estimate(v, tf.square(g), b2)
v_hat = _debias_adam_estimate(v_next, b2, t_next)
update = -self._learning_rate * m_hat / (tf.sqrt(v_hat) + self._epsilon)
return tf.reshape(update, g_shape), (t_next, m_next, v_next)
def initial_state_for_inputs(self, inputs, dtype=tf.float32, **kwargs):
batch_size = int(np.prod(inputs.get_shape().as_list()))
t = tf.zeros((), dtype=dtype)
m = tf.zeros((batch_size, 1), dtype=dtype)
v = tf.zeros((batch_size, 1), dtype=dtype)
return (t, m, v)
| learning-to-learn-master | networks.py |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Learning 2 Learn problems."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tarfile
import sys
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import sonnet as snt
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets import mnist as mnist_dataset
_nn_initializers = {
"w": tf.random_normal_initializer(mean=0, stddev=0.01),
"b": tf.random_normal_initializer(mean=0, stddev=0.01),
}
def simple():
"""Simple problem: f(x) = x^2."""
def build():
"""Builds loss graph."""
x = tf.get_variable(
"x",
shape=[],
dtype=tf.float32,
initializer=tf.ones_initializer())
return tf.square(x, name="x_squared")
return build
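# Usage sketch (comment only, consistent with convergence_test.py): each
# problem here is a factory returning a parameterless build() callable;
# calling build() inside a graph creates the trainable variables and returns
# the scalar loss tensor:
#   build_fn = simple()
#   loss = build_fn()  # creates variable "x" and returns x ** 2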
def simple_multi_optimizer(num_dims=2):
"""Multidimensional simple problem."""
def get_coordinate(i):
return tf.get_variable("x_{}".format(i),
shape=[],
dtype=tf.float32,
initializer=tf.ones_initializer())
def build():
coordinates = [get_coordinate(i) for i in xrange(num_dims)]
x = tf.concat([tf.expand_dims(c, 0) for c in coordinates], 0)
return tf.reduce_sum(tf.square(x, name="x_squared"))
return build
def quadratic(batch_size=128, num_dims=10, stddev=0.01, dtype=tf.float32):
"""Quadratic problem: f(x) = ||Wx - y||."""
def build():
"""Builds loss graph."""
# Trainable variable.
x = tf.get_variable(
"x",
shape=[batch_size, num_dims],
dtype=dtype,
initializer=tf.random_normal_initializer(stddev=stddev))
# Non-trainable variables.
w = tf.get_variable("w",
shape=[batch_size, num_dims, num_dims],
dtype=dtype,
initializer=tf.random_uniform_initializer(),
trainable=False)
y = tf.get_variable("y",
shape=[batch_size, num_dims],
dtype=dtype,
initializer=tf.random_uniform_initializer(),
trainable=False)
product = tf.squeeze(tf.matmul(w, tf.expand_dims(x, -1)))
return tf.reduce_mean(tf.reduce_sum((product - y) ** 2, 1))
return build
def ensemble(problems, weights=None):
"""Ensemble of problems.
Args:
problems: List of problems. Each problem is specified by a dict containing
the keys 'name' and 'options'.
weights: Optional list of weights for each problem.
Returns:
Sum of (weighted) losses.
Raises:
ValueError: If weights has an incorrect length.
"""
if weights and len(weights) != len(problems):
raise ValueError("len(weights) != len(problems)")
build_fns = [getattr(sys.modules[__name__], p["name"])(**p["options"])
for p in problems]
def build():
loss = 0
for i, build_fn in enumerate(build_fns):
with tf.variable_scope("problem_{}".format(i)):
loss_p = build_fn()
if weights:
loss_p *= weights[i]
loss += loss_p
return loss
return build
def _xent_loss(output, labels):
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=output,
labels=labels)
return tf.reduce_mean(loss)
def mnist(layers, # pylint: disable=invalid-name
activation="sigmoid",
batch_size=128,
mode="train"):
"""Mnist classification with a multi-layer perceptron."""
if activation == "sigmoid":
activation_op = tf.sigmoid
elif activation == "relu":
activation_op = tf.nn.relu
else:
raise ValueError("{} activation not supported".format(activation))
# Data.
data = mnist_dataset.load_mnist()
data = getattr(data, mode)
images = tf.constant(data.images, dtype=tf.float32, name="MNIST_images")
images = tf.reshape(images, [-1, 28, 28, 1])
labels = tf.constant(data.labels, dtype=tf.int64, name="MNIST_labels")
# Network.
mlp = snt.nets.MLP(list(layers) + [10],
activation=activation_op,
initializers=_nn_initializers)
network = snt.Sequential([snt.BatchFlatten(), mlp])
def build():
indices = tf.random_uniform([batch_size], 0, data.num_examples, tf.int64)
batch_images = tf.gather(images, indices)
batch_labels = tf.gather(labels, indices)
output = network(batch_images)
return _xent_loss(output, batch_labels)
return build
CIFAR10_URL = "http://www.cs.toronto.edu/~kriz"
CIFAR10_FILE = "cifar-10-binary.tar.gz"
CIFAR10_FOLDER = "cifar-10-batches-bin"
def _maybe_download_cifar10(path):
"""Download and extract the tarball from Alex's website."""
if not os.path.exists(path):
os.makedirs(path)
filepath = os.path.join(path, CIFAR10_FILE)
if not os.path.exists(filepath):
print("Downloading CIFAR10 dataset to {}".format(filepath))
url = os.path.join(CIFAR10_URL, CIFAR10_FILE)
filepath, _ = urllib.request.urlretrieve(url, filepath)
statinfo = os.stat(filepath)
print("Successfully downloaded {} bytes".format(statinfo.st_size))
tarfile.open(filepath, "r:gz").extractall(path)
def cifar10(path, # pylint: disable=invalid-name
conv_channels=None,
linear_layers=None,
batch_norm=True,
batch_size=128,
num_threads=4,
min_queue_examples=1000,
mode="train"):
"""Cifar10 classification with a convolutional network."""
# Data.
_maybe_download_cifar10(path)
# Read images and labels from disk.
if mode == "train":
filenames = [os.path.join(path,
CIFAR10_FOLDER,
"data_batch_{}.bin".format(i))
for i in xrange(1, 6)]
elif mode == "test":
    filenames = [os.path.join(path, CIFAR10_FOLDER, "test_batch.bin")]
else:
raise ValueError("Mode {} not recognised".format(mode))
depth = 3
height = 32
width = 32
label_bytes = 1
image_bytes = depth * height * width
record_bytes = label_bytes + image_bytes
reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
_, record = reader.read(tf.train.string_input_producer(filenames))
record_bytes = tf.decode_raw(record, tf.uint8)
label = tf.cast(tf.slice(record_bytes, [0], [label_bytes]), tf.int32)
raw_image = tf.slice(record_bytes, [label_bytes], [image_bytes])
image = tf.cast(tf.reshape(raw_image, [depth, height, width]), tf.float32)
# height x width x depth.
image = tf.transpose(image, [1, 2, 0])
image = tf.div(image, 255)
queue = tf.RandomShuffleQueue(capacity=min_queue_examples + 3 * batch_size,
min_after_dequeue=min_queue_examples,
dtypes=[tf.float32, tf.int32],
shapes=[image.get_shape(), label.get_shape()])
enqueue_ops = [queue.enqueue([image, label]) for _ in xrange(num_threads)]
tf.train.add_queue_runner(tf.train.QueueRunner(queue, enqueue_ops))
# Network.
def _conv_activation(x): # pylint: disable=invalid-name
return tf.nn.max_pool(tf.nn.relu(x),
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME")
conv = snt.nets.ConvNet2D(output_channels=conv_channels,
kernel_shapes=[5],
strides=[1],
paddings=[snt.SAME],
activation=_conv_activation,
activate_final=True,
initializers=_nn_initializers,
use_batch_norm=batch_norm)
if batch_norm:
linear_activation = lambda x: tf.nn.relu(snt.BatchNorm()(x))
else:
linear_activation = tf.nn.relu
mlp = snt.nets.MLP(list(linear_layers) + [10],
activation=linear_activation,
initializers=_nn_initializers)
network = snt.Sequential([conv, snt.BatchFlatten(), mlp])
def build():
image_batch, label_batch = queue.dequeue_many(batch_size)
label_batch = tf.reshape(label_batch, [batch_size])
output = network(image_batch)
return _xent_loss(output, label_batch)
return build
| learning-to-learn-master | problems.py |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Learning 2 Learn training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import xrange
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import monitored_session as ms
import meta
import util
flags = tf.flags
logging = tf.logging
FLAGS = flags.FLAGS
flags.DEFINE_string("save_path", None, "Path for saved meta-optimizer.")
flags.DEFINE_integer("num_epochs", 10000, "Number of training epochs.")
flags.DEFINE_integer("log_period", 100, "Log period.")
flags.DEFINE_integer("evaluation_period", 1000, "Evaluation period.")
flags.DEFINE_integer("evaluation_epochs", 20, "Number of evaluation epochs.")
flags.DEFINE_string("problem", "simple", "Type of problem.")
flags.DEFINE_integer("num_steps", 100,
"Number of optimization steps per epoch.")
flags.DEFINE_integer("unroll_length", 20, "Meta-optimizer unroll length.")
flags.DEFINE_float("learning_rate", 0.001, "Learning rate.")
flags.DEFINE_boolean("second_derivatives", False, "Use second derivatives.")
def main(_):
# Configuration.
num_unrolls = FLAGS.num_steps // FLAGS.unroll_length
if FLAGS.save_path is not None:
if os.path.exists(FLAGS.save_path):
raise ValueError("Folder {} already exists".format(FLAGS.save_path))
else:
os.mkdir(FLAGS.save_path)
# Problem.
problem, net_config, net_assignments = util.get_config(FLAGS.problem)
# Optimizer setup.
optimizer = meta.MetaOptimizer(**net_config)
minimize = optimizer.meta_minimize(
problem, FLAGS.unroll_length,
learning_rate=FLAGS.learning_rate,
net_assignments=net_assignments,
second_derivatives=FLAGS.second_derivatives)
step, update, reset, cost_op, _ = minimize
with ms.MonitoredSession() as sess:
# Prevent accidental changes to the graph.
tf.get_default_graph().finalize()
best_evaluation = float("inf")
total_time = 0
total_cost = 0
for e in xrange(FLAGS.num_epochs):
# Training.
time, cost = util.run_epoch(sess, cost_op, [update, step], reset,
num_unrolls)
total_time += time
total_cost += cost
# Logging.
if (e + 1) % FLAGS.log_period == 0:
util.print_stats("Epoch {}".format(e + 1), total_cost, total_time,
FLAGS.log_period)
total_time = 0
total_cost = 0
# Evaluation.
if (e + 1) % FLAGS.evaluation_period == 0:
eval_cost = 0
eval_time = 0
for _ in xrange(FLAGS.evaluation_epochs):
time, cost = util.run_epoch(sess, cost_op, [update], reset,
num_unrolls)
eval_time += time
eval_cost += cost
util.print_stats("EVALUATION", eval_cost, eval_time,
FLAGS.evaluation_epochs)
if FLAGS.save_path is not None and eval_cost < best_evaluation:
print("Removing previously saved meta-optimizer")
for f in os.listdir(FLAGS.save_path):
os.remove(os.path.join(FLAGS.save_path, f))
print("Saving meta-optimizer to {}".format(FLAGS.save_path))
optimizer.save(sess, FLAGS.save_path)
best_evaluation = eval_cost
if __name__ == "__main__":
tf.app.run()
| learning-to-learn-master | train.py |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Learning 2 Learn evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import monitored_session as ms
import meta
import util
flags = tf.flags
logging = tf.logging
FLAGS = flags.FLAGS
flags.DEFINE_string("optimizer", "L2L", "Optimizer.")
flags.DEFINE_string("path", None, "Path to saved meta-optimizer network.")
flags.DEFINE_integer("num_epochs", 100, "Number of evaluation epochs.")
flags.DEFINE_integer("seed", None, "Seed for TensorFlow's RNG.")
flags.DEFINE_string("problem", "simple", "Type of problem.")
flags.DEFINE_integer("num_steps", 100,
"Number of optimization steps per epoch.")
flags.DEFINE_float("learning_rate", 0.001, "Learning rate.")
def main(_):
# Configuration.
num_unrolls = FLAGS.num_steps
if FLAGS.seed:
tf.set_random_seed(FLAGS.seed)
# Problem.
problem, net_config, net_assignments = util.get_config(FLAGS.problem,
FLAGS.path)
# Optimizer setup.
if FLAGS.optimizer == "Adam":
cost_op = problem()
problem_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
problem_reset = tf.variables_initializer(problem_vars)
optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
optimizer_reset = tf.variables_initializer(optimizer.get_slot_names())
update = optimizer.minimize(cost_op)
reset = [problem_reset, optimizer_reset]
elif FLAGS.optimizer == "L2L":
if FLAGS.path is None:
logging.warning("Evaluating untrained L2L optimizer")
optimizer = meta.MetaOptimizer(**net_config)
meta_loss = optimizer.meta_loss(problem, 1, net_assignments=net_assignments)
_, update, reset, cost_op, _ = meta_loss
else:
raise ValueError("{} is not a valid optimizer".format(FLAGS.optimizer))
with ms.MonitoredSession() as sess:
# Prevent accidental changes to the graph.
tf.get_default_graph().finalize()
total_time = 0
total_cost = 0
for _ in xrange(FLAGS.num_epochs):
# Training.
time, cost = util.run_epoch(sess, cost_op, [update], reset,
num_unrolls)
total_time += time
total_cost += cost
# Results.
util.print_stats("Epoch {}".format(FLAGS.num_epochs), total_cost,
total_time, FLAGS.num_epochs)
if __name__ == "__main__":
tf.app.run()
| learning-to-learn-master | evaluate.py |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for L2L networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nose_parameterized import parameterized
import numpy as np
import sonnet as snt
import tensorflow as tf
import networks
class CoordinateWiseDeepLSTMTest(tf.test.TestCase):
"""Tests CoordinateWiseDeepLSTM network."""
def testShape(self):
shape = [10, 5]
gradients = tf.random_normal(shape)
net = networks.CoordinateWiseDeepLSTM(layers=(1, 1))
state = net.initial_state_for_inputs(gradients)
update, _ = net(gradients, state)
self.assertEqual(update.get_shape().as_list(), shape)
def testTrainable(self):
"""Tests the network contains trainable variables."""
shape = [10, 5]
gradients = tf.random_normal(shape)
net = networks.CoordinateWiseDeepLSTM(layers=(1,))
state = net.initial_state_for_inputs(gradients)
net(gradients, state)
# Weights and biases for two layers.
variables = snt.get_variables_in_module(net)
self.assertEqual(len(variables), 4)
@parameterized.expand([
["zeros"],
[{"w": "zeros", "b": "zeros", "bad": "bad"}],
[{"w": tf.zeros_initializer(), "b": np.array([0])}],
[{"linear": {"w": tf.zeros_initializer(), "b": "zeros"}}]
])
def testResults(self, initializer):
"""Tests zero updates when last layer is initialized to zero."""
shape = [10]
gradients = tf.random_normal(shape)
net = networks.CoordinateWiseDeepLSTM(layers=(1, 1),
initializer=initializer)
state = net.initial_state_for_inputs(gradients)
update, _ = net(gradients, state)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
update_np = sess.run(update)
self.assertAllEqual(update_np, np.zeros(shape))
class KernelDeepLSTMTest(tf.test.TestCase):
"""Tests KernelDeepLSTMTest network."""
def testShape(self):
kernel_shape = [5, 5]
shape = kernel_shape + [2, 2] # The input has to be 4-dimensional.
gradients = tf.random_normal(shape)
net = networks.KernelDeepLSTM(layers=(1, 1), kernel_shape=kernel_shape)
state = net.initial_state_for_inputs(gradients)
update, _ = net(gradients, state)
self.assertEqual(update.get_shape().as_list(), shape)
def testTrainable(self):
"""Tests the network contains trainable variables."""
kernel_shape = [5, 5]
shape = kernel_shape + [2, 2] # The input has to be 4-dimensional.
gradients = tf.random_normal(shape)
net = networks.KernelDeepLSTM(layers=(1,), kernel_shape=kernel_shape)
state = net.initial_state_for_inputs(gradients)
net(gradients, state)
# Weights and biases for two layers.
variables = snt.get_variables_in_module(net)
self.assertEqual(len(variables), 4)
@parameterized.expand([
["zeros"],
[{"w": "zeros", "b": "zeros", "bad": "bad"}],
[{"w": tf.zeros_initializer(), "b": np.array([0])}],
[{"linear": {"w": tf.zeros_initializer(), "b": "zeros"}}]
])
def testResults(self, initializer):
"""Tests zero updates when last layer is initialized to zero."""
kernel_shape = [5, 5]
shape = kernel_shape + [2, 2] # The input has to be 4-dimensional.
gradients = tf.random_normal(shape)
net = networks.KernelDeepLSTM(layers=(1, 1),
kernel_shape=kernel_shape,
initializer=initializer)
state = net.initial_state_for_inputs(gradients)
update, _ = net(gradients, state)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
update_np = sess.run(update)
self.assertAllEqual(update_np, np.zeros(shape))
class SgdTest(tf.test.TestCase):
"""Tests Sgd network."""
def testShape(self):
shape = [10, 5]
gradients = tf.random_normal(shape)
net = networks.Sgd()
state = net.initial_state_for_inputs(gradients)
update, _ = net(gradients, state)
self.assertEqual(update.get_shape().as_list(), shape)
def testNonTrainable(self):
"""Tests the network doesn't contain trainable variables."""
shape = [10, 5]
gradients = tf.random_normal(shape)
net = networks.Sgd()
state = net.initial_state_for_inputs(gradients)
net(gradients, state)
variables = snt.get_variables_in_module(net)
self.assertEqual(len(variables), 0)
def testResults(self):
"""Tests network produces zero updates with learning rate equal to zero."""
shape = [10]
learning_rate = 0.01
gradients = tf.random_normal(shape)
net = networks.Sgd(learning_rate=learning_rate)
state = net.initial_state_for_inputs(gradients)
update, _ = net(gradients, state)
with self.test_session() as sess:
gradients_np, update_np = sess.run([gradients, update])
self.assertAllEqual(update_np, -learning_rate * gradients_np)
class AdamTest(tf.test.TestCase):
"""Tests Adam network."""
def testShape(self):
shape = [10, 5]
gradients = tf.random_normal(shape)
net = networks.Adam()
state = net.initial_state_for_inputs(gradients)
update, _ = net(gradients, state)
self.assertEqual(update.get_shape().as_list(), shape)
def testNonTrainable(self):
"""Tests the network doesn't contain trainable variables."""
shape = [10, 5]
gradients = tf.random_normal(shape)
net = networks.Adam()
state = net.initial_state_for_inputs(gradients)
net(gradients, state)
variables = snt.get_variables_in_module(net)
self.assertEqual(len(variables), 0)
def testZeroLearningRate(self):
"""Tests network produces zero updates with learning rate equal to zero."""
shape = [10]
gradients = tf.random_normal(shape)
net = networks.Adam(learning_rate=0)
state = net.initial_state_for_inputs(gradients)
update, _ = net(gradients, state)
with self.test_session() as sess:
update_np = sess.run(update)
self.assertAllEqual(update_np, np.zeros(shape))
if __name__ == "__main__":
tf.test.main()
| learning-to-learn-master | networks_test.py |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Learning to learn (meta) optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import os
import mock
import sonnet as snt
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.util import nest
import networks
def _nested_assign(ref, value):
"""Returns a nested collection of TensorFlow assign operations.
Args:
ref: Nested collection of TensorFlow variables.
value: Values to be assigned to the variables. Must have the same structure
as `ref`.
Returns:
Nested collection (same structure as `ref`) of TensorFlow assign operations.
Raises:
ValueError: If `ref` and `values` have different structures.
"""
if isinstance(ref, list) or isinstance(ref, tuple):
if len(ref) != len(value):
raise ValueError("ref and value have different lengths.")
result = [_nested_assign(r, v) for r, v in zip(ref, value)]
if isinstance(ref, tuple):
return tuple(result)
return result
else:
return tf.assign(ref, value)
def _nested_variable(init, name=None, trainable=False):
"""Returns a nested collection of TensorFlow variables.
Args:
init: Nested collection of TensorFlow initializers.
name: Variable name.
trainable: Make variables trainable (`False` by default).
Returns:
Nested collection (same structure as `init`) of TensorFlow variables.
"""
if isinstance(init, list) or isinstance(init, tuple):
result = [_nested_variable(i, name, trainable) for i in init]
if isinstance(init, tuple):
return tuple(result)
return result
else:
return tf.Variable(init, name=name, trainable=trainable)
def _wrap_variable_creation(func, custom_getter):
"""Provides a custom getter for all variable creations."""
original_get_variable = tf.get_variable
def custom_get_variable(*args, **kwargs):
    if "custom_getter" in kwargs:
raise AttributeError("Custom getters are not supported for optimizee "
"variables.")
return original_get_variable(*args, custom_getter=custom_getter, **kwargs)
# Mock the get_variable method.
with mock.patch("tensorflow.get_variable", custom_get_variable):
return func()
def _get_variables(func):
"""Calls func, returning any variables created, but ignoring its return value.
Args:
func: Function to be called.
Returns:
A tuple (variables, constants) where the first element is a list of
trainable variables and the second is the non-trainable variables.
"""
variables = []
constants = []
def custom_getter(getter, name, **kwargs):
trainable = kwargs["trainable"]
kwargs["trainable"] = False
variable = getter(name, **kwargs)
if trainable:
variables.append(variable)
else:
constants.append(variable)
return variable
with tf.name_scope("unused_graph"):
_wrap_variable_creation(func, custom_getter)
return variables, constants
def _make_with_custom_variables(func, variables):
"""Calls func and replaces any trainable variables.
This returns the output of func, but whenever `get_variable` is called it
will replace any trainable variables with the tensors in `variables`, in the
same order. Non-trainable variables will re-use any variables already
created.
Args:
func: Function to be called.
variables: A list of tensors replacing the trainable variables.
Returns:
The return value of func is returned.
"""
variables = collections.deque(variables)
def custom_getter(getter, name, **kwargs):
if kwargs["trainable"]:
return variables.popleft()
else:
kwargs["reuse"] = True
return getter(name, **kwargs)
return _wrap_variable_creation(func, custom_getter)
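# Sketch of how the two helpers above are paired in meta_loss below (comment
# only): the loss graph is built once just to collect its variables, then
# rebuilt with those variables substituted by the tensors being optimized:
#   x, constants = _get_variables(make_loss)
#   fx = _make_with_custom_variables(make_loss, x)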
MetaLoss = collections.namedtuple("MetaLoss", "loss, update, reset, fx, x")
MetaStep = collections.namedtuple("MetaStep", "step, update, reset, fx, x")
def _make_nets(variables, config, net_assignments):
"""Creates the optimizer networks.
Args:
variables: A list of variables to be optimized.
config: A dictionary of network configurations, each of which will be
      passed to networks.factory to construct a single optimizer net.
net_assignments: A list of tuples where each tuple is of the form (netid,
variable_names) and is used to assign variables to networks. netid must
be a key in config.
Returns:
A tuple (nets, keys, subsets) where nets is a dictionary of created
optimizer nets such that the net with key keys[i] should be applied to the
subset of variables listed in subsets[i].
Raises:
ValueError: If net_assignments is None and the configuration defines more
than one network.
"""
# create a dictionary which maps a variable name to its index within the
# list of variables.
name_to_index = dict((v.name.split(":")[0], i)
for i, v in enumerate(variables))
if net_assignments is None:
if len(config) != 1:
raise ValueError("Default net_assignments can only be used if there is "
"a single net config.")
with tf.variable_scope("vars_optimizer"):
key = next(iter(config))
kwargs = config[key]
net = networks.factory(**kwargs)
nets = {key: net}
keys = [key]
subsets = [range(len(variables))]
else:
nets = {}
keys = []
subsets = []
with tf.variable_scope("vars_optimizer"):
for key, names in net_assignments:
if key in nets:
raise ValueError("Repeated netid in net_assigments.")
nets[key] = networks.factory(**config[key])
subset = [name_to_index[name] for name in names]
keys.append(key)
subsets.append(subset)
print("Net: {}, Subset: {}".format(key, subset))
# subsets should be a list of disjoint subsets (as lists!) of the variables
# and nets should be a list of networks to apply to each subset.
return nets, keys, subsets
class MetaOptimizer(object):
"""Learning to learn (meta) optimizer.
Optimizer which has an internal RNN which takes as input, at each iteration,
the gradient of the function being minimized and returns a step direction.
This optimizer can then itself be optimized to learn optimization on a set of
tasks.
"""
def __init__(self, **kwargs):
"""Creates a MetaOptimizer.
Args:
**kwargs: A set of keyword arguments mapping network identifiers (the
        keys) to parameters that will be passed to networks.factory (see docs
for more info). These can be used to assign different optimizee
parameters to different optimizers (see net_assignments in the
meta_loss method).
"""
self._nets = None
if not kwargs:
      # Use a default coordinatewise network if nothing is given. This allows
# for no network spec and no assignments.
self._config = {
"coordinatewise": {
"net": "CoordinateWiseDeepLSTM",
"net_options": {
"layers": (20, 20),
"preprocess_name": "LogAndSign",
"preprocess_options": {"k": 5},
"scale": 0.01,
}}}
else:
self._config = kwargs
def save(self, sess, path=None):
"""Save meta-optimizer."""
result = {}
for k, net in self._nets.items():
if path is None:
filename = None
key = k
else:
filename = os.path.join(path, "{}.l2l".format(k))
key = filename
net_vars = networks.save(net, sess, filename=filename)
result[key] = net_vars
return result
def meta_loss(self,
make_loss,
len_unroll,
net_assignments=None,
second_derivatives=False):
"""Returns an operator computing the meta-loss.
Args:
make_loss: Callable which returns the optimizee loss; note that this
should create its ops in the default graph.
len_unroll: Number of steps to unroll.
net_assignments: variable to optimizer mapping. If not None, it should be
a list of (k, names) tuples, where k is a valid key in the kwargs
        passed at construction time and names is a list of variable names.
second_derivatives: Use second derivatives (default is false).
Returns:
namedtuple containing (loss, update, reset, fx, x)
"""
# Construct an instance of the problem only to grab the variables. This
# loss will never be evaluated.
x, constants = _get_variables(make_loss)
print("Optimizee variables")
print([op.name for op in x])
print("Problem variables")
print([op.name for op in constants])
# Create the optimizer networks and find the subsets of variables to assign
# to each optimizer.
nets, net_keys, subsets = _make_nets(x, self._config, net_assignments)
# Store the networks so we can save them later.
self._nets = nets
# Create hidden state for each subset of variables.
state = []
with tf.name_scope("states"):
for i, (subset, key) in enumerate(zip(subsets, net_keys)):
net = nets[key]
with tf.name_scope("state_{}".format(i)):
state.append(_nested_variable(
[net.initial_state_for_inputs(x[j], dtype=tf.float32)
for j in subset],
name="state", trainable=False))
def update(net, fx, x, state):
"""Parameter and RNN state update."""
with tf.name_scope("gradients"):
gradients = tf.gradients(fx, x)
# Stopping the gradient here corresponds to what was done in the
# original L2L NIPS submission. However it looks like things like
# BatchNorm, etc. don't support second-derivatives so we still need
# this term.
if not second_derivatives:
gradients = [tf.stop_gradient(g) for g in gradients]
with tf.name_scope("deltas"):
deltas, state_next = zip(*[net(g, s) for g, s in zip(gradients, state)])
state_next = list(state_next)
return deltas, state_next
def time_step(t, fx_array, x, state):
"""While loop body."""
x_next = list(x)
state_next = []
with tf.name_scope("fx"):
fx = _make_with_custom_variables(make_loss, x)
fx_array = fx_array.write(t, fx)
with tf.name_scope("dx"):
for subset, key, s_i in zip(subsets, net_keys, state):
x_i = [x[j] for j in subset]
deltas, s_i_next = update(nets[key], fx, x_i, s_i)
for idx, j in enumerate(subset):
x_next[j] += deltas[idx]
state_next.append(s_i_next)
with tf.name_scope("t_next"):
t_next = t + 1
return t_next, fx_array, x_next, state_next
# Define the while loop.
fx_array = tf.TensorArray(tf.float32, size=len_unroll + 1,
clear_after_read=False)
_, fx_array, x_final, s_final = tf.while_loop(
cond=lambda t, *_: t < len_unroll,
body=time_step,
loop_vars=(0, fx_array, x, state),
parallel_iterations=1,
swap_memory=True,
name="unroll")
with tf.name_scope("fx"):
fx_final = _make_with_custom_variables(make_loss, x_final)
fx_array = fx_array.write(len_unroll, fx_final)
loss = tf.reduce_sum(fx_array.stack(), name="loss")
# Reset the state; should be called at the beginning of an epoch.
with tf.name_scope("reset"):
variables = (nest.flatten(state) +
x + constants)
# Empty array as part of the reset process.
reset = [tf.variables_initializer(variables), fx_array.close()]
# Operator to update the parameters and the RNN state after our loop, but
# during an epoch.
with tf.name_scope("update"):
update = (nest.flatten(_nested_assign(x, x_final)) +
nest.flatten(_nested_assign(state, s_final)))
# Log internal variables.
for k, net in nets.items():
print("Optimizer '{}' variables".format(k))
print([op.name for op in snt.get_variables_in_module(net)])
return MetaLoss(loss, update, reset, fx_final, x_final)
def meta_minimize(self, make_loss, len_unroll, learning_rate=0.01, **kwargs):
"""Returns an operator minimizing the meta-loss.
Args:
make_loss: Callable which returns the optimizee loss; note that this
should create its ops in the default graph.
len_unroll: Number of steps to unroll.
learning_rate: Learning rate for the Adam optimizer.
**kwargs: keyword arguments forwarded to meta_loss.
Returns:
namedtuple containing (step, update, reset, fx, x)
"""
info = self.meta_loss(make_loss, len_unroll, **kwargs)
optimizer = tf.train.AdamOptimizer(learning_rate)
step = optimizer.minimize(info.loss)
return MetaStep(step, *info[1:])
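# Usage sketch (comment only; mirrors convergence_test.py and train.py):
#   optimizer = MetaOptimizer(net=dict(net="CoordinateWiseDeepLSTM", ...))
#   step, update, reset, fx, x = optimizer.meta_minimize(problem, len_unroll)
#   sess.run(reset)           # at the start of each epoch
#   sess.run([update, step])  # one unrolled segment of meta-training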
| learning-to-learn-master | meta.py |
from setuptools import setup
setup(
name='dnc',
version='0.0.2',
description='This package provides an implementation of the Differentiable Neural Computer, as published in Nature.',
license='Apache Software License 2.0',
packages=['dnc'],
author='DeepMind',
keywords=['tensorflow', 'differentiable neural computer', 'dnc', 'deepmind', 'deep mind', 'sonnet', 'dm-sonnet', 'machine learning'],
url='https://github.com/deepmind/dnc'
)
| dnc-master | setup.py |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example script to train the DNC on a repeated copy task."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import sonnet as snt
from dnc import dnc
from dnc import repeat_copy
FLAGS = tf.flags.FLAGS
# Model parameters
tf.flags.DEFINE_integer("hidden_size", 64, "Size of LSTM hidden layer.")
tf.flags.DEFINE_integer("memory_size", 16, "The number of memory slots.")
tf.flags.DEFINE_integer("word_size", 16, "The width of each memory slot.")
tf.flags.DEFINE_integer("num_write_heads", 1, "Number of memory write heads.")
tf.flags.DEFINE_integer("num_read_heads", 4, "Number of memory read heads.")
tf.flags.DEFINE_integer("clip_value", 20,
"Maximum absolute value of controller and dnc outputs.")
# Optimizer parameters.
tf.flags.DEFINE_float("max_grad_norm", 50, "Gradient clipping norm limit.")
tf.flags.DEFINE_float("learning_rate", 1e-4, "Optimizer learning rate.")
tf.flags.DEFINE_float("optimizer_epsilon", 1e-10,
"Epsilon used for RMSProp optimizer.")
# Task parameters
tf.flags.DEFINE_integer("batch_size", 16, "Batch size for training.")
tf.flags.DEFINE_integer("num_bits", 4, "Dimensionality of each vector to copy")
tf.flags.DEFINE_integer(
"min_length", 1,
"Lower limit on number of vectors in the observation pattern to copy")
tf.flags.DEFINE_integer(
"max_length", 2,
"Upper limit on number of vectors in the observation pattern to copy")
tf.flags.DEFINE_integer("min_repeats", 1,
"Lower limit on number of copy repeats.")
tf.flags.DEFINE_integer("max_repeats", 2,
"Upper limit on number of copy repeats.")
# Training options.
tf.flags.DEFINE_integer("num_training_iterations", 100000,
"Number of iterations to train for.")
tf.flags.DEFINE_integer("report_interval", 100,
"Iterations between reports (samples, valid loss).")
tf.flags.DEFINE_string("checkpoint_dir", "/tmp/tf/dnc",
"Checkpointing directory.")
tf.flags.DEFINE_integer("checkpoint_interval", -1,
"Checkpointing step interval.")
def run_model(input_sequence, output_size):
"""Runs model on input sequence."""
access_config = {
"memory_size": FLAGS.memory_size,
"word_size": FLAGS.word_size,
"num_reads": FLAGS.num_read_heads,
"num_writes": FLAGS.num_write_heads,
}
controller_config = {
"hidden_size": FLAGS.hidden_size,
}
clip_value = FLAGS.clip_value
dnc_core = dnc.DNC(access_config, controller_config, output_size, clip_value)
initial_state = dnc_core.initial_state(FLAGS.batch_size)
output_sequence, _ = tf.nn.dynamic_rnn(
cell=dnc_core,
inputs=input_sequence,
time_major=True,
initial_state=initial_state)
return output_sequence
def train(num_training_iterations, report_interval):
"""Trains the DNC and periodically reports the loss."""
dataset = repeat_copy.RepeatCopy(FLAGS.num_bits, FLAGS.batch_size,
FLAGS.min_length, FLAGS.max_length,
FLAGS.min_repeats, FLAGS.max_repeats)
dataset_tensors = dataset()
output_logits = run_model(dataset_tensors.observations, dataset.target_size)
# Used for visualization.
output = tf.round(
tf.expand_dims(dataset_tensors.mask, -1) * tf.sigmoid(output_logits))
train_loss = dataset.cost(output_logits, dataset_tensors.target,
dataset_tensors.mask)
# Set up optimizer with global norm clipping.
trainable_variables = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(
tf.gradients(train_loss, trainable_variables), FLAGS.max_grad_norm)
global_step = tf.get_variable(
name="global_step",
shape=[],
dtype=tf.int64,
initializer=tf.zeros_initializer(),
trainable=False,
collections=[tf.GraphKeys.GLOBAL_VARIABLES, tf.GraphKeys.GLOBAL_STEP])
optimizer = tf.train.RMSPropOptimizer(
FLAGS.learning_rate, epsilon=FLAGS.optimizer_epsilon)
train_step = optimizer.apply_gradients(
zip(grads, trainable_variables), global_step=global_step)
saver = tf.train.Saver()
if FLAGS.checkpoint_interval > 0:
hooks = [
tf.train.CheckpointSaverHook(
checkpoint_dir=FLAGS.checkpoint_dir,
save_steps=FLAGS.checkpoint_interval,
saver=saver)
]
else:
hooks = []
# Train.
with tf.train.SingularMonitoredSession(
hooks=hooks, checkpoint_dir=FLAGS.checkpoint_dir) as sess:
start_iteration = sess.run(global_step)
total_loss = 0
for train_iteration in range(start_iteration, num_training_iterations):
_, loss = sess.run([train_step, train_loss])
total_loss += loss
if (train_iteration + 1) % report_interval == 0:
dataset_tensors_np, output_np = sess.run([dataset_tensors, output])
dataset_string = dataset.to_human_readable(dataset_tensors_np,
output_np)
tf.logging.info("%d: Avg training loss %f.\n%s",
train_iteration, total_loss / report_interval,
dataset_string)
total_loss = 0
def main(unused_argv):
tf.logging.set_verbosity(3) # Print INFO log messages.
train(FLAGS.num_training_iterations, FLAGS.report_interval)
if __name__ == "__main__":
tf.app.run()
| dnc-master | train.py |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for memory access."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import rnn
from dnc import access
from dnc import util
BATCH_SIZE = 2
MEMORY_SIZE = 20
WORD_SIZE = 6
NUM_READS = 2
NUM_WRITES = 3
TIME_STEPS = 4
INPUT_SIZE = 10
class MemoryAccessTest(tf.test.TestCase):
def setUp(self):
self.module = access.MemoryAccess(MEMORY_SIZE, WORD_SIZE, NUM_READS,
NUM_WRITES)
self.initial_state = self.module.initial_state(BATCH_SIZE)
def testBuildAndTrain(self):
inputs = tf.random_normal([TIME_STEPS, BATCH_SIZE, INPUT_SIZE])
output, _ = rnn.dynamic_rnn(
cell=self.module,
inputs=inputs,
initial_state=self.initial_state,
time_major=True)
targets = np.random.rand(TIME_STEPS, BATCH_SIZE, NUM_READS, WORD_SIZE)
loss = tf.reduce_mean(tf.square(output - targets))
train_op = tf.train.GradientDescentOptimizer(1).minimize(loss)
init = tf.global_variables_initializer()
with self.test_session():
init.run()
train_op.run()
def testValidReadMode(self):
inputs = self.module._read_inputs(
tf.random_normal([BATCH_SIZE, INPUT_SIZE]))
init = tf.global_variables_initializer()
with self.test_session() as sess:
init.run()
inputs = sess.run(inputs)
# Check that the read modes for each read head constitute a probability
# distribution.
self.assertAllClose(inputs['read_mode'].sum(2),
np.ones([BATCH_SIZE, NUM_READS]))
self.assertGreaterEqual(inputs['read_mode'].min(), 0)
def testWriteWeights(self):
memory = 10 * (np.random.rand(BATCH_SIZE, MEMORY_SIZE, WORD_SIZE) - 0.5)
usage = np.random.rand(BATCH_SIZE, MEMORY_SIZE)
allocation_gate = np.random.rand(BATCH_SIZE, NUM_WRITES)
write_gate = np.random.rand(BATCH_SIZE, NUM_WRITES)
write_content_keys = np.random.rand(BATCH_SIZE, NUM_WRITES, WORD_SIZE)
write_content_strengths = np.random.rand(BATCH_SIZE, NUM_WRITES)
# Check that turning on allocation gate fully brings the write gate to
# the allocation weighting (which we will control by controlling the usage).
usage[:, 3] = 0
allocation_gate[:, 0] = 1
write_gate[:, 0] = 1
inputs = {
'allocation_gate': tf.constant(allocation_gate),
'write_gate': tf.constant(write_gate),
'write_content_keys': tf.constant(write_content_keys),
'write_content_strengths': tf.constant(write_content_strengths)
}
weights = self.module._write_weights(inputs,
tf.constant(memory),
tf.constant(usage))
with self.test_session():
weights = weights.eval()
# Check the weights sum to their target gating.
self.assertAllClose(np.sum(weights, axis=2), write_gate, atol=5e-2)
# Check that we fully allocated to the third row.
weights_0_0_target = util.one_hot(MEMORY_SIZE, 3)
self.assertAllClose(weights[0, 0], weights_0_0_target, atol=1e-3)
def testReadWeights(self):
memory = 10 * (np.random.rand(BATCH_SIZE, MEMORY_SIZE, WORD_SIZE) - 0.5)
prev_read_weights = np.random.rand(BATCH_SIZE, NUM_READS, MEMORY_SIZE)
prev_read_weights /= prev_read_weights.sum(2, keepdims=True) + 1
link = np.random.rand(BATCH_SIZE, NUM_WRITES, MEMORY_SIZE, MEMORY_SIZE)
# Row and column sums should be at most 1:
link /= np.maximum(link.sum(2, keepdims=True), 1)
link /= np.maximum(link.sum(3, keepdims=True), 1)
# We query the memory on the third location in memory, and select a large
# strength on the query. Then we select a content-based read-mode.
read_content_keys = np.random.rand(BATCH_SIZE, NUM_READS, WORD_SIZE)
read_content_keys[0, 0] = memory[0, 3]
read_content_strengths = tf.constant(
100., shape=[BATCH_SIZE, NUM_READS], dtype=tf.float64)
read_mode = np.random.rand(BATCH_SIZE, NUM_READS, 1 + 2 * NUM_WRITES)
read_mode[0, 0, :] = util.one_hot(1 + 2 * NUM_WRITES, 2 * NUM_WRITES)
inputs = {
'read_content_keys': tf.constant(read_content_keys),
'read_content_strengths': read_content_strengths,
'read_mode': tf.constant(read_mode),
}
read_weights = self.module._read_weights(inputs, memory, prev_read_weights,
link)
with self.test_session():
read_weights = read_weights.eval()
# read_weights for batch 0, read head 0 should be memory location 3
self.assertAllClose(
read_weights[0, 0, :], util.one_hot(MEMORY_SIZE, 3), atol=1e-3)
def testGradients(self):
inputs = tf.constant(np.random.randn(BATCH_SIZE, INPUT_SIZE), tf.float32)
output, _ = self.module(inputs, self.initial_state)
loss = tf.reduce_sum(output)
tensors_to_check = [
inputs, self.initial_state.memory, self.initial_state.read_weights,
self.initial_state.linkage.precedence_weights,
self.initial_state.linkage.link
]
shapes = [x.get_shape().as_list() for x in tensors_to_check]
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
err = tf.test.compute_gradient_error(tensors_to_check, shapes, loss, [1])
self.assertLess(err, 0.1)
if __name__ == '__main__':
tf.test.main()
| dnc-master | dnc/access_test.py |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DNC util ops and modules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def batch_invert_permutation(permutations):
"""Returns batched `tf.invert_permutation` for every row in `permutations`."""
with tf.name_scope('batch_invert_permutation', values=[permutations]):
perm = tf.cast(permutations, tf.float32)
dim = int(perm.get_shape()[-1])
size = tf.cast(tf.shape(perm)[0], tf.float32)
delta = tf.cast(tf.shape(perm)[-1], tf.float32)
rg = tf.range(0, size * delta, delta, dtype=tf.float32)
rg = tf.expand_dims(rg, 1)
rg = tf.tile(rg, [1, dim])
perm = tf.add(perm, rg)
flat = tf.reshape(perm, [-1])
perm = tf.invert_permutation(tf.cast(flat, tf.int32))
perm = tf.reshape(perm, [-1, dim])
return tf.subtract(perm, tf.cast(rg, tf.int32))
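# Illustrative example (comment only, not part of the original module):
#   batch_invert_permutation([[1, 2, 0], [2, 0, 1]]) evaluates to
#   [[2, 0, 1], [1, 2, 0]], i.e. tf.invert_permutation applied row by row.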
def batch_gather(values, indices):
"""Returns batched `tf.gather` for every row in the input."""
with tf.name_scope('batch_gather', values=[values, indices]):
idx = tf.expand_dims(indices, -1)
size = tf.shape(indices)[0]
rg = tf.range(size, dtype=tf.int32)
rg = tf.expand_dims(rg, -1)
rg = tf.tile(rg, [1, int(indices.get_shape()[-1])])
rg = tf.expand_dims(rg, -1)
gidx = tf.concat([rg, idx], -1)
return tf.gather_nd(values, gidx)
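# Illustrative example (comment only, not part of the original module): with
#   values  = [[10, 20, 30], [40, 50, 60]]
#   indices = [[2, 0], [1, 1]]
# batch_gather evaluates to [[30, 10], [50, 50]].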
def one_hot(length, index):
"""Return an nd array of given `length` filled with 0s and a 1 at `index`."""
result = np.zeros(length)
result[index] = 1
return result
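# Illustrative example (comment only): one_hot(4, 2) -> array([0., 0., 1., 0.])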
def reduce_prod(x, axis, name=None):
"""Efficient reduce product over axis.
  Uses tf.cumprod and tf.gather_nd as a workaround to the poor performance of
  calculating tf.reduce_prod's gradient on CPU.
"""
with tf.name_scope(name, 'util_reduce_prod', values=[x]):
cp = tf.cumprod(x, axis, reverse=True)
size = tf.shape(cp)[0]
idx1 = tf.range(tf.cast(size, tf.float32), dtype=tf.float32)
idx2 = tf.zeros([size], tf.float32)
indices = tf.stack([idx1, idx2], 1)
return tf.gather_nd(cp, tf.cast(indices, tf.int32))
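# Usage note (comment only, an assumption about intended use): for a 2-D tensor
# x, reduce_prod(x, 1) matches tf.reduce_prod(x, axis=1) in value, while its
# gradient flows through tf.cumprod and tf.gather_nd instead.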
| dnc-master | dnc/util.py |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for memory addressing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import sonnet as snt
import tensorflow as tf
from dnc import addressing
from dnc import util
try:
xrange
except NameError:
xrange = range
class WeightedSoftmaxTest(tf.test.TestCase):
def testValues(self):
batch_size = 5
num_heads = 3
memory_size = 7
activations_data = np.random.randn(batch_size, num_heads, memory_size)
weights_data = np.ones((batch_size, num_heads))
activations = tf.placeholder(tf.float32,
[batch_size, num_heads, memory_size])
weights = tf.placeholder(tf.float32, [batch_size, num_heads])
    # Run weighted softmax with the identity op applied to all-ones weights.
    # The output should equal a standalone softmax.
observed = addressing.weighted_softmax(activations, weights, tf.identity)
expected = snt.BatchApply(
module_or_op=tf.nn.softmax, name='BatchSoftmax')(activations)
with self.test_session() as sess:
observed = sess.run(
observed,
feed_dict={activations: activations_data,
weights: weights_data})
expected = sess.run(expected, feed_dict={activations: activations_data})
self.assertAllClose(observed, expected)
class CosineWeightsTest(tf.test.TestCase):
def testShape(self):
batch_size = 5
num_heads = 3
memory_size = 7
word_size = 2
module = addressing.CosineWeights(num_heads, word_size)
mem = tf.placeholder(tf.float32, [batch_size, memory_size, word_size])
keys = tf.placeholder(tf.float32, [batch_size, num_heads, word_size])
strengths = tf.placeholder(tf.float32, [batch_size, num_heads])
weights = module(mem, keys, strengths)
self.assertTrue(weights.get_shape().is_compatible_with(
[batch_size, num_heads, memory_size]))
def testValues(self):
batch_size = 5
num_heads = 4
memory_size = 10
word_size = 2
mem_data = np.random.randn(batch_size, memory_size, word_size)
np.copyto(mem_data[0, 0], [1, 2])
np.copyto(mem_data[0, 1], [3, 4])
np.copyto(mem_data[0, 2], [5, 6])
keys_data = np.random.randn(batch_size, num_heads, word_size)
np.copyto(keys_data[0, 0], [5, 6])
np.copyto(keys_data[0, 1], [1, 2])
np.copyto(keys_data[0, 2], [5, 6])
np.copyto(keys_data[0, 3], [3, 4])
strengths_data = np.random.randn(batch_size, num_heads)
module = addressing.CosineWeights(num_heads, word_size)
mem = tf.placeholder(tf.float32, [batch_size, memory_size, word_size])
keys = tf.placeholder(tf.float32, [batch_size, num_heads, word_size])
strengths = tf.placeholder(tf.float32, [batch_size, num_heads])
weights = module(mem, keys, strengths)
with self.test_session() as sess:
result = sess.run(
weights,
feed_dict={mem: mem_data,
keys: keys_data,
strengths: strengths_data})
# Manually checks results.
strengths_softplus = np.log(1 + np.exp(strengths_data))
similarity = np.zeros((memory_size))
for b in xrange(batch_size):
for h in xrange(num_heads):
key = keys_data[b, h]
key_norm = np.linalg.norm(key)
for m in xrange(memory_size):
row = mem_data[b, m]
similarity[m] = np.dot(key, row) / (key_norm * np.linalg.norm(row))
similarity = np.exp(similarity * strengths_softplus[b, h])
similarity /= similarity.sum()
self.assertAllClose(result[b, h], similarity, atol=1e-4, rtol=1e-4)
def testDivideByZero(self):
batch_size = 5
num_heads = 4
memory_size = 10
word_size = 2
module = addressing.CosineWeights(num_heads, word_size)
keys = tf.random_normal([batch_size, num_heads, word_size])
strengths = tf.random_normal([batch_size, num_heads])
# First row of memory is non-zero to concentrate attention on this location.
# Remaining rows are all zero.
first_row_ones = tf.ones([batch_size, 1, word_size], dtype=tf.float32)
remaining_zeros = tf.zeros(
[batch_size, memory_size - 1, word_size], dtype=tf.float32)
mem = tf.concat((first_row_ones, remaining_zeros), 1)
output = module(mem, keys, strengths)
gradients = tf.gradients(output, [mem, keys, strengths])
with self.test_session() as sess:
output, gradients = sess.run([output, gradients])
self.assertFalse(np.any(np.isnan(output)))
self.assertFalse(np.any(np.isnan(gradients[0])))
self.assertFalse(np.any(np.isnan(gradients[1])))
self.assertFalse(np.any(np.isnan(gradients[2])))
class TemporalLinkageTest(tf.test.TestCase):
def testModule(self):
batch_size = 7
memory_size = 4
num_reads = 11
num_writes = 5
module = addressing.TemporalLinkage(
memory_size=memory_size, num_writes=num_writes)
prev_link_in = tf.placeholder(
tf.float32, (batch_size, num_writes, memory_size, memory_size))
prev_precedence_weights_in = tf.placeholder(
tf.float32, (batch_size, num_writes, memory_size))
write_weights_in = tf.placeholder(tf.float32,
(batch_size, num_writes, memory_size))
state = addressing.TemporalLinkageState(
link=np.zeros([batch_size, num_writes, memory_size, memory_size]),
precedence_weights=np.zeros([batch_size, num_writes, memory_size]))
calc_state = module(write_weights_in,
addressing.TemporalLinkageState(
link=prev_link_in,
precedence_weights=prev_precedence_weights_in))
with self.test_session() as sess:
num_steps = 5
for i in xrange(num_steps):
write_weights = np.random.rand(batch_size, num_writes, memory_size)
write_weights /= write_weights.sum(2, keepdims=True) + 1
# Simulate (in final steps) link 0-->1 in head 0 and 3-->2 in head 1
if i == num_steps - 2:
write_weights[0, 0, :] = util.one_hot(memory_size, 0)
write_weights[0, 1, :] = util.one_hot(memory_size, 3)
elif i == num_steps - 1:
write_weights[0, 0, :] = util.one_hot(memory_size, 1)
write_weights[0, 1, :] = util.one_hot(memory_size, 2)
state = sess.run(
calc_state,
feed_dict={
prev_link_in: state.link,
prev_precedence_weights_in: state.precedence_weights,
write_weights_in: write_weights
})
# link should be bounded in range [0, 1]
self.assertGreaterEqual(state.link.min(), 0)
self.assertLessEqual(state.link.max(), 1)
# link diagonal should be zero
self.assertAllEqual(
state.link[:, :, range(memory_size), range(memory_size)],
np.zeros([batch_size, num_writes, memory_size]))
# link rows and columns should sum to at most 1
self.assertLessEqual(state.link.sum(2).max(), 1)
self.assertLessEqual(state.link.sum(3).max(), 1)
# records our transitions in batch 0: head 0: 0->1, and head 1: 3->2
self.assertAllEqual(state.link[0, 0, :, 0], util.one_hot(memory_size, 1))
self.assertAllEqual(state.link[0, 1, :, 3], util.one_hot(memory_size, 2))
# Now test calculation of forward and backward read weights
prev_read_weights = np.random.rand(batch_size, num_reads, memory_size)
prev_read_weights[0, 5, :] = util.one_hot(memory_size, 0) # read 5, posn 0
prev_read_weights[0, 6, :] = util.one_hot(memory_size, 2) # read 6, posn 2
forward_read_weights = module.directional_read_weights(
tf.constant(state.link),
tf.constant(prev_read_weights, dtype=tf.float32),
forward=True)
backward_read_weights = module.directional_read_weights(
tf.constant(state.link),
tf.constant(prev_read_weights, dtype=tf.float32),
forward=False)
with self.test_session():
forward_read_weights = forward_read_weights.eval()
backward_read_weights = backward_read_weights.eval()
# Check directional weights calculated correctly.
self.assertAllEqual(
forward_read_weights[0, 5, 0, :], # read=5, write=0
util.one_hot(memory_size, 1))
self.assertAllEqual(
backward_read_weights[0, 6, 1, :], # read=6, write=1
util.one_hot(memory_size, 3))
def testPrecedenceWeights(self):
batch_size = 7
memory_size = 3
num_writes = 5
module = addressing.TemporalLinkage(
memory_size=memory_size, num_writes=num_writes)
prev_precedence_weights = np.random.rand(batch_size, num_writes,
memory_size)
write_weights = np.random.rand(batch_size, num_writes, memory_size)
# These should sum to at most 1 for each write head in each batch.
write_weights /= write_weights.sum(2, keepdims=True) + 1
prev_precedence_weights /= prev_precedence_weights.sum(2, keepdims=True) + 1
write_weights[0, 1, :] = 0 # batch 0 head 1: no writing
write_weights[1, 2, :] /= write_weights[1, 2, :].sum() # b1 h2: all writing
precedence_weights = module._precedence_weights(
prev_precedence_weights=tf.constant(prev_precedence_weights),
write_weights=tf.constant(write_weights))
with self.test_session():
precedence_weights = precedence_weights.eval()
# precedence weights should be bounded in range [0, 1]
self.assertGreaterEqual(precedence_weights.min(), 0)
self.assertLessEqual(precedence_weights.max(), 1)
# no writing in batch 0, head 1
self.assertAllClose(precedence_weights[0, 1, :],
prev_precedence_weights[0, 1, :])
# all writing in batch 1, head 2
self.assertAllClose(precedence_weights[1, 2, :], write_weights[1, 2, :])
class FreenessTest(tf.test.TestCase):
def testModule(self):
batch_size = 5
memory_size = 11
num_reads = 3
num_writes = 7
module = addressing.Freeness(memory_size)
free_gate = np.random.rand(batch_size, num_reads)
# Produce read weights that sum to 1 for each batch and head.
prev_read_weights = np.random.rand(batch_size, num_reads, memory_size)
prev_read_weights[1, :, 3] = 0 # no read at batch 1, position 3; see below
prev_read_weights /= prev_read_weights.sum(2, keepdims=True)
prev_write_weights = np.random.rand(batch_size, num_writes, memory_size)
prev_write_weights /= prev_write_weights.sum(2, keepdims=True)
prev_usage = np.random.rand(batch_size, memory_size)
# Add some special values that allows us to test the behaviour:
prev_write_weights[1, 2, 3] = 1 # full write in batch 1, head 2, position 3
prev_read_weights[2, 0, 4] = 1 # full read at batch 2, head 0, position 4
free_gate[2, 0] = 1 # can free up all locations for batch 2, read head 0
usage = module(
tf.constant(prev_write_weights),
tf.constant(free_gate),
tf.constant(prev_read_weights), tf.constant(prev_usage))
with self.test_session():
usage = usage.eval()
# Check all usages are between 0 and 1.
self.assertGreaterEqual(usage.min(), 0)
self.assertLessEqual(usage.max(), 1)
# Check that the full write at batch 1, position 3 makes it fully used.
self.assertEqual(usage[1][3], 1)
# Check that the full free at batch 2, position 4 makes it fully free.
self.assertEqual(usage[2][4], 0)
def testWriteAllocationWeights(self):
batch_size = 7
memory_size = 23
num_writes = 5
module = addressing.Freeness(memory_size)
usage = np.random.rand(batch_size, memory_size)
write_gates = np.random.rand(batch_size, num_writes)
    # Turn off gates for heads 1 and 3 in batch 0. This doesn't scale down the
    # weighting, but it means that the usage doesn't change, so we should get
    # the same allocation weightings for head pairs (1, 2) and (3, 4) (while
    # all others differ).
write_gates[0, 1] = 0
write_gates[0, 3] = 0
# and turn heads 0 and 2 on for full effect.
write_gates[0, 0] = 1
write_gates[0, 2] = 1
# In batch 1, make one of the usages 0 and another almost 0, so that these
# entries get most of the allocation weights for the first and second heads.
usage[1] = usage[1] * 0.9 + 0.1 # make sure all entries are in [0.1, 1]
usage[1][4] = 0 # write head 0 should get allocated to position 4
usage[1][3] = 1e-4 # write head 1 should get allocated to position 3
write_gates[1, 0] = 1 # write head 0 fully on
write_gates[1, 1] = 1 # write head 1 fully on
weights = module.write_allocation_weights(
usage=tf.constant(usage),
write_gates=tf.constant(write_gates),
num_writes=num_writes)
with self.test_session():
weights = weights.eval()
# Check that all weights are between 0 and 1
self.assertGreaterEqual(weights.min(), 0)
self.assertLessEqual(weights.max(), 1)
# Check that weights sum to close to 1
self.assertAllClose(
np.sum(weights, axis=2), np.ones([batch_size, num_writes]), atol=1e-3)
# Check the same / different allocation weight pairs as described above.
self.assertGreater(np.abs(weights[0, 0, :] - weights[0, 1, :]).max(), 0.1)
self.assertAllEqual(weights[0, 1, :], weights[0, 2, :])
self.assertGreater(np.abs(weights[0, 2, :] - weights[0, 3, :]).max(), 0.1)
self.assertAllEqual(weights[0, 3, :], weights[0, 4, :])
self.assertAllClose(weights[1][0], util.one_hot(memory_size, 4), atol=1e-3)
self.assertAllClose(weights[1][1], util.one_hot(memory_size, 3), atol=1e-3)
def testWriteAllocationWeightsGradient(self):
batch_size = 7
memory_size = 5
num_writes = 3
module = addressing.Freeness(memory_size)
usage = tf.constant(np.random.rand(batch_size, memory_size))
write_gates = tf.constant(np.random.rand(batch_size, num_writes))
weights = module.write_allocation_weights(usage, write_gates, num_writes)
with self.test_session():
err = tf.test.compute_gradient_error(
[usage, write_gates],
[usage.get_shape().as_list(), write_gates.get_shape().as_list()],
weights,
weights.get_shape().as_list(),
delta=1e-5)
self.assertLess(err, 0.01)
def testAllocation(self):
batch_size = 7
memory_size = 13
usage = np.random.rand(batch_size, memory_size)
module = addressing.Freeness(memory_size)
allocation = module._allocation(tf.constant(usage))
with self.test_session():
allocation = allocation.eval()
# 1. Test that max allocation goes to min usage, and vice versa.
self.assertAllEqual(np.argmin(usage, axis=1), np.argmax(allocation, axis=1))
self.assertAllEqual(np.argmax(usage, axis=1), np.argmin(allocation, axis=1))
# 2. Test that allocations sum to almost 1.
self.assertAllClose(np.sum(allocation, axis=1), np.ones(batch_size), 0.01)
def testAllocationGradient(self):
batch_size = 1
memory_size = 5
usage = tf.constant(np.random.rand(batch_size, memory_size))
module = addressing.Freeness(memory_size)
allocation = module._allocation(usage)
with self.test_session():
err = tf.test.compute_gradient_error(
usage,
usage.get_shape().as_list(),
allocation,
allocation.get_shape().as_list(),
delta=1e-5)
self.assertLess(err, 0.01)
if __name__ == '__main__':
tf.test.main()
| dnc-master | dnc/addressing_test.py |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DNC access modules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import sonnet as snt
import tensorflow as tf
from dnc import addressing
from dnc import util
AccessState = collections.namedtuple('AccessState', (
'memory', 'read_weights', 'write_weights', 'linkage', 'usage'))
def _erase_and_write(memory, address, reset_weights, values):
"""Module to erase and write in the external memory.
Erase operation:
M_t'(i) = M_{t-1}(i) * (1 - w_t(i) * e_t)
Add operation:
M_t(i) = M_t'(i) + w_t(i) * a_t
where e are the reset_weights, w the write weights and a the values.
Args:
memory: 3-D tensor of shape `[batch_size, memory_size, word_size]`.
address: 3-D tensor `[batch_size, num_writes, memory_size]`.
reset_weights: 3-D tensor `[batch_size, num_writes, word_size]`.
values: 3-D tensor `[batch_size, num_writes, word_size]`.
Returns:
3-D tensor of shape `[batch_size, num_writes, word_size]`.
"""
with tf.name_scope('erase_memory', values=[memory, address, reset_weights]):
expand_address = tf.expand_dims(address, 3)
reset_weights = tf.expand_dims(reset_weights, 2)
weighted_resets = expand_address * reset_weights
reset_gate = util.reduce_prod(1 - weighted_resets, 1)
memory *= reset_gate
with tf.name_scope('additive_write', values=[memory, address, values]):
add_matrix = tf.matmul(address, values, adjoint_a=True)
memory += add_matrix
return memory
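# --- Hedged reference sketch (editor's addition, not in the original file):
# the erase/write update above restated in NumPy for a single batch element,
# to make the equations in the docstring concrete. The helper name is
# hypothetical and nothing in the module calls it.
def _erase_and_write_reference(memory, address, reset_weights, values):
  """NumPy _erase_and_write for arrays without a batch dimension.
  Shapes: memory `[memory_size, word_size]`, address
  `[num_writes, memory_size]`, reset_weights and values
  `[num_writes, word_size]`.
  """
  import numpy as np  # local import keeps the sketch self-contained
  reset_gate = np.ones_like(memory)
  for i in range(address.shape[0]):
    # Erase term for head i: product over heads of (1 - w_t^i(slot) * e_t^i).
    reset_gate *= 1 - np.outer(address[i], reset_weights[i])
  erased = memory * reset_gate
  # Additive write: sum over heads of outer(w_t^i, a_t^i).
  return erased + address.T.dot(values)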
class MemoryAccess(snt.RNNCore):
"""Access module of the Differentiable Neural Computer.
This memory module supports multiple read and write heads. It makes use of:
* `addressing.TemporalLinkage` to track the temporal ordering of writes in
memory for each write head.
  * `addressing.Freeness` for keeping track of memory usage, where usage
      increases when a memory location is written to, and decreases when
      memory that the controller says can be freed is read from.
Write-address selection is done by an interpolation between content-based
lookup and using unused memory.
Read-address selection is done by an interpolation of content-based lookup
and following the link graph in the forward or backwards read direction.
"""
def __init__(self,
memory_size=128,
word_size=20,
num_reads=1,
num_writes=1,
name='memory_access'):
"""Creates a MemoryAccess module.
Args:
memory_size: The number of memory slots (N in the DNC paper).
      word_size: The width of each memory slot (W in the DNC paper).
num_reads: The number of read heads (R in the DNC paper).
num_writes: The number of write heads (fixed at 1 in the paper).
name: The name of the module.
"""
super(MemoryAccess, self).__init__(name=name)
self._memory_size = memory_size
self._word_size = word_size
self._num_reads = num_reads
self._num_writes = num_writes
self._write_content_weights_mod = addressing.CosineWeights(
num_writes, word_size, name='write_content_weights')
self._read_content_weights_mod = addressing.CosineWeights(
num_reads, word_size, name='read_content_weights')
self._linkage = addressing.TemporalLinkage(memory_size, num_writes)
self._freeness = addressing.Freeness(memory_size)
def _build(self, inputs, prev_state):
"""Connects the MemoryAccess module into the graph.
Args:
inputs: tensor of shape `[batch_size, input_size]`. This is used to
control this access module.
prev_state: Instance of `AccessState` containing the previous state.
Returns:
A tuple `(output, next_state)`, where `output` is a tensor of shape
`[batch_size, num_reads, word_size]`, and `next_state` is the new
`AccessState` named tuple at the current time t.
"""
inputs = self._read_inputs(inputs)
# Update usage using inputs['free_gate'] and previous read & write weights.
usage = self._freeness(
write_weights=prev_state.write_weights,
free_gate=inputs['free_gate'],
read_weights=prev_state.read_weights,
prev_usage=prev_state.usage)
# Write to memory.
write_weights = self._write_weights(inputs, prev_state.memory, usage)
memory = _erase_and_write(
prev_state.memory,
address=write_weights,
reset_weights=inputs['erase_vectors'],
values=inputs['write_vectors'])
linkage_state = self._linkage(write_weights, prev_state.linkage)
# Read from memory.
read_weights = self._read_weights(
inputs,
memory=memory,
prev_read_weights=prev_state.read_weights,
link=linkage_state.link)
read_words = tf.matmul(read_weights, memory)
return (read_words, AccessState(
memory=memory,
read_weights=read_weights,
write_weights=write_weights,
linkage=linkage_state,
usage=usage))
def _read_inputs(self, inputs):
"""Applies transformations to `inputs` to get control for this module."""
def _linear(first_dim, second_dim, name, activation=None):
"""Returns a linear transformation of `inputs`, followed by a reshape."""
linear = snt.Linear(first_dim * second_dim, name=name)(inputs)
if activation is not None:
linear = activation(linear, name=name + '_activation')
return tf.reshape(linear, [-1, first_dim, second_dim])
# v_t^i - The vectors to write to memory, for each write head `i`.
write_vectors = _linear(self._num_writes, self._word_size, 'write_vectors')
# e_t^i - Amount to erase the memory by before writing, for each write head.
erase_vectors = _linear(self._num_writes, self._word_size, 'erase_vectors',
tf.sigmoid)
# f_t^j - Amount that the memory at the locations read from at the previous
# time step can be declared unused, for each read head `j`.
free_gate = tf.sigmoid(
snt.Linear(self._num_reads, name='free_gate')(inputs))
# g_t^{a, i} - Interpolation between writing to unallocated memory and
# content-based lookup, for each write head `i`. Note: `a` is simply used to
# identify this gate with allocation vs writing (as defined below).
allocation_gate = tf.sigmoid(
snt.Linear(self._num_writes, name='allocation_gate')(inputs))
# g_t^{w, i} - Overall gating of write amount for each write head.
write_gate = tf.sigmoid(
snt.Linear(self._num_writes, name='write_gate')(inputs))
# \pi_t^j - Mixing between "backwards" and "forwards" positions (for
# each write head), and content-based lookup, for each read head.
num_read_modes = 1 + 2 * self._num_writes
read_mode = snt.BatchApply(tf.nn.softmax)(
_linear(self._num_reads, num_read_modes, name='read_mode'))
# Parameters for the (read / write) "weights by content matching" modules.
write_keys = _linear(self._num_writes, self._word_size, 'write_keys')
write_strengths = snt.Linear(self._num_writes, name='write_strengths')(
inputs)
read_keys = _linear(self._num_reads, self._word_size, 'read_keys')
read_strengths = snt.Linear(self._num_reads, name='read_strengths')(inputs)
result = {
'read_content_keys': read_keys,
'read_content_strengths': read_strengths,
'write_content_keys': write_keys,
'write_content_strengths': write_strengths,
'write_vectors': write_vectors,
'erase_vectors': erase_vectors,
'free_gate': free_gate,
'allocation_gate': allocation_gate,
'write_gate': write_gate,
'read_mode': read_mode,
}
return result
def _write_weights(self, inputs, memory, usage):
"""Calculates the memory locations to write to.
This uses a combination of content-based lookup and finding an unused
location in memory, for each write head.
Args:
inputs: Collection of inputs to the access module, including controls for
          how to choose memory writing, such as the content to look up and the
weighting between content-based and allocation-based addressing.
memory: A tensor of shape `[batch_size, memory_size, word_size]`
containing the current memory contents.
usage: Current memory usage, which is a tensor of shape `[batch_size,
memory_size]`, used for allocation-based addressing.
Returns:
tensor of shape `[batch_size, num_writes, memory_size]` indicating where
to write to (if anywhere) for each write head.
"""
with tf.name_scope('write_weights', values=[inputs, memory, usage]):
# c_t^{w, i} - The content-based weights for each write head.
write_content_weights = self._write_content_weights_mod(
memory, inputs['write_content_keys'],
inputs['write_content_strengths'])
# a_t^i - The allocation weights for each write head.
write_allocation_weights = self._freeness.write_allocation_weights(
usage=usage,
write_gates=(inputs['allocation_gate'] * inputs['write_gate']),
num_writes=self._num_writes)
# Expands gates over memory locations.
allocation_gate = tf.expand_dims(inputs['allocation_gate'], -1)
write_gate = tf.expand_dims(inputs['write_gate'], -1)
# w_t^{w, i} - The write weightings for each write head.
return write_gate * (allocation_gate * write_allocation_weights +
(1 - allocation_gate) * write_content_weights)
def _read_weights(self, inputs, memory, prev_read_weights, link):
"""Calculates read weights for each read head.
The read weights are a combination of following the link graphs in the
forward or backward directions from the previous read position, and doing
content-based lookup. The interpolation between these different modes is
done by `inputs['read_mode']`.
Args:
inputs: Controls for this access module. This contains the content-based
keys to lookup, and the weightings for the different read modes.
memory: A tensor of shape `[batch_size, memory_size, word_size]`
containing the current memory contents to do content-based lookup.
prev_read_weights: A tensor of shape `[batch_size, num_reads,
memory_size]` containing the previous read locations.
link: A tensor of shape `[batch_size, num_writes, memory_size,
memory_size]` containing the temporal write transition graphs.
Returns:
A tensor of shape `[batch_size, num_reads, memory_size]` containing the
read weights for each read head.
"""
with tf.name_scope(
'read_weights', values=[inputs, memory, prev_read_weights, link]):
# c_t^{r, i} - The content weightings for each read head.
content_weights = self._read_content_weights_mod(
memory, inputs['read_content_keys'], inputs['read_content_strengths'])
# Calculates f_t^i and b_t^i.
forward_weights = self._linkage.directional_read_weights(
link, prev_read_weights, forward=True)
backward_weights = self._linkage.directional_read_weights(
link, prev_read_weights, forward=False)
backward_mode = inputs['read_mode'][:, :, :self._num_writes]
forward_mode = (
inputs['read_mode'][:, :, self._num_writes:2 * self._num_writes])
content_mode = inputs['read_mode'][:, :, 2 * self._num_writes]
read_weights = (
tf.expand_dims(content_mode, 2) * content_weights + tf.reduce_sum(
tf.expand_dims(forward_mode, 3) * forward_weights, 2) +
tf.reduce_sum(tf.expand_dims(backward_mode, 3) * backward_weights, 2))
return read_weights
@property
def state_size(self):
"""Returns a tuple of the shape of the state tensors."""
return AccessState(
memory=tf.TensorShape([self._memory_size, self._word_size]),
read_weights=tf.TensorShape([self._num_reads, self._memory_size]),
write_weights=tf.TensorShape([self._num_writes, self._memory_size]),
linkage=self._linkage.state_size,
usage=self._freeness.state_size)
@property
def output_size(self):
"""Returns the output shape."""
return tf.TensorShape([self._num_reads, self._word_size])
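# --- Hedged usage sketch (editor's addition, not in the original file): how a
# MemoryAccess core might be unrolled for a few timesteps, mirroring the way
# the unit tests drive it. All sizes are illustrative and the function name is
# hypothetical.
def _memory_access_usage_sketch():
  batch_size, input_size, timesteps = 2, 12, 3
  module = MemoryAccess(memory_size=16, word_size=5, num_reads=2, num_writes=1)
  inputs = tf.random_normal([timesteps, batch_size, input_size])
  state = module.initial_state(batch_size)
  read_words = []
  for t in range(timesteps):
    # Each step returns `[batch_size, num_reads, word_size]` read words and the
    # next AccessState.
    output, state = module(inputs[t], state)
    read_words.append(output)
  return tf.stack(read_words), state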
| dnc-master | dnc/access.py |
dnc-master | dnc/__init__.py |
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from dnc import util
try:
xrange
except NameError:
xrange = range
class BatchInvertPermutation(tf.test.TestCase):
def test(self):
# Tests that the _batch_invert_permutation function correctly inverts a
# batch of permutations.
batch_size = 5
length = 7
permutations = np.empty([batch_size, length], dtype=int)
for i in xrange(batch_size):
permutations[i] = np.random.permutation(length)
inverse = util.batch_invert_permutation(tf.constant(permutations, tf.int32))
with self.test_session():
inverse = inverse.eval()
for i in xrange(batch_size):
for j in xrange(length):
self.assertEqual(permutations[i][inverse[i][j]], j)
class BatchGather(tf.test.TestCase):
def test(self):
values = np.array([[3, 1, 4, 1], [5, 9, 2, 6], [5, 3, 5, 7]])
    indices = np.array([[1, 2, 0, 3], [3, 0, 1, 2], [0, 2, 1, 3]])
    target = np.array([[1, 4, 3, 1], [6, 5, 9, 2], [5, 5, 3, 7]])
    result = util.batch_gather(tf.constant(values), tf.constant(indices))
with self.test_session():
result = result.eval()
self.assertAllEqual(target, result)
if __name__ == '__main__':
tf.test.main()
| dnc-master | dnc/util_test.py |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DNC Cores.
These modules create a DNC core. They take input, pass parameters to the memory
access module, and integrate the output of memory to form an output.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import sonnet as snt
import tensorflow as tf
from dnc import access
DNCState = collections.namedtuple('DNCState', ('access_output', 'access_state',
'controller_state'))
class DNC(snt.RNNCore):
"""DNC core module.
Contains controller and memory access module.
"""
def __init__(self,
access_config,
controller_config,
output_size,
clip_value=None,
name='dnc'):
"""Initializes the DNC core.
Args:
access_config: dictionary of access module configurations.
controller_config: dictionary of controller (LSTM) module configurations.
output_size: output dimension size of core.
clip_value: clips controller and core output values to between
`[-clip_value, clip_value]` if specified.
name: module name (default 'dnc').
"""
super(DNC, self).__init__(name=name)
with self._enter_variable_scope():
self._controller = snt.LSTM(**controller_config)
self._access = access.MemoryAccess(**access_config)
self._access_output_size = np.prod(self._access.output_size.as_list())
self._output_size = output_size
self._clip_value = clip_value or 0
self._output_size = tf.TensorShape([output_size])
self._state_size = DNCState(
access_output=self._access_output_size,
access_state=self._access.state_size,
controller_state=self._controller.state_size)
def _clip_if_enabled(self, x):
if self._clip_value > 0:
return tf.clip_by_value(x, -self._clip_value, self._clip_value)
else:
return x
def _build(self, inputs, prev_state):
"""Connects the DNC core into the graph.
Args:
inputs: Tensor input.
      prev_state: A `DNCState` tuple containing the fields `access_output`,
          `access_state` and `controller_state`. `access_output` is a 3-D
          Tensor of shape `[batch_size, num_reads, word_size]` containing read
          words. `access_state` is a tuple of the access module's state, and
          `controller_state` is a tuple of the controller module's state.
Returns:
A tuple `(output, next_state)` where `output` is a tensor and `next_state`
is a `DNCState` tuple containing the fields `access_output`,
`access_state`, and `controller_state`.
"""
prev_access_output = prev_state.access_output
prev_access_state = prev_state.access_state
prev_controller_state = prev_state.controller_state
batch_flatten = snt.BatchFlatten()
controller_input = tf.concat(
[batch_flatten(inputs), batch_flatten(prev_access_output)], 1)
controller_output, controller_state = self._controller(
controller_input, prev_controller_state)
controller_output = self._clip_if_enabled(controller_output)
    controller_state = tf.contrib.framework.nest.map_structure(
        self._clip_if_enabled, controller_state)
access_output, access_state = self._access(controller_output,
prev_access_state)
output = tf.concat([controller_output, batch_flatten(access_output)], 1)
output = snt.Linear(
output_size=self._output_size.as_list()[0],
name='output_linear')(output)
output = self._clip_if_enabled(output)
return output, DNCState(
access_output=access_output,
access_state=access_state,
controller_state=controller_state)
def initial_state(self, batch_size, dtype=tf.float32):
return DNCState(
controller_state=self._controller.initial_state(batch_size, dtype),
access_state=self._access.initial_state(batch_size, dtype),
access_output=tf.zeros(
[batch_size] + self._access.output_size.as_list(), dtype))
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
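# --- Hedged usage sketch (editor's addition, not in the original file): one
# way to unroll the DNC core over a time-major input batch with
# tf.nn.dynamic_rnn. The configuration values are illustrative only and the
# function name is hypothetical.
def _dnc_usage_sketch():
  access_config = dict(memory_size=16, word_size=16, num_reads=1, num_writes=1)
  controller_config = dict(hidden_size=64)
  core = DNC(access_config, controller_config, output_size=10, clip_value=20)
  batch_size, timesteps, obs_size = 4, 6, 10
  observations = tf.random_normal([timesteps, batch_size, obs_size])
  initial_state = core.initial_state(batch_size)
  # dynamic_rnn threads DNCState through time and returns the stacked outputs.
  output_sequence, final_state = tf.nn.dynamic_rnn(
      cell=core, inputs=observations, time_major=True,
      initial_state=initial_state)
  return output_sequence, final_state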
| dnc-master | dnc/dnc.py |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DNC addressing modules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import sonnet as snt
import tensorflow as tf
from dnc import util
# Ensure values are greater than epsilon to avoid numerical instability.
_EPSILON = 1e-6
TemporalLinkageState = collections.namedtuple('TemporalLinkageState',
('link', 'precedence_weights'))
def _vector_norms(m):
squared_norms = tf.reduce_sum(m * m, axis=2, keepdims=True)
return tf.sqrt(squared_norms + _EPSILON)
def weighted_softmax(activations, strengths, strengths_op):
"""Returns softmax over activations multiplied by positive strengths.
Args:
activations: A tensor of shape `[batch_size, num_heads, memory_size]`, of
activations to be transformed. Softmax is taken over the last dimension.
strengths: A tensor of shape `[batch_size, num_heads]` containing strengths to
multiply by the activations prior to the softmax.
strengths_op: An operation to transform strengths before softmax.
Returns:
A tensor of same shape as `activations` with weighted softmax applied.
"""
transformed_strengths = tf.expand_dims(strengths_op(strengths), -1)
sharp_activations = activations * transformed_strengths
softmax = snt.BatchApply(module_or_op=tf.nn.softmax)
return softmax(sharp_activations)
class CosineWeights(snt.AbstractModule):
"""Cosine-weighted attention.
Calculates the cosine similarity between a query and each word in memory, then
applies a weighted softmax to return a sharp distribution.
"""
def __init__(self,
num_heads,
word_size,
strength_op=tf.nn.softplus,
name='cosine_weights'):
"""Initializes the CosineWeights module.
Args:
num_heads: number of memory heads.
word_size: memory word size.
strength_op: operation to apply to strengths (default is tf.nn.softplus).
name: module name (default 'cosine_weights')
"""
super(CosineWeights, self).__init__(name=name)
self._num_heads = num_heads
self._word_size = word_size
self._strength_op = strength_op
def _build(self, memory, keys, strengths):
"""Connects the CosineWeights module into the graph.
Args:
memory: A 3-D tensor of shape `[batch_size, memory_size, word_size]`.
keys: A 3-D tensor of shape `[batch_size, num_heads, word_size]`.
strengths: A 2-D tensor of shape `[batch_size, num_heads]`.
Returns:
Weights tensor of shape `[batch_size, num_heads, memory_size]`.
"""
# Calculates the inner product between the query vector and words in memory.
dot = tf.matmul(keys, memory, adjoint_b=True)
# Outer product to compute denominator (euclidean norm of query and memory).
memory_norms = _vector_norms(memory)
key_norms = _vector_norms(keys)
norm = tf.matmul(key_norms, memory_norms, adjoint_b=True)
# Calculates cosine similarity between the query vector and words in memory.
similarity = dot / (norm + _EPSILON)
return weighted_softmax(similarity, strengths, self._strength_op)
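# --- Hedged reference sketch (editor's addition, not in the original file):
# the cosine-weighted attention above computed with NumPy for one batch
# element, matching the manual check in the unit tests. The helper name is
# hypothetical and nothing in the module calls it.
def _cosine_weights_reference(memory, keys, strengths):
  """memory `[memory_size, word_size]`, keys `[num_heads, word_size]`,
  strengths `[num_heads]`; returns weights `[num_heads, memory_size]`."""
  import numpy as np  # local import keeps the sketch self-contained
  dot = keys.dot(memory.T)
  norms = (np.linalg.norm(keys, axis=1, keepdims=True) *
           np.linalg.norm(memory, axis=1)[None, :])
  similarity = dot / (norms + _EPSILON)
  # Softplus the strengths, sharpen, then softmax over memory locations.
  sharp = similarity * np.log1p(np.exp(strengths))[:, None]
  unnormed = np.exp(sharp - sharp.max(axis=1, keepdims=True))
  return unnormed / unnormed.sum(axis=1, keepdims=True)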
class TemporalLinkage(snt.RNNCore):
"""Keeps track of write order for forward and backward addressing.
This is a pseudo-RNNCore module, whose state is a pair `(link,
precedence_weights)`, where `link` is a (collection of) graphs for (possibly
multiple) write heads (represented by a tensor with values in the range
[0, 1]), and `precedence_weights` records the "previous write locations" used
to build the link graphs.
The function `directional_read_weights` computes addresses following the
forward and backward directions in the link graphs.
"""
def __init__(self, memory_size, num_writes, name='temporal_linkage'):
"""Construct a TemporalLinkage module.
Args:
memory_size: The number of memory slots.
num_writes: The number of write heads.
name: Name of the module.
"""
super(TemporalLinkage, self).__init__(name=name)
self._memory_size = memory_size
self._num_writes = num_writes
def _build(self, write_weights, prev_state):
"""Calculate the updated linkage state given the write weights.
Args:
write_weights: A tensor of shape `[batch_size, num_writes, memory_size]`
containing the memory addresses of the different write heads.
      prev_state: `TemporalLinkageState` tuple containing a tensor `link` of
shape `[batch_size, num_writes, memory_size, memory_size]`, and a
tensor `precedence_weights` of shape `[batch_size, num_writes,
memory_size]` containing the aggregated history of recent writes.
Returns:
A `TemporalLinkageState` tuple `next_state`, which contains the updated
link and precedence weights.
"""
link = self._link(prev_state.link, prev_state.precedence_weights,
write_weights)
precedence_weights = self._precedence_weights(prev_state.precedence_weights,
write_weights)
return TemporalLinkageState(
link=link, precedence_weights=precedence_weights)
def directional_read_weights(self, link, prev_read_weights, forward):
"""Calculates the forward or the backward read weights.
For each read head (at a given address), there are `num_writes` link graphs
to follow. Thus this function computes a read address for each of the
`num_reads * num_writes` pairs of read and write heads.
Args:
link: tensor of shape `[batch_size, num_writes, memory_size,
memory_size]` representing the link graphs L_t.
prev_read_weights: tensor of shape `[batch_size, num_reads,
memory_size]` containing the previous read weights w_{t-1}^r.
forward: Boolean indicating whether to follow the "future" direction in
the link graph (True) or the "past" direction (False).
Returns:
tensor of shape `[batch_size, num_reads, num_writes, memory_size]`
"""
with tf.name_scope('directional_read_weights'):
# We calculate the forward and backward directions for each pair of
# read and write heads; hence we need to tile the read weights and do a
# sort of "outer product" to get this.
expanded_read_weights = tf.stack([prev_read_weights] * self._num_writes,
1)
result = tf.matmul(expanded_read_weights, link, adjoint_b=forward)
# Swap dimensions 1, 2 so order is [batch, reads, writes, memory]:
return tf.transpose(result, perm=[0, 2, 1, 3])
def _link(self, prev_link, prev_precedence_weights, write_weights):
"""Calculates the new link graphs.
For each write head, the link is a directed graph (represented by a matrix
with entries in range [0, 1]) whose vertices are the memory locations, and
an edge indicates temporal ordering of writes.
Args:
prev_link: A tensor of shape `[batch_size, num_writes, memory_size,
memory_size]` representing the previous link graphs for each write
head.
prev_precedence_weights: A tensor of shape `[batch_size, num_writes,
memory_size]` which is the previous "aggregated" write weights for
each write head.
write_weights: A tensor of shape `[batch_size, num_writes, memory_size]`
containing the new locations in memory written to.
Returns:
A tensor of shape `[batch_size, num_writes, memory_size, memory_size]`
containing the new link graphs for each write head.
"""
with tf.name_scope('link'):
batch_size = tf.shape(prev_link)[0]
write_weights_i = tf.expand_dims(write_weights, 3)
write_weights_j = tf.expand_dims(write_weights, 2)
prev_precedence_weights_j = tf.expand_dims(prev_precedence_weights, 2)
prev_link_scale = 1 - write_weights_i - write_weights_j
new_link = write_weights_i * prev_precedence_weights_j
link = prev_link_scale * prev_link + new_link
# Return the link with the diagonal set to zero, to remove self-looping
# edges.
return tf.matrix_set_diag(
link,
tf.zeros(
[batch_size, self._num_writes, self._memory_size],
dtype=link.dtype))
def _precedence_weights(self, prev_precedence_weights, write_weights):
"""Calculates the new precedence weights given the current write weights.
The precedence weights are the "aggregated write weights" for each write
head, where write weights with sum close to zero will leave the precedence
weights unchanged, but with sum close to one will replace the precedence
weights.
Args:
prev_precedence_weights: A tensor of shape `[batch_size, num_writes,
memory_size]` containing the previous precedence weights.
write_weights: A tensor of shape `[batch_size, num_writes, memory_size]`
containing the new write weights.
Returns:
A tensor of shape `[batch_size, num_writes, memory_size]` containing the
new precedence weights.
"""
with tf.name_scope('precedence_weights'):
write_sum = tf.reduce_sum(write_weights, 2, keepdims=True)
return (1 - write_sum) * prev_precedence_weights + write_weights
@property
def state_size(self):
"""Returns a `TemporalLinkageState` tuple of the state tensors' shapes."""
return TemporalLinkageState(
link=tf.TensorShape(
[self._num_writes, self._memory_size, self._memory_size]),
precedence_weights=tf.TensorShape([self._num_writes,
self._memory_size]),)
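# --- Hedged reference sketch (editor's addition, not in the original file):
# the link and precedence-weight updates above restated in NumPy for a single
# batch element and a single write head. The helper name is hypothetical and
# nothing in the module calls it.
def _temporal_linkage_reference(prev_link, prev_precedence_weights,
                                write_weights):
  """prev_link is `[memory_size, memory_size]`; the other arguments and the
  returned precedence weights are `[memory_size]` vectors."""
  import numpy as np  # local import keeps the sketch self-contained
  w_i = write_weights[:, None]  # write weights broadcast over rows
  w_j = write_weights[None, :]  # write weights broadcast over columns
  link = (1 - w_i - w_j) * prev_link + w_i * prev_precedence_weights[None, :]
  np.fill_diagonal(link, 0.0)   # remove self-looping edges
  precedence_weights = ((1 - write_weights.sum()) * prev_precedence_weights
                        + write_weights)
  return link, precedence_weights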
class Freeness(snt.RNNCore):
"""Memory usage that is increased by writing and decreased by reading.
This module is a pseudo-RNNCore whose state is a tensor with values in
the range [0, 1] indicating the usage of each of `memory_size` memory slots.
The usage is:
* Increased by writing, where usage is increased towards 1 at the write
addresses.
* Decreased by reading, where usage is decreased after reading from a
location when free_gate is close to 1.
The function `write_allocation_weights` can be invoked to get free locations
to write to for a number of write heads.
"""
def __init__(self, memory_size, name='freeness'):
"""Creates a Freeness module.
Args:
memory_size: Number of memory slots.
name: Name of the module.
"""
super(Freeness, self).__init__(name=name)
self._memory_size = memory_size
def _build(self, write_weights, free_gate, read_weights, prev_usage):
"""Calculates the new memory usage u_t.
Memory that was written to in the previous time step will have its usage
increased; memory that was read from and the controller says can be "freed"
will have its usage decreased.
Args:
write_weights: tensor of shape `[batch_size, num_writes,
memory_size]` giving write weights at previous time step.
free_gate: tensor of shape `[batch_size, num_reads]` which indicates
which read heads read memory that can now be freed.
read_weights: tensor of shape `[batch_size, num_reads,
memory_size]` giving read weights at previous time step.
prev_usage: tensor of shape `[batch_size, memory_size]` giving
usage u_{t - 1} at the previous time step, with entries in range
[0, 1].
Returns:
tensor of shape `[batch_size, memory_size]` representing updated memory
usage.
"""
# Calculation of usage is not differentiable with respect to write weights.
write_weights = tf.stop_gradient(write_weights)
usage = self._usage_after_write(prev_usage, write_weights)
usage = self._usage_after_read(usage, free_gate, read_weights)
return usage
def write_allocation_weights(self, usage, write_gates, num_writes):
"""Calculates freeness-based locations for writing to.
This finds unused memory by ranking the memory locations by usage, for each
write head. (For more than one write head, we use a "simulated new usage"
which takes into account the fact that the previous write head will increase
the usage in that area of the memory.)
Args:
usage: A tensor of shape `[batch_size, memory_size]` representing
current memory usage.
write_gates: A tensor of shape `[batch_size, num_writes]` with values in
the range [0, 1] indicating how much each write head does writing
based on the address returned here (and hence how much usage
increases).
num_writes: The number of write heads to calculate write weights for.
Returns:
tensor of shape `[batch_size, num_writes, memory_size]` containing the
freeness-based write locations. Note that this isn't scaled by
`write_gate`; this scaling must be applied externally.
"""
with tf.name_scope('write_allocation_weights'):
# expand gatings over memory locations
write_gates = tf.expand_dims(write_gates, -1)
allocation_weights = []
for i in range(num_writes):
allocation_weights.append(self._allocation(usage))
# update usage to take into account writing to this new allocation
usage += ((1 - usage) * write_gates[:, i, :] * allocation_weights[i])
# Pack the allocation weights for the write heads into one tensor.
return tf.stack(allocation_weights, axis=1)
def _usage_after_write(self, prev_usage, write_weights):
"""Calcualtes the new usage after writing to memory.
Args:
prev_usage: tensor of shape `[batch_size, memory_size]`.
write_weights: tensor of shape `[batch_size, num_writes, memory_size]`.
Returns:
New usage, a tensor of shape `[batch_size, memory_size]`.
"""
with tf.name_scope('usage_after_write'):
# Calculate the aggregated effect of all write heads
write_weights = 1 - util.reduce_prod(1 - write_weights, 1)
return prev_usage + (1 - prev_usage) * write_weights
def _usage_after_read(self, prev_usage, free_gate, read_weights):
"""Calcualtes the new usage after reading and freeing from memory.
Args:
prev_usage: tensor of shape `[batch_size, memory_size]`.
free_gate: tensor of shape `[batch_size, num_reads]` with entries in the
range [0, 1] indicating the amount that locations read from can be
freed.
read_weights: tensor of shape `[batch_size, num_reads, memory_size]`.
Returns:
New usage, a tensor of shape `[batch_size, memory_size]`.
"""
with tf.name_scope('usage_after_read'):
free_gate = tf.expand_dims(free_gate, -1)
free_read_weights = free_gate * read_weights
phi = util.reduce_prod(1 - free_read_weights, 1, name='phi')
return prev_usage * phi
def _allocation(self, usage):
r"""Computes allocation by sorting `usage`.
This corresponds to the value a = a_t[\phi_t[j]] in the paper.
Args:
usage: tensor of shape `[batch_size, memory_size]` indicating current
memory usage. This is equal to u_t in the paper when we only have one
write head, but for multiple write heads, one should update the usage
while iterating through the write heads to take into account the
allocation returned by this function.
Returns:
Tensor of shape `[batch_size, memory_size]` corresponding to allocation.
"""
with tf.name_scope('allocation'):
# Ensure values are not too small prior to cumprod.
usage = _EPSILON + (1 - _EPSILON) * usage
nonusage = 1 - usage
sorted_nonusage, indices = tf.nn.top_k(
nonusage, k=self._memory_size, name='sort')
sorted_usage = 1 - sorted_nonusage
prod_sorted_usage = tf.cumprod(sorted_usage, axis=1, exclusive=True)
sorted_allocation = sorted_nonusage * prod_sorted_usage
inverse_indices = util.batch_invert_permutation(indices)
# This final line "unsorts" sorted_allocation, so that the indexing
# corresponds to the original indexing of `usage`.
return util.batch_gather(sorted_allocation, inverse_indices)
@property
def state_size(self):
"""Returns the shape of the state tensor."""
return tf.TensorShape([self._memory_size])
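# --- Hedged reference sketch (editor's addition, not in the original file):
# the sort-based allocation above restated in NumPy for a single batch
# element, following a_t[phi_t[j]] = (1 - u[phi_t[j]]) * prod_{k<j} u[phi_t[k]]
# from the paper. The helper name is hypothetical and nothing in the module
# calls it.
def _allocation_reference(usage):
  """usage is a float `[memory_size]` vector with entries in [0, 1]."""
  import numpy as np  # local import keeps the sketch self-contained
  order = np.argsort(usage)  # free list phi: least-used locations first
  sorted_usage = usage[order]
  # Exclusive cumulative product of usage along the free list.
  exclusive_prod = np.cumprod(np.concatenate(([1.0], sorted_usage[:-1])))
  sorted_allocation = (1 - sorted_usage) * exclusive_prod
  allocation = np.empty_like(usage)
  allocation[order] = sorted_allocation  # unsort back to the original indexing
  return allocation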
| dnc-master | dnc/addressing.py |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A repeat copy task."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import sonnet as snt
import tensorflow as tf
DatasetTensors = collections.namedtuple('DatasetTensors', ('observations',
'target', 'mask'))
def masked_sigmoid_cross_entropy(logits,
target,
mask,
time_average=False,
log_prob_in_bits=False):
"""Adds ops to graph which compute the (scalar) NLL of the target sequence.
The logits parametrize independent bernoulli distributions per time-step and
per batch element, and irrelevant time/batch elements are masked out by the
mask tensor.
Args:
logits: `Tensor` of activations for which sigmoid(`logits`) gives the
bernoulli parameter.
    target: time-major `Tensor` of target values.
    mask: time-major `Tensor` multiplied elementwise with the T x B cost,
      masking out irrelevant time-steps.
    time_average: optionally average over the time dimension (sum by default).
    log_prob_in_bits: if True, express log-probabilities in bits (default nats).
  Returns:
    A scalar `Tensor` representing the masked negative log-probability (NLL)
    of the target.
"""
xent = tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=logits)
loss_time_batch = tf.reduce_sum(xent, axis=2)
loss_batch = tf.reduce_sum(loss_time_batch * mask, axis=0)
batch_size = tf.cast(tf.shape(logits)[1], dtype=loss_time_batch.dtype)
if time_average:
mask_count = tf.reduce_sum(mask, axis=0)
loss_batch /= (mask_count + np.finfo(np.float32).eps)
loss = tf.reduce_sum(loss_batch) / batch_size
if log_prob_in_bits:
loss /= tf.log(2.)
return loss
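# --- Hedged reference sketch (editor's addition, not in the original file):
# the masked Bernoulli NLL above restated in NumPy (sum over time, mean over
# batch, no time averaging, expressed in nats). The helper name is
# hypothetical and nothing in the module calls it.
def _masked_xent_reference(logits, target, mask):
  """logits/target are `[time, batch, bits]` arrays, mask is `[time, batch]`."""
  # Numerically stable elementwise sigmoid cross-entropy:
  # max(x, 0) - x * z + log(1 + exp(-|x|)).
  xent = (np.maximum(logits, 0) - logits * target
          + np.log1p(np.exp(-np.abs(logits))))
  loss_time_batch = xent.sum(axis=2)
  loss_batch = (loss_time_batch * mask).sum(axis=0)
  return loss_batch.mean()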
def bitstring_readable(data, batch_size, model_output=None, whole_batch=False):
"""Produce a human readable representation of the sequences in data.
Args:
data: data to be visualised
batch_size: size of batch
model_output: optional model output tensor to visualize alongside data.
    whole_batch: whether to visualise the whole batch. Only the first sample
      will be visualised if False.
Returns:
A string used to visualise the data batch
"""
def _readable(datum):
return '+' + ' '.join(['-' if x == 0 else '%d' % x for x in datum]) + '+'
obs_batch = data.observations
targ_batch = data.target
iterate_over = range(batch_size) if whole_batch else range(1)
batch_strings = []
for batch_index in iterate_over:
obs = obs_batch[:, batch_index, :]
targ = targ_batch[:, batch_index, :]
obs_channels = range(obs.shape[1])
targ_channels = range(targ.shape[1])
obs_channel_strings = [_readable(obs[:, i]) for i in obs_channels]
targ_channel_strings = [_readable(targ[:, i]) for i in targ_channels]
readable_obs = 'Observations:\n' + '\n'.join(obs_channel_strings)
readable_targ = 'Targets:\n' + '\n'.join(targ_channel_strings)
strings = [readable_obs, readable_targ]
if model_output is not None:
output = model_output[:, batch_index, :]
output_strings = [_readable(output[:, i]) for i in targ_channels]
strings.append('Model Output:\n' + '\n'.join(output_strings))
batch_strings.append('\n\n'.join(strings))
return '\n' + '\n\n\n\n'.join(batch_strings)
class RepeatCopy(snt.AbstractModule):
"""Sequence data generator for the task of repeating a random binary pattern.
When called, an instance of this class will return a tuple of tensorflow ops
(obs, targ, mask), representing an input sequence, target sequence, and
binary mask. Each of these ops produces tensors whose first two dimensions
represent sequence position and batch index respectively. The value in
mask[t, b] is equal to 1 iff a prediction about targ[t, b, :] should be
penalized and 0 otherwise.
For each realisation from this generator, the observation sequence is
comprised of I.I.D. uniform-random binary vectors (and some flags).
The target sequence is comprised of this binary pattern repeated
some number of times (and some flags). Before explaining in more detail,
let's examine the setup pictorially for a single batch element:
```none
Note: blank space represents 0.
time ------------------------------------------>
+-------------------------------+
mask: |0000000001111111111111111111111|
+-------------------------------+
+-------------------------------+
target: | 1| 'end-marker' channel.
| 101100110110011011001 |
| 010101001010100101010 |
+-------------------------------+
+-------------------------------+
observation: | 1011001 |
| 0101010 |
|1 | 'start-marker' channel
| 3 | 'num-repeats' channel.
+-------------------------------+
```
The length of the random pattern and the number of times it is repeated
in the target are both discrete random variables distributed according to
uniform distributions whose parameters are configured at construction time.
The obs sequence has two extra channels (components in the trailing dimension)
which are used for flags. One channel is marked with a 1 at the first time
step and is otherwise equal to 0. The other extra channel is zero until the
binary pattern to be repeated ends. At this point, it contains an encoding of
the number of times the observation pattern should be repeated. Rather than
simply providing this integer number directly, it is normalised so that
a neural network may have an easier time representing the number of
repetitions internally. To allow a network to be readily evaluated on
instances of this task with greater numbers of repetitions, the range with
respect to which this encoding is normalised is also configurable by the user.
As in the diagram, the target sequence is offset to begin directly after the
observation sequence; both sequences are padded with zeros to accomplish this,
resulting in their lengths being equal. Additional padding is done at the end
so that all sequences in a minibatch represent tensors with the same shape.
"""
def __init__(
self,
num_bits=6,
batch_size=1,
min_length=1,
max_length=1,
min_repeats=1,
max_repeats=2,
norm_max=10,
log_prob_in_bits=False,
time_average_cost=False,
name='repeat_copy',):
"""Creates an instance of RepeatCopy task.
Args:
name: A name for the generator instance (for name scope purposes).
num_bits: The dimensionality of each random binary vector.
batch_size: Minibatch size per realization.
min_length: Lower limit on number of random binary vectors in the
observation pattern.
max_length: Upper limit on number of random binary vectors in the
observation pattern.
      min_repeats: Lower limit on number of times the observation pattern
is repeated in targ.
max_repeats: Upper limit on number of times the observation pattern
is repeated in targ.
norm_max: Upper limit on uniform distribution w.r.t which the encoding
of the number of repetitions presented in the observation sequence
is normalised.
log_prob_in_bits: By default, log probabilities are expressed in units of
nats. If true, express log probabilities in bits.
      time_average_cost: If true, the cost at each time step will be
        divided by the `true` sequence length (the number of non-masked time
        steps in each sequence) before any subsequent reduction over the time
        and batch dimensions.
"""
super(RepeatCopy, self).__init__(name=name)
self._batch_size = batch_size
self._num_bits = num_bits
self._min_length = min_length
self._max_length = max_length
self._min_repeats = min_repeats
self._max_repeats = max_repeats
self._norm_max = norm_max
self._log_prob_in_bits = log_prob_in_bits
self._time_average_cost = time_average_cost
def _normalise(self, val):
return val / self._norm_max
def _unnormalise(self, val):
return val * self._norm_max
@property
def time_average_cost(self):
return self._time_average_cost
@property
def log_prob_in_bits(self):
return self._log_prob_in_bits
@property
def num_bits(self):
"""The dimensionality of each random binary vector in a pattern."""
return self._num_bits
@property
def target_size(self):
"""The dimensionality of the target tensor."""
return self._num_bits + 1
@property
def batch_size(self):
return self._batch_size
def _build(self):
"""Implements build method which adds ops to graph."""
# short-hand for private fields.
min_length, max_length = self._min_length, self._max_length
min_reps, max_reps = self._min_repeats, self._max_repeats
num_bits = self.num_bits
batch_size = self.batch_size
# We reserve one dimension for the num-repeats and one for the start-marker.
full_obs_size = num_bits + 2
# We reserve one target dimension for the end-marker.
full_targ_size = num_bits + 1
start_end_flag_idx = full_obs_size - 2
num_repeats_channel_idx = full_obs_size - 1
# Samples each batch index's sequence length and the number of repeats.
sub_seq_length_batch = tf.random_uniform(
[batch_size], minval=min_length, maxval=max_length + 1, dtype=tf.int32)
num_repeats_batch = tf.random_uniform(
[batch_size], minval=min_reps, maxval=max_reps + 1, dtype=tf.int32)
# Pads all the batches to have the same total sequence length.
total_length_batch = sub_seq_length_batch * (num_repeats_batch + 1) + 3
max_length_batch = tf.reduce_max(total_length_batch)
residual_length_batch = max_length_batch - total_length_batch
obs_batch_shape = [max_length_batch, batch_size, full_obs_size]
targ_batch_shape = [max_length_batch, batch_size, full_targ_size]
mask_batch_trans_shape = [batch_size, max_length_batch]
obs_tensors = []
targ_tensors = []
mask_tensors = []
# Generates patterns for each batch element independently.
for batch_index in range(batch_size):
sub_seq_len = sub_seq_length_batch[batch_index]
num_reps = num_repeats_batch[batch_index]
# The observation pattern is a sequence of random binary vectors.
obs_pattern_shape = [sub_seq_len, num_bits]
obs_pattern = tf.cast(
tf.random_uniform(
obs_pattern_shape, minval=0, maxval=2, dtype=tf.int32),
tf.float32)
# The target pattern is the observation pattern repeated n times.
# Some reshaping is required to accomplish the tiling.
targ_pattern_shape = [sub_seq_len * num_reps, num_bits]
flat_obs_pattern = tf.reshape(obs_pattern, [-1])
flat_targ_pattern = tf.tile(flat_obs_pattern, tf.stack([num_reps]))
targ_pattern = tf.reshape(flat_targ_pattern, targ_pattern_shape)
# Expand the obs_pattern to have two extra channels for flags.
# Concatenate start flag and num_reps flag to the sequence.
obs_flag_channel_pad = tf.zeros([sub_seq_len, 2])
obs_start_flag = tf.one_hot(
[start_end_flag_idx], full_obs_size, on_value=1., off_value=0.)
num_reps_flag = tf.one_hot(
[num_repeats_channel_idx],
full_obs_size,
on_value=self._normalise(tf.cast(num_reps, tf.float32)),
off_value=0.)
# note the concatenation dimensions.
obs = tf.concat([obs_pattern, obs_flag_channel_pad], 1)
obs = tf.concat([obs_start_flag, obs], 0)
obs = tf.concat([obs, num_reps_flag], 0)
# Now do the same for the targ_pattern (it only has one extra channel).
targ_flag_channel_pad = tf.zeros([sub_seq_len * num_reps, 1])
targ_end_flag = tf.one_hot(
[start_end_flag_idx], full_targ_size, on_value=1., off_value=0.)
targ = tf.concat([targ_pattern, targ_flag_channel_pad], 1)
targ = tf.concat([targ, targ_end_flag], 0)
      # Concatenate zeros at end of obs and beginning of targ.
# This aligns them s.t. the target begins as soon as the obs ends.
obs_end_pad = tf.zeros([sub_seq_len * num_reps + 1, full_obs_size])
targ_start_pad = tf.zeros([sub_seq_len + 2, full_targ_size])
# The mask is zero during the obs and one during the targ.
mask_off = tf.zeros([sub_seq_len + 2])
mask_on = tf.ones([sub_seq_len * num_reps + 1])
obs = tf.concat([obs, obs_end_pad], 0)
targ = tf.concat([targ_start_pad, targ], 0)
mask = tf.concat([mask_off, mask_on], 0)
obs_tensors.append(obs)
targ_tensors.append(targ)
mask_tensors.append(mask)
# End the loop over batch index.
    # Compute how much zero padding is needed to make the tensor sequences
    # the same length for all batch elements.
residual_obs_pad = [
tf.zeros([residual_length_batch[i], full_obs_size])
for i in range(batch_size)
]
residual_targ_pad = [
tf.zeros([residual_length_batch[i], full_targ_size])
for i in range(batch_size)
]
residual_mask_pad = [
tf.zeros([residual_length_batch[i]]) for i in range(batch_size)
]
# Concatenate the pad to each batch element.
obs_tensors = [
tf.concat([o, p], 0) for o, p in zip(obs_tensors, residual_obs_pad)
]
targ_tensors = [
tf.concat([t, p], 0) for t, p in zip(targ_tensors, residual_targ_pad)
]
mask_tensors = [
tf.concat([m, p], 0) for m, p in zip(mask_tensors, residual_mask_pad)
]
# Concatenate each batch element into a single tensor.
obs = tf.reshape(tf.concat(obs_tensors, 1), obs_batch_shape)
targ = tf.reshape(tf.concat(targ_tensors, 1), targ_batch_shape)
mask = tf.transpose(
tf.reshape(tf.concat(mask_tensors, 0), mask_batch_trans_shape))
return DatasetTensors(obs, targ, mask)
def cost(self, logits, targ, mask):
return masked_sigmoid_cross_entropy(
logits,
targ,
mask,
time_average=self.time_average_cost,
log_prob_in_bits=self.log_prob_in_bits)
def to_human_readable(self, data, model_output=None, whole_batch=False):
obs = data.observations
    unnormalised_num_reps_flag = self._unnormalise(obs[:, :, -1:]).round()
    obs = np.concatenate([obs[:, :, :-1], unnormalised_num_reps_flag], axis=2)
data = data._replace(observations=obs)
return bitstring_readable(data, self.batch_size, model_output, whole_batch)
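# Illustrative usage sketch (not part of the original module). In TF1 graph
# mode the dataset is a Sonnet module: calling it runs `_build` above, and
# `cost` consumes the model's logits together with the generated targets and
# mask. The model construction below is a placeholder assumption.
#
#   dataset = RepeatCopy(num_bits=4, batch_size=16,
#                        min_length=1, max_length=2,
#                        min_repeats=1, max_repeats=2)
#   dataset_tensors = dataset()  # DatasetTensors(observations, target, mask)
#   # output_logits = model(dataset_tensors.observations)
#   # loss = dataset.cost(output_logits, dataset_tensors.target,
#   #                     dataset_tensors.mask)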
| dnc-master | dnc/repeat_copy.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit testing for the recombination code."""
import unittest
from absl.testing import absltest
from alphatensor.recombination import example
from alphatensor.recombination import recombination
class RecombinationTest(unittest.TestCase):
def test_example(self):
base_factors = example.get_3x3x3_factorization()
results = recombination.recombine((10, 4, 9), base_factors)
self.assertEqual(results['rank'], 255)
if __name__ == '__main__':
absltest.main()
| alphatensor-main | recombination/recombination_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implement (a generalized version of) the idea from papers [1, 2].
[1] Alexandre Sedoglavic, "A non-commutative algorithm for multiplying (7 × 7)
matrices using 250 multiplications" (2017).
[2] Drevet, Charles-Éric, Md Nazrul Islam, and Éric Schost. "Optimization
techniques for small matrix multiplication." Theoretical Computer Science 412.22
(2011): 2219-2236.
"""
from typing import Any, Dict, Iterator, List, Sequence, Tuple
import numpy as np
from alphatensor.recombination import sota
def _tensor_shape_to_matrix_sizes(
tensor_shape: Tuple[int, int, int]) -> Tuple[int, int, int]:
"""Returns the sizes of the multiplied matrices from the matmul tensor shape.
When multiplying an [a, b] and [b, c] matrix, the size of the corresponding
matrix multiplication tensor T_{a, b, c} is [ab, bc, ca]. This function
computes the inverse mapping from the tensor size to the matrix sizes.
Args:
tensor_shape: Shape of a 3D matrix multiplication tensor T_{a, b, c}.
Returns:
The three integers a, b, c describing the matrix sizes being multiplied.
"""
ab, bc, ca = tensor_shape
a = int(np.sqrt(ab * ca // bc))
b = int(np.sqrt(ab * bc // ca))
c = int(np.sqrt(bc * ca // ab))
assert a * b == ab and b * c == bc and c * a == ca
return a, b, c
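# Worked example (illustrative, not part of the original module): the tensor
# T_{3, 4, 5} for multiplying a [3, 4] matrix by a [4, 5] matrix has shape
# [ab, bc, ca] = [12, 20, 15], and this function inverts that mapping:
#
#   _tensor_shape_to_matrix_sizes((12, 20, 15))  # == (3, 4, 5)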
def _factorization_2d_to_3d(
factors: Tuple[np.ndarray, np.ndarray, np.ndarray],
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Converts factorization with `u` of shape [a*b, rank] into [a, b, rank]."""
u, v, w = factors
a, b, c = _tensor_shape_to_matrix_sizes((u.shape[0], v.shape[0], w.shape[0]))
rank = u.shape[-1]
u = u.reshape(a, b, rank)
v = v.reshape(b, c, rank)
w = w.reshape(c, a, rank)
return u, v, w
def _block_fillings(num_blocks: int, budget: int) -> Iterator[List[int]]:
"""Iterates over all options of filling `num_blocks` with `budget` balls."""
if num_blocks == 1:
yield [budget]
return
for i in range(budget + 1):
for rest in _block_fillings(num_blocks - 1, budget - i):
yield [i] + rest
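# Worked example (illustrative, not part of the original module): distributing
# a budget of 3 over 2 blocks enumerates every way of splitting 3 rows (or
# columns) between the two block rows (or columns):
#
#   list(_block_fillings(2, 3))  # == [[0, 3], [1, 2], [2, 1], [3, 0]]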
def _process_additions(matricized_factor_vector: np.ndarray,
row_nonzeros: Sequence[int],
col_nonzeros: Sequence[int]) -> Tuple[int, int]:
"""Returns the nonzero matrix size after adding multiple matrices together.
Nonzero elements of a factor vector stipulate that the corresponding entries
of the base matrix (which in this case are themselves matrices) are to be
added up. The number of potentially nonzero rows in this sum is the maximum
over the number of nonzero rows in each summand, and similarly for the number
of columns. See Supplementary Information of the paper for an illustrative
example.
Args:
matricized_factor_vector: [x, y]-shaped array representing a single factor
vector (`u`, or `v`, or `w`) in matrix form. For example, [x, y] = [a, b]
when this is a `u` vector.
row_nonzeros: List of length x, with the i-th entry specifying the number of
rows of the target matrix that were allocated to the i-th row of the base
matrix on the first level of recursion.
col_nonzeros: List of length y, with the i-th entry specifying the number of
columns of the target matrix that were allocated to the i-th column of the
base matrix on the first level of recursion.
Returns:
Two integers describing respectively the largest number of nonzero rows and
columns after the submatrices corresponding to nonzero entries of the factor
vector `matricized_factor_vector` are added up.
"""
max_rows = 0
max_cols = 0
for i, j in zip(*np.nonzero(matricized_factor_vector)):
max_rows = max(max_rows, row_nonzeros[i])
max_cols = max(max_cols, col_nonzeros[j])
return max_rows, max_cols
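# Worked example (illustrative, not part of the original module): with
# nonzeros at positions (0, 0) and (1, 1) of the factor vector, the summands
# occupy at most max(row_nonzeros[0], row_nonzeros[1]) rows and
# max(col_nonzeros[0], col_nonzeros[1]) columns, so e.g.
#
#   _process_additions(np.array([[1, 0], [0, 1]]),
#                      row_nonzeros=[2, 3], col_nonzeros=[4, 1])  # == (3, 4)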
def recombine(target_matrix_sizes: Tuple[int, int, int],
base_factors: Tuple[np.ndarray, np.ndarray, np.ndarray],
) -> Dict[str, Any]:
"""Decomposes T_{a, b, c} using `base_factors` as the 1st level of recursion.
See Supplementary Information of the paper for more details.
Args:
    target_matrix_sizes: Triplet (a, b, c) specifying the matrix
      multiplication problem of multiplying an [a, b] matrix by a [b, c]
      matrix. Equivalently, specifies a matrix multiplication tensor
      T_{a, b, c} to be decomposed.
base_factors: Three arrays providing a factorization of a (usually smaller)
matrix multiplication tensor T_{base_a, base_b, base_c}. This algorithm
will be used on the first level of recursion when decomposing T_{a, b, c}.
Returns:
Dictionary with information about the best rank discovered for T_{a, b, c}.
"""
base_rank = base_factors[0].shape[-1]
base_tensor_shape = tuple(v.shape[0] for v in base_factors)
base_a, base_b, base_c = _tensor_shape_to_matrix_sizes(base_tensor_shape)
u, v, w = _factorization_2d_to_3d(base_factors)
# The matrix multiplication tensor T_{a, b, c} by convention represents the
# operation (A, B) -> (AB)^T, i.e. with an additional transposition. Here we
# will work with the non-transposed version for simplicity.
w = w.transpose(1, 0, 2)
best = {}
# To apply an algorithm for (base_a, base_b, base_c) to the target problem
# (target_a, target_b, target_c), we try all possibilities of how to allocate
# the `target_a` rows of the original problem to the `base_a` rows of the
# algorithm to be applied on the first level of recursion; and similarly for
# the `target_b` and `target_c` dimensions.
target_a, target_b, target_c = target_matrix_sizes
for allocation_a in _block_fillings(base_a, target_a):
for allocation_b in _block_fillings(base_b, target_b):
for allocation_c in _block_fillings(base_c, target_c):
total_rank = 0
small_matrix_sizes = []
for r in range(base_rank):
u1, u2 = _process_additions(u[:, :, r], allocation_a, allocation_b)
v1, v2 = _process_additions(v[:, :, r], allocation_b, allocation_c)
w1, w2 = _process_additions(w[:, :, r], allocation_a, allocation_c)
# We now need to compute the product of [u1, u2] and [v1, v2]-shaped
# matrices (with appropriate zero-padding), and then extract the
# [w1, w2] upper-left portion of the resulting product. Note that this
# can be achieved by an algorithm that multiplies matrices of sizes
# [min(u1, w1), min(u2, v1)] and [min(u2, v1), min(v2, w2)] since it
# is not necessary to compute elements that will end up zero/unused.
current_matrix_sizes = min(u1, w1), min(u2, v1), min(v2, w2)
total_rank += sota.get_sota_rank(*current_matrix_sizes)
small_matrix_sizes.append(current_matrix_sizes)
if not best or total_rank < best['rank']:
best = {
'rank': total_rank,
'small_matrix_sizes': small_matrix_sizes,
'allocation_pattern': (allocation_a, allocation_b, allocation_c),
}
return best
| alphatensor-main | recombination/recombination.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example of using recombination to find rank 255 for T_{4, 9, 10}."""
from typing import Sequence, Tuple
from absl import app
import numpy as np
from alphatensor.recombination import recombination
def get_3x3x3_factorization() -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Returns a rank-23 factorization of the matrix multiplication tensor T_3."""
u = np.array([
[1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, -1, 0, -1, -1, -1, -1, -1, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0],
[0, 0, -1, 1, 1, 0, 1, 0, 0, -1, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, -1, -1, 0, 0, 1, 0, 0, -1, 0, 0],
[0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, -1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, -1, -1, 0, 0, 0, 0, 0, -1, 0,
-1],
[0, 0, 0, 0, 1, 0, 1, 0, 0, -1, 1, -1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],
], dtype=np.int32)
v = np.array([
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0],
[-1, -1, 0, 0, -1, 0, -1, -1, 1, -1, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0],
[0, 1, 0, 0, 1, 0, 1, 1, -1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, -1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1],
[-1, -1, 0, 0, -1, 1, 0, 0, 0, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, -1, -1, 0, 1, 0, 1, 0, -1, 0, 0],
[-1, -1, -1, -1, -1, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0],
], dtype=np.int32)
w = np.array([
[0, 0, 0, 0, 0, 0, -1, 1, 1, 0, 0, -1, -1, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 1, 0, 1, 0, 0, 1, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, -1],
[-1, 1, 0, -1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, -1, 1, 1, 0, -1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0],
[0, 0, 0, 1, -1, 0, 1, 0, 0, 0, -1, 0, -1, 1, 0, 0, 0, -1, 0, 0, -1, 0,
1],
[-1, 1, 0, 0, -1, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, -1, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, -1, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 1, 0, 1, 0, -1, 0, 0, -1, 0, 0],
], dtype=np.int32)
return u, v, w
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
base_factors = get_3x3x3_factorization()
# The result depends on the order of axes; (4, 9, 10) won't work as well.
target_mamu = (10, 4, 9)
# Should return rank 255, which is better than the previously known 259.
result = recombination.recombine(target_mamu, base_factors)
print(result)
assert result['rank'] == 255
if __name__ == '__main__':
app.run(main)
| alphatensor-main | recombination/example.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Best known ranks for different matrix multiplication sizes."""
import math
# Best known ranks of matrix multiplication tensors including improvements found
# by AlphaTensor, but excluding improvements found by recombining solutions.
_SOTA = {
(3, 3, 3): 23,
(3, 3, 4): 29,
(3, 3, 5): 36,
(3, 3, 6): 40,
(3, 3, 7): 49,
(3, 3, 8): 55,
(3, 3, 9): 63,
(3, 3, 10): 69,
(3, 3, 11): 76,
(3, 3, 12): 80,
(3, 4, 4): 38,
(3, 4, 5): 47, # Discovered by AlphaTensor.
(3, 4, 6): 56,
(3, 4, 7): 66,
(3, 4, 8): 75,
(3, 4, 9): 85,
(3, 4, 10): 94,
(3, 4, 11): 104,
(3, 4, 12): 112,
(3, 5, 5): 58,
(3, 5, 6): 70,
(3, 5, 7): 83,
(3, 5, 8): 94,
(3, 5, 9): 106,
(3, 5, 10): 116,
(3, 5, 11): 128,
(3, 5, 12): 140,
(3, 6, 6): 80,
(3, 6, 7): 96,
(3, 6, 8): 110,
(3, 6, 9): 120,
(3, 6, 10): 136,
(3, 6, 11): 150,
(3, 6, 12): 160,
(3, 7, 7): 115,
(3, 7, 8): 130,
(3, 7, 9): 145,
(3, 7, 10): 162,
(3, 7, 11): 179,
(3, 7, 12): 192,
(3, 8, 8): 149,
(3, 8, 9): 165,
(3, 8, 10): 185,
(3, 8, 11): 204,
(3, 8, 12): 220,
(3, 9, 9): 183,
(3, 9, 10): 205,
(3, 9, 11): 226,
(3, 9, 12): 240,
(3, 10, 10): 230,
(3, 10, 11): 252,
(3, 10, 12): 272,
(3, 11, 11): 276,
(3, 11, 12): 300,
(3, 12, 12): 320,
(4, 4, 4): 49,
(4, 4, 5): 63, # Discovered by AlphaTensor.
(4, 4, 6): 75,
(4, 4, 7): 87,
(4, 4, 8): 98,
(4, 4, 9): 104,
(4, 4, 10): 120,
(4, 4, 11): 130,
(4, 4, 12): 142,
(4, 5, 5): 76, # Discovered by AlphaTensor.
(4, 5, 6): 93,
(4, 5, 7): 109,
(4, 5, 8): 122,
(4, 5, 9): 140,
(4, 5, 10): 154,
(4, 5, 11): 170,
(4, 5, 12): 183,
(4, 6, 6): 105,
(4, 6, 7): 125,
(4, 6, 8): 140,
(4, 6, 9): 160,
(4, 6, 10): 175,
(4, 6, 11): 195,
(4, 6, 12): 210,
(4, 7, 7): 147,
(4, 7, 8): 164,
(4, 7, 9): 188,
(4, 7, 10): 207,
(4, 7, 11): 229,
(4, 7, 12): 246,
(4, 8, 8): 182,
(4, 8, 9): 208,
(4, 8, 10): 231,
(4, 8, 11): 255,
(4, 8, 12): 272,
(4, 9, 9): 225,
(4, 9, 10): 259,
(4, 9, 11): 284,
(4, 9, 12): 300,
(4, 10, 10): 280,
(4, 10, 11): 312,
(4, 10, 12): 336,
(4, 11, 11): 346,
(4, 11, 12): 372,
(4, 12, 12): 390,
(5, 5, 5): 98,
(5, 5, 6): 116,
(5, 5, 7): 136,
(5, 5, 8): 152,
(5, 5, 9): 173,
(5, 5, 10): 190,
(5, 5, 11): 206,
(5, 5, 12): 221,
(5, 6, 6): 137,
(5, 6, 7): 159,
(5, 6, 8): 176,
(5, 6, 9): 200,
(5, 6, 10): 218,
(5, 6, 11): 236,
(5, 6, 12): 250,
(5, 7, 7): 185,
(5, 7, 8): 206,
(5, 7, 9): 235,
(5, 7, 10): 258,
(5, 7, 11): 281,
(5, 7, 12): 300,
(5, 8, 8): 230,
(5, 8, 9): 264,
(5, 8, 10): 291,
(5, 8, 11): 319,
(5, 8, 12): 341,
(5, 9, 9): 297,
(5, 9, 10): 325,
(5, 9, 11): 359,
(5, 9, 12): 384,
(5, 10, 10): 352,
(5, 10, 11): 392,
(5, 10, 12): 424,
(5, 11, 11): 429,
(5, 11, 12): 462,
(5, 12, 12): 491,
(6, 6, 6): 160,
(6, 6, 7): 185,
(6, 6, 8): 203,
(6, 6, 9): 225,
(6, 6, 10): 247,
(6, 6, 11): 268,
(6, 6, 12): 280,
(6, 7, 7): 215,
(6, 7, 8): 239,
(6, 7, 9): 273,
(6, 7, 10): 297,
(6, 7, 11): 324,
(6, 7, 12): 344,
(6, 8, 8): 266,
(6, 8, 9): 296,
(6, 8, 10): 336,
(6, 8, 11): 368,
(6, 8, 12): 390,
(6, 9, 9): 343,
(6, 9, 10): 375,
(6, 9, 11): 416,
(6, 9, 12): 435,
(6, 10, 10): 406,
(6, 10, 11): 454,
(6, 10, 12): 488,
(6, 11, 11): 496,
(6, 11, 12): 530,
(6, 12, 12): 560,
(7, 7, 7): 250,
(7, 7, 8): 279,
(7, 7, 9): 321,
(7, 7, 10): 352,
(7, 7, 11): 387,
(7, 7, 12): 414,
(7, 8, 8): 310,
(7, 8, 9): 360,
(7, 8, 10): 398,
(7, 8, 11): 438,
(7, 8, 12): 468,
(7, 9, 9): 406,
(7, 9, 10): 450,
(7, 9, 11): 492,
(7, 9, 12): 522,
(7, 10, 10): 494,
(7, 10, 11): 543,
(7, 10, 12): 576,
(7, 11, 11): 589,
(7, 11, 12): 626,
(7, 12, 12): 660,
(8, 8, 8): 343,
(8, 8, 9): 392,
(8, 8, 10): 443,
(8, 8, 11): 492,
(8, 8, 12): 508,
(8, 9, 9): 430,
(8, 9, 10): 492,
(8, 9, 11): 543,
(8, 9, 12): 570,
(8, 10, 10): 559,
(8, 10, 11): 610,
(8, 10, 12): 645,
(8, 11, 11): 660,
(8, 11, 12): 699,
(8, 12, 12): 735,
(9, 9, 9): 511,
(9, 9, 10): 540,
(9, 9, 11): 600,
(9, 9, 12): 600,
(9, 10, 10): 625,
(9, 10, 11): 680,
(9, 10, 12): 708,
(9, 11, 11): 754,
(9, 11, 12): 768,
(9, 12, 12): 800,
(10, 10, 10): 686,
(10, 10, 11): 758,
(10, 10, 12): 812,
(10, 11, 11): 836,
(10, 11, 12): 894,
(10, 12, 12): 936,
(11, 11, 11): 919,
(11, 11, 12): 972,
(11, 12, 12): 1022,
(12, 12, 12): 1040
}
def get_sota_rank(a: int, b: int, c: int) -> int:
"""Returns best known rank of T_{a, b, c} (without recombination results)."""
a, b, c = sorted([a, b, c])
if a == 0:
return 0
if a == 1:
return b * c
if a == 2:
# Hopcroft & Kerr.
return int(math.ceil((3 * b * c + max(b, c)) / 2))
return _SOTA[(a, b, c)]
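# Worked examples (illustrative, not part of the original module):
#
#   get_sota_rank(1, 5, 7)  # == 35 (trivial case: b * c scalar products)
#   get_sota_rank(2, 3, 4)  # == 20, via Hopcroft & Kerr: ceil((3*3*4 + 4) / 2)
#   get_sota_rank(5, 4, 4)  # == 63, the T_{4, 4, 5} rank found by AlphaTensor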
| alphatensor-main | recombination/sota.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test correctness of the presented fast matrix multiplication algorithms."""
import unittest
from absl.testing import absltest
import jax
from jax.config import config
import jax.numpy as jnp
import numpy as np
from alphatensor.benchmarking import factorizations
from alphatensor.benchmarking import utils
class MatrixMultiplicationCorrectnessTest(unittest.TestCase):
"""Tests of matrix multiplication correctness."""
def testTensorDecompositionGPU(self):
"""Tests that the factors decompose the matrix multiplication tensor."""
u, v, w = factorizations.get_4x4x4_alphatensor_gpu()
reconstructed_tensor = np.einsum('ir,jr,kr->ijk', u, v, w)
expected_tensor = utils.get_matrix_multiplication_tensor(4)
np.testing.assert_array_equal(reconstructed_tensor, expected_tensor)
def testTensorDecompositionTPU(self):
"""Tests that the factors decompose the matrix multiplication tensor."""
u, v, w = factorizations.get_4x4x4_alphatensor_tpu()
reconstructed_tensor = np.einsum('ir,jr,kr->ijk', u, v, w)
expected_tensor = utils.get_matrix_multiplication_tensor(4)
np.testing.assert_array_equal(reconstructed_tensor, expected_tensor)
def testGPUMatrixMultiplicationPrecision(self):
"""Compares standard multiplication against using the proposed algorithm.
Compare the result of multiplying two matrices via jnp.dot vs via the
proposed fast algorithm (up to numerical precision).
"""
config.update('jax_enable_x64', True)
factors = factorizations.get_4x4x4_alphatensor_gpu()
matrix_multiplication_algorithm = utils.algorithm_from_factors(factors)
# Generate the matrices to be multiplied.
rng1, rng2 = jax.random.split(jax.random.PRNGKey(42))
full_a = jax.random.uniform(rng1, (1024, 1024), dtype=jnp.float64)
full_b = jax.random.uniform(rng2, (1024, 1024), dtype=jnp.float64)
a = utils.block_split(full_a, 4, 4)
b = utils.block_split(full_b, 4, 4)
actual_result = matrix_multiplication_algorithm(a, b)
actual_result = np.bmat(actual_result)
desired_result = full_a @ full_b
np.testing.assert_allclose(actual_result, desired_result)
if __name__ == '__main__':
absltest.main()
| alphatensor-main | benchmarking/test_correctness.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Reproducing GPU speedup reported in the AlphaTensor paper.
Showing GPU speedup of the provably correct fast matrix multiplication
algorithm discovered by AlphaTensor compared to the Strassen^2 baseline.
You should get around 8.5% speedup for multiplying matrices of size 8192 x 8192
in float32 precision on NVIDIA V100 GPU.
This code requires sudo access to set the GPU clock frequency (to reduce
benchmarking variance).
Ideally this code should be run on a server that is not used by others at the
same time to remove interference.
The code was tested on an n1-standard-96 Google Cloud VM instance with eight
V100 GPUs, Intel Skylake CPU, using the "TensorFlow Enterprise 2.9 (CUDA 11.3)"
image, and with Jax installed by running the following commands:
```bash
pip install --upgrade pip
pip install --upgrade "jax[cuda]" \
-f https://storage.googleapis.com/jax-releases/jax_releases.html
```
"""
import subprocess
import numpy as np
# Might be needed on GCP because of the following bug:
# https://github.com/google/jax/issues/9218
import scipy.signal # pylint: disable=unused-import
from alphatensor.benchmarking import factorizations
from alphatensor.benchmarking import utils
def main():
process = subprocess.Popen(['nvidia-smi'], stdout=subprocess.PIPE)
output, _ = process.communicate()
if 'V100' not in str(output):
    raise ValueError('To reproduce the results from the paper, please run on a'
                     ' server with a V100 GPU.')
print('Fixing GPU clock frequency to 1530 to reduce benchmarking variance...')
process = subprocess.Popen(
'sudo nvidia-smi -pm ENABLED -i 0'.split(' '), stdout=subprocess.PIPE)
output, _ = process.communicate()
process = subprocess.Popen(
'sudo nvidia-smi --lock-gpu-clocks=1530,1530'.split(' '),
stdout=subprocess.PIPE)
output, _ = process.communicate()
print('Done.')
num_trials = 10
matrix_sizes = [8192, 10240, 12288, 14336, 16384, 18432, 20480]
factorized_algorithms = [
('Strassen^2', factorizations.get_4x4x4_strassen_squared()),
('AlphaTensor GPU-optimized', factorizations.get_4x4x4_alphatensor_gpu()),
('AlphaTensor TPU-optimized', factorizations.get_4x4x4_alphatensor_tpu()),
]
for s in matrix_sizes:
print(f'Multiplying {s} x {s} matrices')
print('='*40)
results_dot = utils.benchmark_jnp_dot((s, s, s), num_trials=num_trials)
for algorithm_name, factorization in factorized_algorithms:
if algorithm_name == 'AlphaTensor TPU-optimized' and s > 19000:
continue # This TPU-optimized algorithm runs OOM on a V100 GPU.
results_algorithm = utils.benchmark_factorized_algorithm(
factorization, (s, s, s), num_trials=num_trials)
ratio = np.median(results_dot / results_algorithm)
improvement = 100 * ratio - 100
print('%s vs `jnp.dot`: %0.2f%% speedup' % (algorithm_name, improvement))
print('\n\n')
if __name__ == '__main__':
main()
| alphatensor-main | benchmarking/run_gpu_benchmark.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Factorizations discovered by AlphaTensor and the Strassen^2 factorization."""
import numpy as np
def get_4x4x4_alphatensor_gpu() -> np.ndarray:
"""Returns a factorization for fast matrix multiplication on NVIDIA V100 GPUs.
This factorization was discovered by AlphaTensor while optimizing for the
runtime of multiplying two 8192 x 8192 matrices on an NVIDIA V100 GPU in
`float32`.
Returns:
[3, 16, 49]-shaped array representing a rank-49 factorization of the
(symmetrized version of the) matrix multiplication tensor T_4 = <4, 4, 4>
in standard arithmetic.
"""
u = np.array([
[-1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
1, 0, -1, -1, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0,
0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, -1, 1, 0, 0, 0, 0, 1, 0, 0, 1, -1, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, -1, 0, -1, 0, 1, 0, 0, 0, 0, -1, 1, 1, 0, 1, 0, 1, 0, 0,
1, 0, 0],
[1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1,
0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, -1,
0],
[-1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, -1, 0, 1, 0, 1, 0, -1, 1, 0, 0, 0, 1, 0, -1, 0, -1, 0, -1, 0,
0, -1, 1, 0],
[-1, 0, 0, -1, 0, 0, 0, 1, 0, 0, 0, -1, 0, -1, 0, 1, 0, 1, 0, 1, 0, 0, 0,
0, 0, 0, -1, 1, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, -1, -1, 0, -1, 1, 0, 0,
-1, 0, 0, 0],
[1, 0, 0, 1, 0, 0, 0, -1, 1, 0, 0, 0, 0, 1, 0, 0, 0, -1, 0, -1, 0, 0, 0,
0, 0, 0, 1, 0, 0, 0, -1, 0, 1, 0, 1, 0, 0, -1, 1, 1, 0, 1, 0, 0, 0, 1,
0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, -1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, -1, 0, 0, 0,
0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 1, 0, 1, -1, 0, 0, 1,
0, -1, 0],
[-1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, -1, -1, 0, 0, 0, 0, 1, 0, 0, 0,
0, 0, 0, -1, 0, 0, 0, 1, 0, -1, 1, -1, 1, 0, 1, 0, -1, 0, -1, 0, 0, 0,
-1, 0, 1, 0],
[-1, 0, 0, 0, -1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0,
0, 0, 0],
[1, 0, 1, 0, 0, 0, 0, -1, 1, 0, -1, 0, -1, 1, 0, 0, 1, -1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, -1, 1, 0, 0, 0, 0, -1, 1, 0, 0, 1, 0, 1, 0, 0,
1, 0, 0],
[1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0,
0],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1, 0, 0, 1, -1,
0, -1, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-1, 0, 0],
[-1, 0, 0, -1, 0, 0, 0, 1, 0, 0, 1, -1, 1, -1, 0, 1, 0, 1, 0, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 1, 0, -1, 0, 0, -1, 1, 0, 0, 0,
0, 0, 1],
[1, 0, 0, 1, 0, 0, 0, -1, 1, 0, -1, 0, -1, 1, 0, 0, 0, -1, 0, -1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 1, 0, 1, 0, -1, -1, 1, 0, 0, 1, 0, 0, 0,
0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0,
0],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1, 0, 0, 0, -1,
0, -1, 0, 0, 0, 0, 0, 0, 0, -1, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,
0, 0, 0]], dtype=np.int32)
v = np.array([
[0, -1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
-1, 0, -1, 0, -1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0,
0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, -1, 0, 1, 0, 0, 0, 0, 0, 0,
0, 0, -1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, -1, -1, -1, 0, 1, 1, 0, 0, 0, 0,
1, 0],
[0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
-1],
[0, -1, 0, 0, 0, 0, 0, -1, 0, 0, 1, 0, -1, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, -1, 0, 0, -1, 1, 0, -1, 0, 0, 0, 1, 0, 1, 0, -1, -1, 0, 0,
0, 0, -1, 1],
[0, -1, 0, 1, 0, 0, 0, -1, 0, -1, 0, 0, 0, 0, -1, 1, 1, 0, 0, 1, 0, 0, 0,
0, 0, 0, 0, -1, 1, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, 1, 0, 0, -1, 1, 0, -1,
0, 0, 0],
[0, 1, 0, -1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, -1, 0, 0, 0,
0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, -1, -1, -1, 0, 0, 1, 0, 0, 1,
0, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, -1, 0, 0, 0, 0,
0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0, -1, 0, 0, 1, -1, 0, 1, 0,
0, -1],
[0, -1, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1, 0, -1, 0, 0, 0, 0, 1, 0, 0, 0,
0, 0, 0, 0, -1, 0, 0, -1, 1, 0, -1, 0, -1, 1, 1, 0, 1, 0, 0, -1, 0, 0,
-1, 0, 0, 1],
[0, -1, 0, 0, 1, -1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
-1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0,
0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, -1, 0, -1, 1, -1, 0, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 1, 0, 0, 0, -1, -1, 0, 0, 1, 1, 0, 0, 0,
0, 1, 0],
[0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0],
[0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, -1, 0, -1, 0, 1,
0, -1, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, -1, 0],
[0, -1, 0, 1, 0, 0, 0, -1, 0, -1, 0, 1, 0, 1, -1, 1, 1, 0, 0, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 1, 0, 0, 0, -1, 1, 0, 0,
1, 0, 0],
[0, 1, 0, -1, 0, 0, 0, 1, 1, 0, 0, -1, 0, -1, 1, -1, 0, 0, 0, -1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 1, -1, 1, 0, -1, -1, 0, 0, 0, 1, 0, 0,
0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,
0],
[0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, -1, 0, -1, 0, 0,
0, -1, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0,
0, 0, 0]], dtype=np.int32)
w = np.array([
[0, 0, -1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0,
1, 0, 1, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0,
0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, -1, 1, 0, 0, 0, 1, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, -1, 1, 0, -1, 1, 0, 0, 0, 0, 0, -1, 1, 1, 0, 0, 1, 1, 0, 0,
0, 0, 1],
[0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1,
0, -1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, -1,
0, 0],
[0, 0, -1, 0, 0, 0, 0, 1, 0, 0, 0, 1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, -1, 0, 1, -1, 1, 0, 0, 0, 0, 1, 0, -1, 0, 0, -1, -1, 0,
0, 1, 0, -1],
[0, 0, -1, 1, 0, 0, 0, 1, 0, 0, -1, 0, -1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0,
0, 0, 0, 1, 0, -1, 0, 0, 0, 0, 0, 0, 0, -1, 0, -1, -1, 0, 1, 0, -1, 0,
-1, 0, 0, 0],
[0, 0, 1, -1, 0, 0, 0, -1, 1, 0, 0, 0, 1, 0, 0, 0, -1, 0, 0, -1, 0, 0, 0,
0, 0, 0, 0, 0, 1, 0, -1, 1, 0, 0, 0, 0, 1, -1, 1, 1, 0, 0, 0, 1, 0, 1,
0, 0, 0],
[1, 0, 1, 0, 0, 0, 0, -1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, -1, 0, 0, 0,
0, 0, 0, -1, 0, 1, 0, 0, 0, 0, 0, -1, 0, 1, 0, 0, 1, 0, -1, 0, 1, 0, 1,
-1, 0, 0],
[0, 0, -1, 0, 0, 0, 0, 1, 0, 0, 0, 0, -1, -1, 0, 0, 0, 0, 0, 1, 0, 0, 0,
0, 0, 0, 0, 0, -1, 0, 1, -1, 1, 0, 1, 0, -1, 1, 0, -1, 0, 0, 0, -1, 0,
-1, 1, 0, 0],
[0, 0, -1, 0, 0, 1, -1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0,
0, 0, 0],
[0, 1, 1, 0, 0, 0, 0, -1, 1, -1, 0, 0, 1, 0, -1, 1, -1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 0, -1, 0, 0, 0, -1, 1, 0, 0, 0, 1, 1, 0, 0,
0, 0, 1],
[0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0,
0],
[0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 1, 0, 1, -1, 0,
0, -1, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, -1],
[0, 0, -1, 1, 0, 0, 0, 1, 0, 1, -1, 0, -1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 0, -1, 0, 0, 1, 0, -1, 0, 0,
0, 1, 0],
[0, 0, 1, -1, 0, 0, 0, -1, 1, -1, 0, 0, 1, 0, -1, 0, -1, 0, 0, -1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, -1, 0, -1, 1, -1, 1, 0, 0, 0, 0, 1, 0,
0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,
0],
[0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 1, 0, 0, -1, 0,
0, -1, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, -1, 0,
0, 0, 0]], dtype=np.int32)
return np.array([u, v, w])
def get_4x4x4_alphatensor_tpu() -> np.ndarray:
"""Returns a factorization for fast matrix multiplication on a Google TPUv2.
This factorization was discovered by AlphaTensor while optimizing for the
runtime of multiplying two 8192 x 8192 matrices on a TPUv2 in `bfloat16`.
Returns:
[3, 16, 49]-shaped array representing a rank-49 factorization of the
(symmetrized version of the) matrix multiplication tensor T_4 = <4, 4, 4>
in standard arithmetic.
"""
u = np.array([
[1, 1, 0, 0, 0, 1, -1, 1, -1, 0, 1, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 1, 0,
0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, -1, 0, 1, 0,
0, 0],
[0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, -1, 0, 1, 0, 0, 0, 0, -1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0,
0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0, 1, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 1, 0,
-1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 1, 0,
0, 0],
[0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 1, 0, 0, 0, 0,
1],
[1, 0, 1, -1, 1, 0, 0, 1, -1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1,
0, 0, 0, 1, 0, -1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, -1, 0, 0,
0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, -1, 0, 1, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1, 0, 1, 0, 0, 0,
0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1,
0, 0],
[1, 1, 0, 0, 0, 1, -1, 0, 0, 1, 0, 1, -1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0,
0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, -1, 0, 0, 0, 0, 0,
1, 0],
[0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, -1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, -1, 0,
0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, -1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, -1, 0, 0, 0, 0,
0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, -1, 0, 0, -1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1,
0, 0],
[0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, -1, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0, -1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
0, 0],
[1, 0, 1, -1, 1, 0, 0, 0, 0, 1, 0, 1, -1, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0,
1, 0, 1, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, -1, 0, -1, 0,
1, 0, 0]], dtype=np.int32)
v = np.array([
[1, 0, 1, 0, -1, 0, 1, 0, 1, -1, 0, 1, 0, 0, -1, 0, 0, 0, 0, 1, 0, 0, 1,
0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, -1, 0, 0,
0, 0],
[0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, -1, 0, 1, 0, -1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0,
0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0, 1, 0, 0, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 1, 0, 0, 0, 0,
-1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 1, 0, 0, 0,
0, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0,
0],
[1, -1, 0, 1, 0, 1, 0, 0, 1, -1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0,
1, 0, 0, 0, 1, 0, -1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, -1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
0, -1, 0, 0, 0, 0, 1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, -1, 0, 1, 0, 0,
0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-1, 0],
[1, 0, 1, 0, -1, 0, 1, 1, 0, 0, -1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, -1, 0, 0, 0, 0,
0, 1],
[0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0,
-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
-1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, -1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, -1, 0, 0, 0,
0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, -1, 0, 0,
-1, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 0, -1, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
0, 0],
[1, -1, 0, 1, 0, 1, 0, 1, 0, 0, -1, 0, 1, -1, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 1, 0, 1, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, -1, 0, 0, 0, 0, -1,
0, 1, 0]], dtype=np.int32)
w = np.array([
[1, 0, 0, 1, 1, -1, 0, -1, 0, 1, 0, 0, 1, 0, 0, -1, 0, 0, 0, 0, 1, 0, 0,
1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, -1, 0,
0, 0],
[0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, -1, 0, 0, 0, 0, 1, 0, -1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0,
0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0, 1, 0, 0, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 1, 0, -1,
0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 1, 0, 0,
0, 0],
[0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 1, 0, 1,
0],
[1, 1, -1, 0, 0, 0, 1, -1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0,
0, 1, 1, 0, 0, 0, 0, -1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, -1, 0, 0, 0,
0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0,
0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, -1, 0, 1, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1, 0, 0, 0, 0, 1, 0,
0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-1],
[1, 0, 0, 1, 1, -1, 0, 0, 1, 0, 1, -1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0,
0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, -1, 0, 0, 0, 0, 0, 1,
0, 0],
[0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, -1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, -1, 0,
0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0,
-1, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 1],
[0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, -1, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0,
0, 0],
[1, 1, -1, 0, 0, 0, 1, 0, 1, 0, 1, -1, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 1,
0, 0, 0, 0, 1, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, -1, 0, -1, 0, 0,
0, 0, 1]], dtype=np.int32)
return np.array([u, v, w])
def _get_2x2x2_strassen() -> np.ndarray:
"""Returns [3, 4, 7] array, representing a rank-7 factorization of T_2."""
# List of 7 factors, each of shape [3, 4].
factors = [[[1, 0, 0, 1], [1, 0, 0, 1], [1, 0, 0, 1]],
[[1, 0, 0, 0], [0, 1, 0, -1], [0, 0, 1, 1]],
[[0, 1, 0, -1], [0, 0, 1, 1], [1, 0, 0, 0]],
[[0, 0, 1, 1], [1, 0, 0, 0], [0, 1, 0, -1]],
[[0, 0, 0, 1], [-1, 0, 1, 0], [1, 1, 0, 0]],
[[-1, 0, 1, 0], [1, 1, 0, 0], [0, 0, 0, 1]],
[[1, 1, 0, 0], [0, 0, 0, 1], [-1, 0, 1, 0]]]
  # Transpose into our standard format [3, S, R] = [3, 4, 7].
return np.transpose(np.array(factors, dtype=np.int32), [1, 2, 0])
def _product_factors(factors1: np.ndarray, factors2: np.ndarray) -> np.ndarray:
"""Computes the Kronecker product of `factors1` and `factors2`.
Args:
factors1: [3, n1**2, R1] factors of a tensor T1
factors2: [3, n2**2, R2] factors of a tensor T2
Returns:
[3, n1**2 * n2 ** 2, R1 * R2] factorization of the Kronecker square tensor
Reshape(kron(RT1, RT2)), where `RT1` and `RT2` are the reshapes of T1 and T2
into 6-dimensional tensors, and `Reshape` reshapes the tensor back into a
3-dimensional one.
"""
_, side1, rank1 = np.shape(factors1)
_, side2, rank2 = np.shape(factors2)
n1 = int(np.round(np.sqrt(side1)))
n2 = int(np.round(np.sqrt(side2)))
if n1 * n1 != side1 or n2 * n2 != side2:
    raise ValueError(f'The sides {side1}, {side2} of factors passed to '
                     '`_product_factors` must both be perfect squares.')
product = np.einsum('...abi,...cdj->...acbdij',
factors1.reshape((3, n1, n1, rank1)),
factors2.reshape((3, n2, n2, rank2))
) # [3, n1, n2, n1, n2, R1, R2]
return np.reshape(product, (3, n1 * n2 * n1 * n2, rank1 * rank2))
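# Shape example (illustrative, not part of the original module): squaring
# Strassen's [3, 4, 7] factorization of T_2 yields a [3, 16, 49] factorization
# of T_4, i.e. rank 7 * 7 = 49 on side (2 * 2)^2 = 16:
#
#   _product_factors(_get_2x2x2_strassen(), _get_2x2x2_strassen()).shape
#   # == (3, 16, 49)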
def get_4x4x4_strassen_squared() -> np.ndarray:
"""Returns Strassen^2 factorization for fast multiplication of 4x4 matrices.
This factorization is obtained by squaring (recursively applying twice)
Strassen's rank-7 factorization of T_2.
Returns:
[3, 16, 49]-shaped array representing a rank-49 factorization of the
(symmetrized version of the) matrix multiplication tensor T_4 = <4, 4, 4>
in standard arithmetic.
"""
strassen = _get_2x2x2_strassen() # [3, 4, 7]
return _product_factors(strassen, strassen) # [3, 16, 49]
| alphatensor-main | benchmarking/factorizations.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for working with matrix multiplication tensor factorizations."""
import gc
import timeit
from typing import Callable, List, Tuple
import jax
import jax.numpy as jnp
import numpy as np
import tree
BlockMatrix = List[List[jnp.ndarray]]
def block_split(matrix: jnp.ndarray, n_rows: int, n_cols: int) -> BlockMatrix:
"""Splits `matrix` into a `n_rows x n_cols` block matrix."""
rows = jnp.split(matrix, n_rows, axis=0)
return [jnp.split(row, n_cols, axis=1) for row in rows]
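# Shape example (illustrative, not part of the original module): splitting a
# [4, 6] matrix into a 2 x 3 block matrix gives a nested list of [2, 2] blocks:
#
#   blocks = block_split(jnp.ones((4, 6)), n_rows=2, n_cols=3)
#   # len(blocks) == 2; len(blocks[0]) == 3; blocks[0][0].shape == (2, 2)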
def get_matrix_multiplication_tensor(n: int) -> np.ndarray:
"""Returns the matrix multiplication tensor T_n.
For n >= 1, T_n is a 3D tensor of shape [n*n, n*n, n*n] representing
- the bilinear operation (A, B) -> (AB)^T where A, B are two [n, n] matrices,
- or equivalently the trilinear operation (A, B, C) -> trace(ABC), where
A, B, C are three [n, n] matrices.
Args:
n: Size of the matrix multiplication tensor to be returned.
Returns:
NumPy array of shape [n^2, n^2, n^2] representing the tensor T_n.
"""
result = np.full((n ** 2, n ** 2, n ** 2), 0, dtype=np.int32)
for i in range(n):
for j in range(n):
for k in range(n):
result[i * n + j][j * n + k][k * n + i] = 1
return result
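# Small example (illustrative, not part of the original module): T_n contains
# exactly n**3 ones, one for each scalar multiplication a_ij * b_jk performed
# by the naive algorithm. In particular:
#
#   get_matrix_multiplication_tensor(1)  # == np.array([[[1]]])
#   get_matrix_multiplication_tensor(2).sum()  # == 8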
def algorithm_from_factors(
factors: np.ndarray) -> Callable[[BlockMatrix, BlockMatrix], BlockMatrix]:
"""Returns a JAX function implementing the algorithm described by `factors`.
Args:
factors: Matricized factorization of a matrix multiplication tensor, i.e.
an array of shape [3, n, n, rank].
Returns:
Function, which given two block matrices `a` and `b` returns the block
matrix `c` given by `c = a @ b`.
"""
assert factors[0].shape[0] == factors[1].shape[0]
assert factors[1].shape[0] == factors[2].shape[0]
factors = [factors[0].copy(), factors[1].copy(), factors[2].copy()]
n = int(np.sqrt(factors[0].shape[0]))
rank = factors[0].shape[-1]
factors[0] = factors[0].reshape(n, n, rank)
factors[1] = factors[1].reshape(n, n, rank)
factors[2] = factors[2].reshape(n, n, rank)
# The factors are for the transposed (symmetrized) matrix multiplication
# tensor. So to use the factors, we need to transpose back.
factors[2] = factors[2].transpose(1, 0, 2)
def f(a: BlockMatrix, b: BlockMatrix) -> BlockMatrix:
"""Multiplies block matrices `a` and `b`."""
n = len(a)
result = [[None] * n for _ in range(n)]
for alpha in range(rank):
left = None
for i in range(n):
for j in range(n):
if factors[0][i, j, alpha] != 0:
curr = factors[0][i, j, alpha] * a[i][j]
if left is None:
left = curr
else:
left += curr
right = None
for j in range(n):
for k in range(n):
if factors[1][j, k, alpha] != 0:
curr = factors[1][j, k, alpha] * b[j][k]
if right is None:
right = curr
else:
right += curr
matrix_product = left @ right
for i in range(n):
for k in range(n):
if factors[2][i, k, alpha] != 0:
curr = factors[2][i, k, alpha] * matrix_product
if result[i][k] is None:
result[i][k] = curr
else:
result[i][k] += curr
return result
return f
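# Usage sketch (illustrative, not part of the original module; assumes the
# companion `factorizations` module from this package, mirroring how
# `_get_factorization_op` below wires things together):
#
#   factors = factorizations.get_4x4x4_strassen_squared()  # [3, 16, 49]
#   fast_matmul = jax.jit(algorithm_from_factors(factors))
#   a = block_split(jnp.ones((64, 64)), 4, 4)
#   b = block_split(jnp.ones((64, 64)), 4, 4)
#   c = fast_matmul(a, b)  # 4 x 4 nested list of [16, 16] blocks of a @ b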
def _get_n_from_factors(factors: np.ndarray) -> int:
"""Computes the matrix multiplication tensor size n based on `factors`.
  E.g. when multiplying 2x2 matrices with Strassen, `factors` has shape
  [3, 4, 7], and this function will return 2.
Args:
factors: [3, n^2, R] shaped NumPy array representing a factorization of T_n.
Returns:
n, the size of matrices being multiplied by the algorithm represented by
`factors`.
"""
u, v, w = factors
# Assert that the tensor is a cube.
assert u.shape[0] == v.shape[0]
assert u.shape[0] == w.shape[0]
n = int(np.sqrt(u.shape[0]))
assert u.shape[0] == n ** 2
return n
def _generate_random_matrices(matrix_dims: Tuple[int, int, int],
seed: int) -> Tuple[np.ndarray, np.ndarray]:
"""Generates two random NumPy matrices to be multiplied."""
np.random.seed(seed)
a = np.random.randn(matrix_dims[0], matrix_dims[1])
b = np.random.randn(matrix_dims[1], matrix_dims[2])
return a, b
def _device_put(*arrays, dtype: jnp.dtype) -> ...:
"""Converts NumPy arrays into JAX arrays and sends them to GPU."""
return tree.map_structure(
lambda x: jax.device_put(jnp.array(x).astype(dtype)), arrays)
def _get_baseline_op(matrix_dims: Tuple[int, int, int],
dtype: jnp.dtype,
n_repeat: int,
seed: int) -> Callable[[], None]:
"""Returns a function that applies `jnp.dot` `n_repeat` times."""
full_a, full_b = _generate_random_matrices(matrix_dims, seed=seed)
full_a, full_b = _device_put(full_a, full_b, dtype=dtype)
def _vanilla_single_timing() -> None:
c = full_b
for _ in range(n_repeat):
c = jnp.dot(full_a, c)
c.block_until_ready()
return _vanilla_single_timing
def _get_factorization_op(factors: np.ndarray,
matrix_dims: Tuple[int, int, int],
dtype: jnp.dtype,
n_repeat: int,
seed: int) -> Callable[[], None]:
"""Returns an op that applies the `factors` algorithm `n_repeat` times."""
n = _get_n_from_factors(factors)
full_a, full_b = _generate_random_matrices(matrix_dims, seed=seed)
a = block_split(full_a, n, n)
b = block_split(full_b, n, n)
a, b = _device_put(a, b, dtype=dtype)
jitted_algorithm = jax.jit(algorithm_from_factors(factors))
def _jitted_algorithm_timing() -> None:
c = b
for _ in range(n_repeat):
c = jitted_algorithm(a, c)
c[0][0].block_until_ready()
return _jitted_algorithm_timing
def _benchmark_op(op: Callable[[], None], num_trials: int) -> List[float]:
"""Benchmarks `op` `num_trials` times and returns all timings."""
# Warmup.
for _ in range(10):
op()
gc.disable() # Prevent garbage collection from interfering with timings.
timings = []
for _ in range(num_trials):
s = timeit.default_timer()
op()
e = timeit.default_timer()
timings.append(e - s)
gc.enable()
return timings
def benchmark_jnp_dot(matrix_dims: Tuple[int, int, int],
num_trials: int,
dtype: jnp.dtype = jnp.float32,
average: int = 20,
seed: int = 42) -> np.ndarray:
"""Benchmarks `jnp.dot`."""
baseline_op = _get_baseline_op(matrix_dims, dtype, average, seed)
timings = _benchmark_op(baseline_op, num_trials)
return np.array(timings) / average
def benchmark_factorized_algorithm(factors: np.ndarray,
matrix_dims: Tuple[int, int, int],
num_trials: int,
dtype: jnp.dtype = jnp.float32,
average: int = 20,
seed: int = 42) -> np.ndarray:
"""Benchmarks the fast matrix multiplication algorithm given by `factors`."""
factorization_algorithm_op = _get_factorization_op(
factors, matrix_dims, dtype, average, seed)
timings = _benchmark_op(factorization_algorithm_op, num_trials)
return np.array(timings) / average
| alphatensor-main | benchmarking/utils.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""DeepMind COVID-19 modelling."""
| dm_c19_modelling-main | __init__.py |