python_code (string, 0–780k chars) | repo_name (string, 7–38 chars) | file_path (string, 5–103 chars) |
---|---|---|
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launches a Launchpad program as a test, using multiple processes.
Usage:
class LaunchTest(absltest.TestCase):
def test_run_program(self):
program = ... # Create a program
# Configure process resources
local_resources = dict(
foo=lp.PythonProcess(
interpreter=...
)
)
lp.launch(program, launch_type='test_mp',
local_resources=local_resources, test_case=self)
"""
import os
import signal
import typing
from typing import Any, Mapping, Optional
from absl.testing import absltest
from launchpad import context
from launchpad import flags as lp_flags
from launchpad import program as lp_program
from launchpad.launch import worker_manager
from launchpad.launch import worker_manager_v2
def launch(program: lp_program.Program,
test_case: absltest.TestCase,
local_resources: Optional[Mapping[str, Any]] = None):
"""Launches a program using multiple processes as a test."""
# Set up the launch context (launch type & launch config) for all nodes
local_resources = local_resources or {}
for label, nodes in program.groups.items():
launch_config = local_resources.get(label, None)
for node in nodes:
node._initialize_context(
context.LaunchType.TEST_MULTI_PROCESSING,
launch_config=launch_config)
# Notify the input handles
for label, nodes in program.groups.items():
for node in nodes:
for handle in node.input_handles:
handle.connect(node, label)
# Bind addresses
for node in program.get_all_nodes():
node.bind_addresses()
label_to_commands = {}
process_handles = {}
for label, nodes in program.groups.items():
# to_executables() is a static method, so we can call it from any of the
# nodes in this group.
# Somehow pytype thinks to_executables() gets the wrong arg count.
# pytype: disable=wrong-arg-count
commands = nodes[0].to_executables(nodes, label, nodes[0].launch_context)
# pytype: enable=wrong-arg-count
if commands:
label_to_commands[label] = commands
# Don't create the actual processes yet, in case of failures in this loop.
process_handles[label] = []
if lp_flags.LP_WORKER_MANAGER_V2.value:
manager = worker_manager_v2.WorkerManager(
# Subprocess sends SIGINT back via lp.stop(), which stops the program.
kill_workers_upon_sigint=True,
# Kill all processes immediately.
termination_notice_secs=0)
else:
manager = worker_manager.WorkerManager(kill_main_thread=False)
for label, commands in label_to_commands.items():
for command in commands:
env = {}
env.update(os.environ)
env.update(command.env_overrides)
manager.process_worker(label, command.command_as_list, env=env)
def _cleanup(manager=manager):
if lp_flags.LP_WORKER_MANAGER_V2.value:
manager = typing.cast(worker_manager_v2.WorkerManager, manager)
manager._set_stop_event_and_terminate_process_workers(sig=signal.SIGKILL)
else:
typing.cast(worker_manager.WorkerManager,
manager).cleanup_after_test(test_case)
test_case.addCleanup(_cleanup)
return manager
| launchpad-master | launchpad/launch/test_multi_processing/launch.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for launchpad.launch.test_multi_processing.launch."""
from absl.testing import absltest
from absl.testing import flagsaver
from absl.testing import parameterized
import launchpad as lp
from launchpad import program
from launchpad.launch.test_multi_processing import launch
from launchpad.nodes.python import local_multi_processing
from launchpad.nodes.python import node as python
from launchpad.program_stopper import program_stopper
def _get_default_py_node_config():
return local_multi_processing.PythonProcess(
)
def _noop():
pass
def _fail():
raise RuntimeError('Some error.')
def _block():
while not lp.wait_for_stop(2):
pass
def _stop(stopper):
stopper()
class LaunchTest(parameterized.TestCase):
def setUp(self):
super().setUp()
launch.worker_manager._HAS_MAIN_MANAGER = False
@parameterized.parameters(False, True)
def test_wait_for_one(self, use_wm_v2):
self.enter_context(flagsaver.flagsaver(lp_worker_manager_v2=use_wm_v2))
p = program.Program('test')
p.add_node(python.PyNode(_noop), 'noop')
resources = dict(noop=_get_default_py_node_config())
processes = launch.launch(p, test_case=self, local_resources=resources)
if use_wm_v2:
p_dict = processes._process_workers # pytype:disable=attribute-error
else:
p_dict = processes._active_workers # pytype:disable=attribute-error
self.assertEqual(list(p_dict.keys()), ['noop'])
self.assertLen(p_dict['noop'], 1)
# Wait until termination
self.assertEqual(p_dict['noop'][0].wait(), 0)
@parameterized.parameters(False, True)
def test_wait_for_all(self, use_wm_v2):
self.enter_context(flagsaver.flagsaver(lp_worker_manager_v2=use_wm_v2))
p = program.Program('test')
p.add_node(python.PyNode(_noop), 'noop')
resources = dict(noop=_get_default_py_node_config())
processes = launch.launch(p, test_case=self, local_resources=resources)
processes.wait()
@parameterized.parameters(False, True)
def test_wait_for_some_only_waits_for_specified_node_groups(self, use_wm_v2):
self.enter_context(flagsaver.flagsaver(lp_worker_manager_v2=use_wm_v2))
p = program.Program('test')
with p.group('main'):
p.add_node(python.PyNode(_noop))
with p.group('daemon'):
p.add_node(python.PyNode(_block))
resources = dict(
main=_get_default_py_node_config(),
daemon=_get_default_py_node_config())
processes = launch.launch(p, test_case=self, local_resources=resources)
processes.wait(['main'])
@parameterized.parameters(False, True)
def test_wait_for_some_detects_exception_in_any_node_group(self, use_wm_v2):
self.enter_context(flagsaver.flagsaver(lp_worker_manager_v2=use_wm_v2))
p = program.Program('test')
with p.group('main'):
p.add_node(python.PyNode(_block))
with p.group('daemon'):
p.add_node(python.PyNode(_fail))
resources = dict(
main=_get_default_py_node_config(),
daemon=_get_default_py_node_config())
processes = launch.launch(p, test_case=self, local_resources=resources)
with self.assertRaises(RuntimeError):
processes.wait(['main'])
@parameterized.parameters(False, True)
def test_grouping(self, use_wm_v2):
self.enter_context(flagsaver.flagsaver(lp_worker_manager_v2=use_wm_v2))
# This verifies the process handles are grouped correctly
p = program.Program('test')
with p.group('foo'):
for _ in range(1):
p.add_node(python.PyNode(_noop))
with p.group('bar'):
for _ in range(2):
p.add_node(python.PyNode(_noop))
resources = dict(
foo=_get_default_py_node_config(), bar=_get_default_py_node_config())
processes = launch.launch(p, test_case=self, local_resources=resources)
if use_wm_v2:
p_dict = processes._process_workers # pytype:disable=attribute-error
else:
p_dict = processes._active_workers # pytype:disable=attribute-error
self.assertCountEqual(list(p_dict.keys()), ['foo', 'bar'])
self.assertLen(p_dict['foo'], 1)
self.assertLen(p_dict['bar'], 2)
@parameterized.parameters(False, True)
def test_program_stopper(self, use_wm_v2):
self.enter_context(flagsaver.flagsaver(lp_worker_manager_v2=use_wm_v2))
# This verifies the program stopper works for test_multi_processing
p = program.Program('test')
with p.group('block'):
p.add_node(python.PyNode(_block))
with p.group('stop'):
p.add_node(python.PyNode(_stop, program_stopper.make_program_stopper(
lp.context.LaunchType.TEST_MULTI_PROCESSING)))
resources = dict(
block=_get_default_py_node_config(), stop=_get_default_py_node_config())
processes = launch.launch(p, test_case=self, local_resources=resources)
processes.wait()
@parameterized.parameters(False, True)
def test_cleanup(self, use_wm_v2):
self.enter_context(flagsaver.flagsaver(lp_worker_manager_v2=use_wm_v2))
# Test verifies that test cleanup works.
p = program.Program('test')
with p.group('block'):
p.add_node(python.PyNode(_block))
resources = dict(block=_get_default_py_node_config())
launch.launch(p, test_case=self, local_resources=resources)
if __name__ == '__main__':
absltest.main()
| launchpad-master | launchpad/launch/test_multi_processing/launch_test.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature testing utilities."""
import distutils
import distutils.spawn
import os
from typing import Text
def _has_command(command: Text) -> bool:
"""Determines whether a command is available in the user's shell.
Args:
command: The name of the command.
Returns:
Whether the command exists.
"""
return distutils.spawn.find_executable(command) is not None
def has_x() -> bool:
"""Determines whether X is running."""
return bool(os.environ.get('DISPLAY', ''))
def has_xterm() -> bool:
"""Determines whether xterm can run."""
return has_x() and _has_command('xterm')
def has_tmux() -> bool:
"""Determines whether tmux can run."""
return _has_command('tmux')
def has_byobu() -> bool:
"""Determines whether byobu can run."""
return _has_command('byobu')
def has_gnome_terminal() -> bool:
"""Determines whether gnome-terminal can run."""
return has_x() and _has_command('gnome-terminal')
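# Illustrative sketch (not part of the original module): these predicates are
# typically combined to pick a launch mode, e.g.
#   if has_gnome_terminal():
#     ...  # launch with gnome-terminal
#   elif has_tmux():
#     ...  # launch with a tmux session
# See run_locally._get_terminal in this package for the actual selection logic.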
| launchpad-master | launchpad/launch/run_locally/feature_testing.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides functionality for running commands locally."""
import collections
import os
from typing import Optional, Sequence, Text, Union
from absl import logging
from launchpad.launch import worker_manager
from launchpad.launch import worker_manager_v2
from launchpad.launch.run_locally import feature_testing
from launchpad.launch.run_locally import launch_local_current_terminal
from launchpad.launch.run_locally import launch_local_gnome
from launchpad.launch.run_locally import launch_local_output_to_files
from launchpad.launch.run_locally import launch_local_tmux
from launchpad.launch.run_locally import launch_local_xterm
SEPARATE_TERMINAL_XTERM = 'xterm'
SEPARATE_TERMINAL_GNOME_TERMINAL_WINDOWS = 'gnome-terminal'
SEPARATE_TERMINAL_GNOME_TERMINAL_TABS = 'gnome-terminal-tabs'
SEPARATE_TERMINAL_TMUX_SESSION = 'tmux_session'
SEPARATE_TERMINAL_BYOBU_SESSION = 'byobu_session'
SEPARATE_TERMINAL_CURRENT_TERMINAL = 'current_terminal'
SEPARATE_TERMINAL_OUTPUT_TO_FILES = 'output_to_files'
SEPARATE_TERMINAL_MODES = (
SEPARATE_TERMINAL_XTERM,
SEPARATE_TERMINAL_GNOME_TERMINAL_WINDOWS,
SEPARATE_TERMINAL_GNOME_TERMINAL_TABS,
SEPARATE_TERMINAL_TMUX_SESSION,
SEPARATE_TERMINAL_BYOBU_SESSION,
SEPARATE_TERMINAL_CURRENT_TERMINAL,
SEPARATE_TERMINAL_OUTPUT_TO_FILES,
)
TERMINALS_FOR_X = (
SEPARATE_TERMINAL_XTERM,
SEPARATE_TERMINAL_GNOME_TERMINAL_WINDOWS,
SEPARATE_TERMINAL_GNOME_TERMINAL_TABS,
)
# Map terminal name to the corresponding launch function
_LOCAL_LAUNCHER_MAP = {
SEPARATE_TERMINAL_XTERM:
launch_local_xterm.launch_with_xterm,
SEPARATE_TERMINAL_GNOME_TERMINAL_WINDOWS:
launch_local_gnome.launch_with_gnome_terminal_windows,
SEPARATE_TERMINAL_GNOME_TERMINAL_TABS:
launch_local_gnome.launch_with_gnome_terminal_tabs,
SEPARATE_TERMINAL_TMUX_SESSION:
launch_local_tmux.launch_with_tmux_session,
SEPARATE_TERMINAL_BYOBU_SESSION:
launch_local_tmux.launch_with_byobu_session,
SEPARATE_TERMINAL_CURRENT_TERMINAL:
launch_local_current_terminal.launch_in_current_terminal,
SEPARATE_TERMINAL_OUTPUT_TO_FILES:
launch_local_output_to_files.launch_and_output_to_files,
}
class CommandToLaunch(
collections.namedtuple(
'command_to_launch',
['command_as_list', 'env_overrides', 'resource_name', 'worker_name'])):
@property
def title(self):
return '{}_{}'.format(self.resource_name, self.worker_name)
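# Illustrative sketch (the values below are assumed, not from the original
# code): constructing a CommandToLaunch for worker 0 of a 'learner' group.
#   cmd = CommandToLaunch(
#       command_as_list=['/usr/bin/python3', '-m', 'my_module'],
#       env_overrides={'CUDA_VISIBLE_DEVICES': '0'},
#       resource_name='learner',
#       worker_name='0')
#   cmd.title  # -> 'learner_0'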
def _get_terminal(given_terminal: Optional[Text]):
"""Returns the terminal for local launch based on X & command availability.
In order of priority it will:
- use the provided `given_terminal`
- default to the shell environment variable `LAUNCHPAD_LAUNCH_LOCAL_TERMINAL`
if set
- or select the first supported option in: Gnome, Tmux, Xterm and current
terminal.
Args:
given_terminal: The terminal identifier to use or `None`.
Returns:
One of the legal terminal modes (a string in SEPARATE_TERMINAL_MODES) based
on the priority described above.
"""
if (given_terminal is not None and
given_terminal not in SEPARATE_TERMINAL_MODES):
raise ValueError('`terminal` got a value it does not understand: %r. '
'Please choose from %r.' %
(given_terminal, SEPARATE_TERMINAL_MODES))
terminal = given_terminal or os.environ.get('LAUNCHPAD_LAUNCH_LOCAL_TERMINAL',
None)
# Set terminal to None, if the chosen terminal cannot be used because we are
# running without X.
if not feature_testing.has_x() and terminal in TERMINALS_FOR_X:
logging.info('Not using %s to launch, since DISPLAY is not set.', terminal)
terminal = None
if terminal is None:
if feature_testing.has_gnome_terminal():
terminal = SEPARATE_TERMINAL_GNOME_TERMINAL_WINDOWS
elif feature_testing.has_tmux():
terminal = SEPARATE_TERMINAL_TMUX_SESSION
elif feature_testing.has_xterm():
terminal = SEPARATE_TERMINAL_XTERM
# Examine the type of terminal and explain why it is chosen.
if terminal is None:
logging.info('Launching in the same console since we cannot find '
'gnome-terminal, tmux, or xterm.')
terminal = SEPARATE_TERMINAL_CURRENT_TERMINAL
else:
logging.info(
'Launching with %s because the `terminal` launch option '
'is not explicitly specified. To remember your preference '
'(assuming tmux_session is the preferred option), either: \n'
'1. Pass the `terminal` launch option (e.g., '
'`lp.launch(program, terminal="tmux_session")`).\n'
'2. Set the following in your bashrc to remember your '
'preference:\n'
' export LAUNCHPAD_LAUNCH_LOCAL_TERMINAL=tmux_session', terminal)
return terminal
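# Illustrative example of the priority order above (shell environment
# assumed): with no explicit `terminal` argument and
#   export LAUNCHPAD_LAUNCH_LOCAL_TERMINAL=tmux_session
# set in the shell, _get_terminal(None) returns 'tmux_session'; without the
# variable it falls back to gnome-terminal, then tmux, then xterm, and finally
# the current terminal.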
def run_commands_locally(
commands: Sequence[CommandToLaunch], terminal=None
) -> Union[worker_manager.WorkerManager,
worker_manager_v2.WorkerManager]:
"""Launches a program using multiple processes."""
# Minimally validate all the commands before executing any of them. This also
# gives better errors in the case that a terminal implementation executes
# the commands via a wrapper.
for command in commands:
if not os.access(command.command_as_list[0], os.X_OK):
raise ValueError("Unable to execute '%s'" % command.command_as_list[0])
return _LOCAL_LAUNCHER_MAP[_get_terminal(terminal)](commands)
| launchpad-master | launchpad/launch/run_locally/run_locally.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run commands to launch Launchpad workers in tmux."""
import atexit
import os
import subprocess
from typing import Union
from absl import flags
from absl import logging
from launchpad import flags as lp_flags
from launchpad.launch import worker_manager
from launchpad.launch import worker_manager_v2
from launchpad.launch.run_locally import feature_testing
tty_write = print
def launch_with_tmux_session(
commands_to_launch,
session_name_prefix=None) -> Union[worker_manager.WorkerManager,
worker_manager_v2.WorkerManager]:
"""Launch multiple CommandToLaunch tuples in a new tmux session."""
if not feature_testing.has_tmux():
raise ValueError(
'tmux is not available, please choose another way to launch '
'or install it.')
session_name_prefix = session_name_prefix or flags.FLAGS.tmux_session_name
return _launch_with_multiplex_session(commands_to_launch,
session_name_prefix,
'tmux')
def launch_with_byobu_session(
commands_to_launch,
session_name_prefix='launchpad'
) -> Union[worker_manager.WorkerManager,
worker_manager_v2.WorkerManager]:
"""Launch multiple CommandToLaunch tuples in a new byobu session."""
if not feature_testing.has_byobu():
raise ValueError(
'byobu is not available, please choose another way to launch '
'or install it.')
return _launch_with_multiplex_session(commands_to_launch,
session_name_prefix,
'byobu')
def _launch_with_multiplex_session(
commands_to_launch,
session_name_prefix,
multiplexer) -> Union[worker_manager.WorkerManager,
worker_manager_v2.WorkerManager]:
"""Launch multiple CommandToLaunch tuples in a new multiplex session.
Args:
commands_to_launch: An iterable of `CommandToLaunch` namedtuples.
session_name_prefix: Leading part of the name given to the new tmux session.
If there is no existing session with this name it will be used as-is;
otherwise the name is uniquified by appending an incrementing counter.
multiplexer: Either 'tmux' or 'byobu'.
Returns:
Worker manager that can be used to wait for a program execution to finish.
"""
# Make a new session with the unmodified name, if this fails add a suffix to
# the name and retry.
session_name = session_name_prefix
suffix_index = 0
while True:
try:
subprocess.check_output(
[multiplexer, 'new-session', '-d', '-s', session_name],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
if 'duplicate session' in e.output.decode():
logging.info('%r session %r already exists, trying to uniquify...',
multiplexer, session_name)
session_name = '{}_{}'.format(session_name_prefix, suffix_index)
suffix_index += 1
else:
raise e # If `tmux new-session` failed for some other reason.
else:
break
def get_session_processes():
p = subprocess.run([
multiplexer, 'list-panes', '-t', session_name, '-s', '-F',
'"#{pane_pid}"'], stdout=subprocess.PIPE, check=True)
# Collect the pids of all panes in the tmux session so the worker manager can track (and later kill) them.
return [int(pid) for pid in p.stdout.replace(b'"', b'').strip().split()]
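# Note (added for clarity; example values assumed): `list-panes -s -F
# '"#{pane_pid}"'` prints one quoted pid per pane, e.g. b'"1234"\n"1235"',
# which the line above parses into [1234, 1235].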
# Copy over the environment of the current process to the new session.
for key, value in os.environ.items():
subprocess.check_call(
[multiplexer, 'set-environment', '-t', session_name, key, value])
# For each node to run, create the corresponding launch command
# and run it with subprocess.Popen.
for command_to_launch in commands_to_launch:
# Apply command-specific overrides to environment variables.
env_as_list = [
f'{k}={v}' for k, v in command_to_launch.env_overrides.items()]
# When the program is done, echo the command so it can be copy-pasted, and
# then drop into a shell.
command_str = subprocess.list2cmdline(env_as_list +
command_to_launch.command_as_list)
inner_command = f'{command_str}; echo "{command_str}"; exec $SHELL'
window_name = command_to_launch.title
command = [
multiplexer,
'new-window',
'-t',
session_name,
'-n',
window_name,
inner_command,
]
# Open the command in a new window of the session; Popen returns immediately rather than blocking on the command.
subprocess.Popen(command)
tty_write(
f'Opened new {multiplexer} session called `{session_name}`. '
f'If you are already in a tmux session, use `Ctrl+B W` as a '
f'convenient way to switch to the new session. '
f'Otherwise run \n\n {multiplexer} a -t "{session_name}"\n\nTo change '
f'the name of the tmux sessions use the `--tmux_session_name` flag. You '
f'can terminate all the processes and the {multiplexer} session by '
f'pressing Ctrl-C here.\n')
if flags.FLAGS.tmux_open_window is not None:
command = [
multiplexer, 'switch-client', '-t',
f'{session_name}:{flags.FLAGS.tmux_open_window}'
]
subprocess.run(command, check=True)
if lp_flags.LP_WORKER_MANAGER_V2.value:
# process_tree_depth=2 because the interpreter will be at level 2 of
# tmux -> bash -> interpreter.
manager = worker_manager_v2.WorkerManager(
handle_sigterm=True, kill_all_upon_sigint=True, process_tree_depth=2)
else:
manager = worker_manager.WorkerManager()
atexit.register(manager.wait)
for pid in get_session_processes():
manager.register_existing_process('tmux', pid)
return manager
| launchpad-master | launchpad/launch/run_locally/launch_local_tmux.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run commands to launch Launchpad workers in gnome-terminal."""
import atexit
import copy
import datetime
import os
import shlex
import signal
import stat
import subprocess
import tempfile
import time
from typing import Union
from launchpad import flags as lp_flags
from launchpad.launch import worker_manager
from launchpad.launch import worker_manager_v2
from launchpad.launch.run_locally import feature_testing
GNOME_TERMINAL_SERVER_PATHS = [
'/usr/libexec/gnome-terminal-server',
'/usr/lib/gnome-terminal/gnome-terminal-server',
]
def find_gnome_terminal_server():
"""Probe multiple locations for gnome-terminal-server, as this is distro specific."""
for path in GNOME_TERMINAL_SERVER_PATHS:
if os.path.isfile(path):
return path
return None
def _run_gnome_command(command, env):
"""Launches gnome command, retrying until gnome server is ready."""
retry_backoff = 0.1
while True:
process = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
stdout, stderr = process.communicate()
process.wait()
if ('# Error creating terminal' not in stderr.decode('UTF-8') or
retry_backoff > 10):
if stdout:
print(stdout)
if stderr:
print(stderr)
break
time.sleep(retry_backoff)
retry_backoff *= 2
def _launch_in_windows(commands_to_launch, app_id):
"""Launches commands in gnome windows."""
for window_index, command_to_launch in enumerate(commands_to_launch):
set_title = (r'PS1=$; PROMPT_COMMAND=; '
'echo -en "\\033]0;{}\\a"'.format(command_to_launch.title))
inner_cmd = '{}; {}; exec $SHELL'.format(
set_title, subprocess.list2cmdline(command_to_launch.command_as_list))
terminal_command_list = [
'gnome-terminal',
'--app-id',
app_id, # Connects to the recently opened terminal server.
'--geometry',
'80x60+{}+{}'.format(window_index * 40, window_index * 40),
'--',
'bash',
'-c',
inner_cmd,
]
env = {}
env.update(os.environ)
env.update(command_to_launch.env_overrides)
_run_gnome_command(terminal_command_list, env)
def _launch_in_tabs(commands_to_launch, app_id):
"""Launches commands in gnome tabs."""
file_handle, command_file_path = tempfile.mkstemp('.sh')
os.close(file_handle)
atexit.register(os.remove, command_file_path)
with open(command_file_path, 'w') as command_file:
# The command file starts with setting up environment.
for key, value in os.environ.items():
# Remove these two keys so that new processes are created by the newly
# started gnome-terminal-server.
if key in ['GNOME_TERMINAL_SERVICE', 'GNOME_TERMINAL_SCREEN']:
continue
command_file.write(f'export {shlex.quote(key)}={shlex.quote(value)}\n')
for command_to_launch in commands_to_launch:
inner_cmd = '; '.join([
# Set the title (see https://superuser.com/a/1330292/156433).
'PS1=$',
'PROMPT_COMMAND=',
f'echo -en "\\033]0;{command_to_launch.title}\\a"',
# Run the actual command.
subprocess.list2cmdline(command_to_launch.command_as_list),
# Start a shell so that the tab doesn't close instantly when the
# command finishes.
'exec $SHELL',
])
terminal_command_list = [
'gnome-terminal',
'--tab',
'--',
'bash',
'-c',
inner_cmd,
]
env_overrides = []
for key, value in command_to_launch.env_overrides.items():
env_overrides.append(f'{shlex.quote(key)}={shlex.quote(value)}')
command_file.write(
subprocess.list2cmdline(env_overrides + terminal_command_list) + '\n')
os.chmod(command_file_path, os.stat(command_file_path).st_mode | stat.S_IEXEC)
_run_gnome_command(
['gnome-terminal', '--app-id', app_id, '--', command_file_path],
os.environ)
def launch_with_gnome_terminal(
commands_to_launch,
use_tabs=False
) -> Union[worker_manager.WorkerManager,
worker_manager_v2.WorkerManager]:
"""Launch commands given as CommandToLaunch tuples with gnome-terminal.
Args:
commands_to_launch: An iterable of `CommandToLaunch` namedtuples.
use_tabs: Whether or not to run each command in a gnome tab (instead of a
window)
Returns:
Worker manager that can be used to wait for a program execution to finish.
"""
if lp_flags.LP_WORKER_MANAGER_V2.value:
commands_to_launch = copy.deepcopy(commands_to_launch)
for command_to_launch in commands_to_launch:
command_to_launch.command_as_list.append('--lp_worker_manager_v2')
# The new server-client architecture of gnome-terminal removes several
# extremely useful features. Relevant ones here are
# * The ability to launch commands as new tabs.
# * The ability to destroy gnome terminal windows from the command line.
# While there is nothing we can do about the former, there's a workaround
# for the latter. We start a new gnome-terminal-server and launch all
# windows belonging to this launchpad session in this server. To clean up,
# we can just kill the server and all windows get closed. This has the
# additional benefit that windows are grouped together when switching between
# applications with alt-tab.
if not feature_testing.has_gnome_terminal():
raise ValueError('`gnome-terminal` is not available, '
'please choose another way to launch.')
# Check if we can find the gnome-terminal-server.
gnome_terminal_server_path = find_gnome_terminal_server()
if not gnome_terminal_server_path:
raise ValueError(
'gnome-terminal-server is not present on your system but it is required '
'to launch locally with gnome-terminal. Searched: {}'.format(
GNOME_TERMINAL_SERVER_PATHS))
# Start session.
timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
# app-ids must contain characters only. Turn the digits of the timestamp into characters.
timestamp = ''.join(['abcdefghij'[int(c)] for c in timestamp])
app_id = 'launchpad.locallaunch.{}'.format(timestamp)
def preexec_fn():
# Prevents SIGINT from killing gnome-terminal-server too early
signal.signal(signal.SIGINT, signal.SIG_IGN)
server_process = subprocess.Popen([
gnome_terminal_server_path, '--app-id', app_id, '--name', app_id,
'--class', app_id
],
env=os.environ,
preexec_fn=preexec_fn)
if lp_flags.LP_WORKER_MANAGER_V2.value:
# process_tree_depth=2 because the interpreter will be at level 2 of
# gnome-terminal -> bash -> interpreter.
manager = worker_manager_v2.WorkerManager(
handle_sigterm=True, kill_all_upon_sigint=True, process_tree_depth=2)
else:
manager = worker_manager.WorkerManager()
atexit.register(manager.wait)
manager.register_existing_process('gnome', server_process.pid)
if use_tabs:
_launch_in_tabs(commands_to_launch, app_id)
else:
_launch_in_windows(commands_to_launch, app_id)
return manager
def launch_with_gnome_terminal_windows(commands_to_launch):
return launch_with_gnome_terminal(commands_to_launch, use_tabs=False)
def launch_with_gnome_terminal_tabs(commands_to_launch):
return launch_with_gnome_terminal(commands_to_launch, use_tabs=True)
| launchpad-master | launchpad/launch/run_locally/launch_local_gnome.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| launchpad-master | launchpad/launch/run_locally/__init__.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the `decorate_output` wrapper script."""
import os
import subprocess
from absl.testing import absltest
class DecorateOutputTest(absltest.TestCase):
def test_annotates_each_line(self):
decorate_output = os.path.dirname(__file__) + '/decorate_output'
decorated_output = subprocess.check_output(
[decorate_output, '33', 'my title', 'seq', '10', '10', '30'])
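# The expected bytes below show the wrapping format: ANSI bold colour 33
# (yellow) is enabled, '[my title] ' is prefixed to each line of output, and
# '\x1b[0;0m' resets the attributes.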
self.assertListEqual(
decorated_output.split(b'\n'),
[
b'\x1b[1;33m[my title] 10\x1b[0;0m',
b'\x1b[1;33m[my title] 20\x1b[0;0m',
b'\x1b[1;33m[my title] 30\x1b[0;0m',
b'',
])
if __name__ == '__main__':
absltest.main()
| launchpad-master | launchpad/launch/run_locally/decorate_output_test.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run commands to launch Launchpad workers in xterm."""
import atexit
import os
import subprocess
from typing import Union
from launchpad import flags as lp_flags
from launchpad.launch import worker_manager
from launchpad.launch import worker_manager_v2
from launchpad.launch.run_locally import feature_testing
def launch_with_xterm(
commands_to_launch
) -> Union[worker_manager.WorkerManager,
worker_manager_v2.WorkerManager]:
"""Launch multiple commands given as CommandToLaunch tuples through xterm.
Args:
commands_to_launch: An iterable of `CommandToLaunch` namedtuples.
Returns:
Worker manager that can be used to wait for a program execution to finish.
"""
if not feature_testing.has_xterm():
raise ValueError(
'xterm is not available, please choose another way to launch.')
if lp_flags.LP_WORKER_MANAGER_V2.value:
manager = worker_manager_v2.WorkerManager(
handle_sigterm=True, kill_all_upon_sigint=True)
else:
manager = worker_manager.WorkerManager()
atexit.register(manager.wait)
for window_index, command_to_launch in enumerate(commands_to_launch):
inner_cmd = '{}; exec $SHELL'.format(
subprocess.list2cmdline(command_to_launch.command_as_list))
xterm_command_list = [
'xterm',
'-title',
command_to_launch.title,
'-sl',
'2000',
'-geometry',
'80x60+{}+{}'.format(window_index * 40, window_index * 40),
'-e',
inner_cmd,
]
env = {}
env.update(os.environ)
env.update(command_to_launch.env_overrides)
manager.process_worker(
command_to_launch.title, xterm_command_list, env=env)
return manager
| launchpad-master | launchpad/launch/run_locally/launch_local_xterm.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run commands to launch Launchpad workers output their logging to files."""
import atexit
import os
from typing import Union
from launchpad import flags as lp_flags
from launchpad.launch import worker_manager
from launchpad.launch import worker_manager_v2
# Use environment variable to direct logging to specified directory.
_LOGGING_DIR = os.environ.get('LAUNCHPAD_LOGGING_DIR', '/tmp/launchpad_out/')
def launch_and_output_to_files(
commands_to_launch
) -> Union[worker_manager.WorkerManager,
worker_manager_v2.WorkerManager]:
"""Launch commands given as CommandToLaunch and log the outputs to files.
Args:
commands_to_launch: An iterable of `CommandToLaunch` namedtuples.
Returns:
Worker manager that can be used to wait for a program execution to finish.
"""
titles = []
if lp_flags.LP_WORKER_MANAGER_V2.value:
manager = worker_manager_v2.WorkerManager(
handle_sigterm=True, kill_all_upon_sigint=True)
else:
manager = worker_manager.WorkerManager()
atexit.register(manager.wait)
print(f'Logs are being output to: {_LOGGING_DIR}. '
'The logging directory can be customized by setting the '
'LAUNCHPAD_LOGGING_DIR environment variable.')
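# For example (illustrative worker title assumed): a worker titled 'learner_0'
# logs to /tmp/launchpad_out/learner_0 (or $LAUNCHPAD_LOGGING_DIR/learner_0 if
# set); duplicate titles are suffixed as learner_0_1, learner_0_2, ...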
for command_to_launch in commands_to_launch:
env = {}
env.update(os.environ)
env.update(command_to_launch.env_overrides)
title = command_to_launch.title
count = 0
while title in titles:
count += 1
title = command_to_launch.title + '_' + str(count)
titles.append(title)
filename = os.path.join(_LOGGING_DIR, title)
directory = os.path.dirname(filename)
if not os.path.exists(directory):
os.makedirs(directory)
print(f'Logging to: {filename}')
with open(filename, 'w') as outfile:
manager.process_worker(
command_to_launch.title, command_to_launch.command_as_list,
env=env, stdout=outfile, stderr=outfile)
return manager
| launchpad-master | launchpad/launch/run_locally/launch_local_output_to_files.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run commands to launch Launchpad workers in current terminal."""
import atexit
import os
import subprocess
from typing import Union
from launchpad import flags as lp_flags
from launchpad.launch import worker_manager
from launchpad.launch import worker_manager_v2
_COLOUR_PALETTE = [
36, # Cyan
33, # Yellow
32, # Green
34, # Blue
35, # Magenta
]
def launch_in_current_terminal(
commands_to_launch
) -> Union[worker_manager.WorkerManager,
worker_manager_v2.WorkerManager]:
"""Launch commands given as CommandToLaunch all in the same terminal.
Args:
commands_to_launch: An iterable of `CommandToLaunch` namedtuples.
Returns:
Worker manager that can be used to wait for a program execution to finish.
"""
if lp_flags.LP_WORKER_MANAGER_V2.value:
manager = worker_manager_v2.WorkerManager(
handle_sigterm=True, kill_all_upon_sigint=True)
else:
manager = worker_manager.WorkerManager()
atexit.register(manager.wait)
decorate_output = os.path.dirname(__file__) + '/decorate_output'
for i, command_to_launch in enumerate(commands_to_launch):
colour = _COLOUR_PALETTE[i % len(_COLOUR_PALETTE)]
env = {}
env.update(os.environ)
env.update(command_to_launch.env_overrides)
process = subprocess.Popen(
([decorate_output, str(colour), command_to_launch.title] +
command_to_launch.command_as_list),
env=env)
manager.register_existing_process(command_to_launch.title, process.pid)
return manager
| launchpad-master | launchpad/launch/run_locally/launch_local_current_terminal.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| launchpad-master | launchpad/launch/local_multi_processing/__init__.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launches a Launchpad program using multiple processes."""
from typing import Any, Mapping, Optional, Union
from launchpad import context
from launchpad import program as lp_program
from launchpad.launch import worker_manager
from launchpad.launch import worker_manager_v2
from launchpad.launch.run_locally import run_locally
def launch(program: lp_program.Program,
local_resources: Optional[Mapping[str, Any]] = None,
terminal: str = 'gnome-terminal'
) -> Union[worker_manager.WorkerManager,
worker_manager_v2.WorkerManager]:
"""Launches a program using multiple processes."""
# Set up the launch context (launch type & launch config) for all nodes
local_resources = local_resources or {}
for label, nodes in program.groups.items():
launch_config = local_resources.get(label, None)
for node in nodes:
node._initialize_context(
context.LaunchType.LOCAL_MULTI_PROCESSING,
launch_config=launch_config)
# Notify the input handles
for label, nodes in program.groups.items():
for node in nodes:
for handle in node.input_handles:
handle.connect(node, label)
# Bind addresses
for node in program.get_all_nodes():
node.bind_addresses()
commands = []
for label, nodes in program.groups.items():
# to_executables() is a static method, so we can call it from any of the
# nodes in this group.
# pytype: disable=wrong-arg-count
commands.extend(nodes[0].to_executables(nodes, label,
nodes[0].launch_context))
# pytype: enable=wrong-arg-count
return run_locally.run_commands_locally(commands, terminal)
| launchpad-master | launchpad/launch/local_multi_processing/launch.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands to run for multiple processes."""
from typing import Any, Mapping, List
class Command(object):
def __init__(self, command_as_list: List[str],
env_overrides: Mapping[str, Any], title: str):
self.command_as_list = command_as_list
self.env_overrides = env_overrides or {}
self.title = title
| launchpad-master | launchpad/launch/local_multi_processing/commands.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| launchpad-master | launchpad/launch/local_multi_threading/__init__.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Local Multithreading Launcher implementation."""
import atexit
from concurrent import futures
import sys
import threading
from typing import Optional
from absl import flags
from launchpad import context
from launchpad import flags as lp_flags
from launchpad import program as lp_program
from launchpad.launch import serialization
from launchpad.launch import worker_manager
from launchpad.launch import worker_manager_v2
FLAGS = flags.FLAGS
def launch(program: lp_program.Program,
*,
serialize_py_nodes: Optional[bool] = None):
"""Launches a program using multiple threads."""
if serialize_py_nodes is None:
serialize_py_nodes = False
# Set up the launch context (launch type & launch config) for all nodes
for label, nodes in program.groups.items():
if serialize_py_nodes:
serialization.check_nodes_are_serializable(label, nodes)
for node in nodes:
node._initialize_context(
context.LaunchType.LOCAL_MULTI_THREADING,
launch_config=None)
# Notify the input handles
for label, nodes in program.groups.items():
for node in nodes:
for handle in node.input_handles:
handle.connect(node, label)
# Setup addressing
for node in program.get_all_nodes():
node.bind_addresses()
return thread_handler(program)
def thread_handler(program):
"""Runs the threads and wraps them in Worker Manager."""
if lp_flags.LP_WORKER_MANAGER_V2.value:
manager = worker_manager_v2.WorkerManager(
kill_all_upon_sigint=True,
)
else:
manager = worker_manager.WorkerManager(
)
for label, nodes in program.groups.items():
# to_executables() is a static method, so we can call it from any of the
# nodes in this group.
# Somehow pytype thinks to_executables() gets the wrong arg count.
# pytype: disable=wrong-arg-count
executables = nodes[0].to_executables(nodes, label,
nodes[0].launch_context)
# pytype: enable=wrong-arg-count
for executable in executables:
manager.thread_worker(label, executable)
if sys.version_info[:2] >= (3, 9):
# Make sure `manager.wait` will be called before ThreadPoolExecutor atexit
# method. Otherwise the running program will not be able to start new threads.
futures.ThreadPoolExecutor
threading._register_atexit(manager.wait) # pytype: disable=module-attr # py39-upgrade
else:
atexit.register(manager.wait)
return manager
| launchpad-master | launchpad/launch/local_multi_threading/launch.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to resolve addresses in multithreaded tests."""
from typing import Sequence
from launchpad import address as lp_address
from launchpad.nodes import base
def bind_addresses(nodes: Sequence[base.Node]):
for node in nodes:
for address in node.addresses:
address.bind(lp_address.SimpleLocalAddressBuilder())
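# Illustrative usage (the node objects are assumed to come from the test):
#   bind_addresses([my_courier_node])
# gives each address declared by the listed nodes a local in-process address
# so the nodes can be run and addressed directly from a multithreaded test.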
| launchpad-master | launchpad/launch/test_multi_threading/address_builder.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| launchpad-master | launchpad/launch/test_multi_threading/__init__.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launches a Launchpad program as a multithreaded integration test.
This is very similar to local_multi_threading/launch.py but terminates the
process upon exception (instead of entering pdb).
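Usage (an illustrative sketch; see launch_test.py in this directory for real
examples):
  class LaunchTest(absltest.TestCase):
    def test_run_program(self):
      program = ...  # Create a program
      launch.launch(program, test_case=self)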
"""
from concurrent import futures
import os
import signal
import threading
import time
import typing
from typing import Optional, Union
from absl import logging
from absl.testing import absltest
from launchpad import context
from launchpad import flags as lp_flags
from launchpad.launch import serialization
from launchpad.launch import worker_manager
from launchpad.launch import worker_manager_v2
def launch(program,
test_case: Optional[absltest.TestCase] = None,
*,
serialize_py_nodes: Optional[bool] = None
) -> Union[worker_manager.WorkerManager,
worker_manager_v2.WorkerManager]:
"""Launches the program as a multi-threaded integration test."""
if serialize_py_nodes is None:
serialize_py_nodes = True
for node in program.get_all_nodes():
node._initialize_context(
context.LaunchType.TEST_MULTI_THREADING,
launch_config=None)
# Notify the input handles
for label, nodes in program.groups.items():
if serialize_py_nodes:
serialization.check_nodes_are_serializable(label, nodes)
for node in nodes:
for handle in node.input_handles:
handle.connect(node, label)
# Bind addresses
for node in program.get_all_nodes():
node.bind_addresses()
if lp_flags.LP_WORKER_MANAGER_V2.value:
manager = worker_manager_v2.WorkerManager(
termination_notice_secs=None,
)
else:
manager = worker_manager.WorkerManager()
# Run a background thread to detect and handle node failures.
stop_node_monitor = threading.Event()
monitor_future = futures.Future()
def _node_monitor():
while not stop_node_monitor.is_set():
time.sleep(.5)
try:
manager.check_for_thread_worker_exception()
except Exception as e:
monitor_future.set_exception(e)
logging.exception('One of the workers has FAILED!')
# Wait for 3s, in case the exception is caught in time.
time.sleep(3)
if not stop_node_monitor.is_set():
# The exception isn't caught in time and we have to kill the test to
# avoid a timeout. This happens when, for example, a client running in
# the main thread trying (forever) to talk to a failed server.
logging.info('Killing the test due to an uncaught exception. See '
'above for stack traces.')
os.kill(os.getpid(), signal.SIGQUIT)
return
monitor_future.set_result(None)
node_monitor_thread = threading.Thread(target=_node_monitor, daemon=True)
node_monitor_thread.start()
def _cleanup():
stop_node_monitor.set()
node_monitor_thread.join()
monitor_future.result()
if lp_flags.LP_WORKER_MANAGER_V2.value:
typing.cast(worker_manager_v2.WorkerManager,
manager).stop_event.set()
manager.wait()
else:
typing.cast(worker_manager.WorkerManager,
manager).cleanup_after_test(test_case)
if test_case is not None:
test_case.addCleanup(_cleanup)
for label, nodes in program.groups.items():
# to_executables() is a static method, so we can call it from any of the
# nodes in this group.
# Somehow pytype thinks to_executables() gets the wrong arg count.
# pytype: disable=wrong-arg-count
executables = nodes[0].to_executables(nodes, label, nodes[0].launch_context)
# pytype: enable=wrong-arg-count
for executable in executables:
manager.thread_worker(label, executable)
return manager
| launchpad-master | launchpad/launch/test_multi_threading/launch.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for launchpad.launch.test_multi_threading.launch."""
import signal
import threading
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
from launchpad import context
from launchpad import program as lp_program
from launchpad.launch import serialization_test
from launchpad.launch.test_multi_threading import launch
from launchpad.nodes.python import node as python
from launchpad.program_stopper import program_stopper
import mock
FLAGS = flags.FLAGS
def _block():
if flags.FLAGS.lp_worker_manager_v2:
launch.worker_manager_v2.wait_for_stop()
else:
launch.worker_manager.wait_for_stop()
def _stop(stopper):
stopper()
class LaunchTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self._sigterm_patcher = mock.patch.object(
signal, 'SIGTERM', new=signal.SIGUSR1)
self._sigterm_patcher.start()
def tearDown(self):
self._sigterm_patcher.stop()
super().tearDown()
@parameterized.parameters(False, True)
def test_one_py_node_program(self, use_wm_v2):
FLAGS.lp_worker_manager_v2 = use_wm_v2
has_run = threading.Event()
def run():
has_run.set()
program = lp_program.Program('test')
program.add_node(python.PyNode(run), label='run')
launch.launch(program, test_case=self, serialize_py_nodes=False)
has_run.wait()
@parameterized.parameters(False, True)
def test_handle_exception(self, use_wm_v2):
FLAGS.lp_worker_manager_v2 = use_wm_v2
def run():
raise RuntimeError('Launchpad has stopped working')
program = lp_program.Program('test')
program.add_node(python.PyNode(run), label='run')
with self.assertRaisesRegex(RuntimeError, 'Launchpad has stopped working'):
waiter = launch.launch(program, test_case=self)
waiter.wait()
@parameterized.parameters(False, True)
def test_program_stopper(self, use_wm_v2):
FLAGS.lp_worker_manager_v2 = use_wm_v2
# This verifies the program stopper works for test_multi_threading
p = lp_program.Program('test')
with p.group('block'):
p.add_node(python.PyNode(_block))
with p.group('stop'):
p.add_node(python.PyNode(_stop, program_stopper.make_program_stopper(
context.LaunchType.TEST_MULTI_THREADING)))
threads = launch.launch(p, test_case=self)
threads.wait()
@parameterized.parameters(False, True)
def test_cleanup(self, use_wm_v2):
FLAGS.lp_worker_manager_v2 = use_wm_v2
# Test verifies that test cleanup works.
p = lp_program.Program('test')
with p.group('block'):
p.add_node(python.PyNode(_block))
launch.launch(p, test_case=self)
class SerializationTest(serialization_test.ErrorOnSerializationMixin):
@property
def _launch(self):
return launch.launch
if __name__ == '__main__':
absltest.main()
| launchpad-master | launchpad/launch/test_multi_threading/launch_test.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| launchpad-master | launchpad/examples/__init__.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| launchpad-master | launchpad/examples/hello_world/__init__.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example is the simplest possible Launchpad program."""
from absl import app
import launchpad as lp
class HelloWorld:
"""A node that prints hello world and exits."""
def __init__(self) -> None:
"""Initializes Hello World."""
pass
def run(self) -> None:
"""Entry point."""
print('Hello World!!!')
def make_program() -> lp.Program:
"""Define the program topology."""
program = lp.Program('hello_world')
node = lp.PyClassNode(HelloWorld)
program.add_node(node, label='hello_printer')
return program
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
program = make_program()
lp.launch(program)
if __name__ == '__main__':
app.run(main)
| launchpad-master | launchpad/examples/hello_world/launch.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| launchpad-master | launchpad/examples/consumer_producers/__init__.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launches consumer-producer example on Google Cloud using Vertex AI."""
import os
from absl import app
from absl import flags
import launchpad as lp
from launchpad.examples.consumer_producers.program import make_program
from launchpad.nodes.python import xm_docker
_NUM_PRODUCERS = flags.DEFINE_integer('num_producers', 2,
'The number of concurrent producers.')
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
script_dir = os.path.dirname(os.path.realpath(__file__))
launchpad_dir = os.path.dirname(os.path.dirname(script_dir))
docker_requirements = os.path.join(script_dir, 'requirements.txt')
docker_config = xm_docker.DockerConfig(launchpad_dir, docker_requirements)
resources = {'producer': docker_config, 'consumer': docker_config}
program = make_program(num_producers=_NUM_PRODUCERS.value)
lp.launch(
program,
launch_type=lp.LaunchType.VERTEX_AI,
xm_resources=resources)
if __name__ == '__main__':
app.run(main)
| launchpad-master | launchpad/examples/consumer_producers/launch_vertex_ai.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example introduces basic notions in Launchpad."""
from absl import app
from absl import flags
import launchpad as lp
from launchpad.examples.consumer_producers.program import make_program
_NUM_PRODUCERS = flags.DEFINE_integer('num_producers', 2,
'The number of concurrent producers.')
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
# Define a program which describes the topology of communicating nodes and
# edges. In more involved examples, several programs can be defined and
# launched at once.
program = make_program(num_producers=_NUM_PRODUCERS.value)
# Note that at launch time, none of the producers has been instantiated.
# Producers are instantiated only at runtime.
lp.launch(program)
if __name__ == '__main__':
app.run(main)
| launchpad-master | launchpad/examples/consumer_producers/launch.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the example producer-consumer program."""
from typing import List
from absl import logging
import launchpad as lp
class Consumer:
"""A simple consumer that calls producers to perform some work."""
def __init__(
self,
producers: List[lp.CourierClient],
) -> None:
"""Initializes a Consumer.
Args:
producers: a list of Producer handles.
"""
self._producers = producers
def run(self) -> None:
"""Entry point of the consumer."""
# As a toy example we run 10 steps to interact with producers. Typically,
# this would be replaced with an infinite loop or a loop with some stopping
# criterion.
for _ in range(10):
self.step()
# Stop the whole program (consumer and producers). Simply returning here
# would stop the consumer but not the producers.
lp.stop()
def step(self) -> None:
"""Tells all the producers to perform one step of work."""
# Call the producers to asynchronously produce work given a dummy context
# represented by a counter.
futures = [
producer.futures.work(context)
for context, producer in enumerate(self._producers)
]
# Block to gather the results of all the producers.
results = [future.result() for future in futures]
logging.info('Results: %s', results)
class Producer:
"""A bare-bones producer."""
def work(self, context: int) -> int:
# Add code here to perform work. Note that this method can be called in
# multiple threads because of the use of Courier futures, and so it has to
# be thread safe! In this example the producer is stateless, so thread
# safety is not a concern.
return context
def make_program(num_producers: int) -> lp.Program:
"""Define the distributed program topology."""
program = lp.Program('consumer_producers')
# Use `program.group()` to group homogeneous nodes.
with program.group('producer'):
# Add a `CourierNode` to the program. `lp.CourierNode()` takes the producer
# constructor and its arguments, and exposes it as an RPC server.
# `program.add_node(lp.CourierNode(...))` returns a handle to this server.
# These handles can then be passed to other nodes.
producers = [
program.add_node(lp.CourierNode(Producer)) for _ in range(num_producers)
]
# Launch a single consumer that connects to the list of producers.
  # Note: The use of `label` here actually creates a group with a single node.
node = lp.CourierNode(Consumer, producers=producers)
program.add_node(node, label='consumer')
return program
| launchpad-master | launchpad/examples/consumer_producers/program.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launchpad integration test for the consumer_producers example."""
from absl.testing import absltest
import launchpad as lp
from launchpad.examples.consumer_producers.program import make_program
class LaunchTest(absltest.TestCase):
def test_consumer_steps(self):
"""Runs the program and makes sure the consumer can run 10 steps."""
program = make_program(num_producers=2)
# Retrieve the consumer node from the program. Nodes are organized as a
# mapping of label->nodes, stored as a dict in `program.groups`
(consumer_node,) = program.groups['consumer']
# Disable the automatic execution of its `run()` method.
consumer_node.disable_run() # pytype: disable=attribute-error
# Launch all workers declared by the program. Remember to set the launch
# type here (test & multithreaded).
lp.launch(program, launch_type='test_mt', test_case=self)
    # Dereference `consumer_node`'s courier handle explicitly to obtain its
    # courier client.
consumer = consumer_node.create_handle().dereference()
    # The success criterion for this integration test is that the consumer
    # is able to take 10 steps.
for _ in range(10):
consumer.step()
if __name__ == '__main__':
absltest.main()
| launchpad-master | launchpad/examples/consumer_producers/launch_test.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| launchpad-master | launchpad/examples/batching/__init__.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
R"""This example shows how to introduce batched handlers.
It is useful for centralised inference handled in Python.
"""
import time
from absl import app
from absl import flags
from absl import logging
import launchpad as lp
import numpy as np
_NUM_CLIENTS = flags.DEFINE_integer('num_clients', 2, 'The number of clients.')
class Client:
"""A simple client that calls batched server method."""
def __init__(self, client_id: int, server: lp.CourierClient) -> None:
"""Initializes a Client.
Args:
client_id: Id of the client.
server: Server's handler.
"""
self._client_id = client_id
self._server = server
def run(self) -> None:
"""Entry point of the client."""
for x in range(self._client_id, self._client_id + 10):
result = self._server.compute([x, x + 1])
logging.info('Result: %s', result)
time.sleep(5)
lp.stop()
class Server:
"""A simple server which sums ."""
def __init__(self, batch_size) -> None:
self._sum = None
self.compute = lp.batched_handler(batch_size=batch_size)(self.compute)
def compute(self, values):
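    # `compute` was wrapped with lp.batched_handler in __init__, so `values`
    # gathers the arguments of up to `batch_size` concurrent client calls, and
    # one result per call has to be returned (hence the tiling below).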
result = np.sum(values, axis=0)
if self._sum is None:
self._sum = result
else:
self._sum += result
return np.tile(self._sum, (len(values), 1))
def make_program() -> lp.Program:
"""Define the program topology."""
program = lp.Program('batching')
batch_size = _NUM_CLIENTS.value
  # In case of a big batch size it is important to set thread_pool_size to
  # prevent deadlocks (Courier currently uses a synchronous gRPC server).
server = program.add_node(
lp.CourierNode(
Server, batch_size, courier_kwargs={'thread_pool_size': batch_size}),
label='server')
with program.group('clients'):
for client_id in range(_NUM_CLIENTS.value):
program.add_node(lp.CourierNode(Client, client_id, server))
return program
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
program = make_program()
lp.launch(program)
if __name__ == '__main__':
app.run(main)
| launchpad-master | launchpad/examples/batching/launch.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| launchpad-master | launchpad/examples/program_wait/__init__.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example presents clean node termination."""
import threading
import time
from absl import app
from absl import logging
import launchpad as lp
def _sleep():
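  # lp.wait_for_stop(timeout) waits up to `timeout` seconds and returns True
  # once the program has been asked to stop, so this loop keeps running until
  # termination is requested (here by the _stop_program node).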
while not lp.wait_for_stop(1):
logging.info('Sleeping again...')
logging.info('Clean termination of _sleep node')
time.sleep(2)
def _wait_for_stop():
lp.wait_for_stop()
logging.info('Clean termination of _wait_for_stop node')
time.sleep(2)
def _stop_event():
lp.stop_event().wait()
logging.info('Clean termination of _stop_event node')
time.sleep(2)
def _register_stop_handler():
"""Showcases the use of lp.register_stop_handler."""
stop = threading.Event()
def _stop_handler():
logging.info('_stop_handler called')
stop.set()
lp.register_stop_handler(_stop_handler)
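  # The handler registered above is invoked when the program is asked to
  # stop; it sets the event so this node can terminate cleanly.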
stop.wait()
logging.info('Clean termination of _register_stop_handler node')
time.sleep(2)
def _stop_program():
time.sleep(4)
lp.stop()
def make_program() -> lp.Program:
"""Define the distributed program topology."""
program = lp.Program('program_wait')
program.add_node(lp.CourierNode(_sleep), label='sleep')
program.add_node(lp.CourierNode(_wait_for_stop), label='_wait_for_stop')
program.add_node(lp.CourierNode(_stop_event), label='_stop_event')
program.add_node(lp.CourierNode(_register_stop_handler),
label='_register_stop_handler')
program.add_node(lp.CourierNode(_stop_program), label='_stop_program')
return program
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
program = make_program()
controller = lp.launch(program)
if not controller:
logging.info('Waiting for program termination is not supported.')
return
controller.wait()
logging.info('Program finished.')
if __name__ == '__main__':
app.run(main)
| launchpad-master | launchpad/examples/program_wait/launch.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stops a Launchpad program."""
import functools
import os
import signal
import sys
from typing import Union
from absl import logging
from launchpad import context
from launchpad import flags as lp_flags
from launchpad.launch import worker_manager
from launchpad.launch import worker_manager_v2
def _stop_vertex_ai(mark_as_completed=False):
del mark_as_completed
from google.cloud import aiplatform
from google.api_core import exceptions
aiplatform.init(project=os.environ['CLOUD_ML_PROJECT_ID'])
try:
aiplatform.CustomJob.get(os.environ['CLOUD_ML_JOB_ID']).cancel()
except exceptions.FailedPrecondition:
# Experiment could have been already cancelled.
pass
def _ask_launcher_for_termination(launcher_process_id, mark_as_completed=False):
del mark_as_completed
os.kill(launcher_process_id, signal.SIGTERM)
def make_program_stopper(launch_type: Union[str, context.LaunchType]):
"""Returns a callable that stops the Launchpad program.
Args:
launch_type: launch_type with which the program stopper is used.
Returns:
A callable. When called, it stops the running program.
"""
launch_type = context.LaunchType(launch_type)
def _stop_mt():
worker_manager_v2.get_worker_manager().stop_event.set()
if launch_type is context.LaunchType.TEST_MULTI_THREADING:
if lp_flags.LP_WORKER_MANAGER_V2.value:
return _stop_mt
else:
def _stop():
worker_manager.get_worker_manager()._sigterm()
return _stop
if (launch_type is context.LaunchType.LOCAL_MULTI_THREADING and
lp_flags.LP_WORKER_MANAGER_V2.value):
return _stop_mt
if (launch_type in [
context.LaunchType.LOCAL_MULTI_PROCESSING,
context.LaunchType.TEST_MULTI_PROCESSING
] and lp_flags.LP_WORKER_MANAGER_V2.value):
def _sigint_to_launcher(launcher_process_id, mark_as_completed=False):
del mark_as_completed
# Here we send a SIGINT to the launcher process for simplicity, but it
# will bring down all programs launched from that process. A better
# approach is to communicate to the launcher process through a unix pipe
# (named after the launcher pid), so that the launcher process will only
# kill subprocesses associated with the specific program.
os.kill(launcher_process_id, signal.SIGINT)
# In local_mp, we treat lp.stop() as a user-requested stop. The reason is
# that it provides convenience in observing preemption handling logic (using
# `lp.wait_for_stop()` or the stop event) being triggered locally.
return functools.partial(_sigint_to_launcher, os.getpid())
if launch_type in [
context.LaunchType.LOCAL_MULTI_PROCESSING,
context.LaunchType.LOCAL_MULTI_THREADING,
context.LaunchType.TEST_MULTI_PROCESSING,
]:
return functools.partial(_ask_launcher_for_termination, os.getpid())
if launch_type in [context.LaunchType.VERTEX_AI]:
return _stop_vertex_ai
raise NotImplementedError(f'{launch_type} is not yet supported.')
| launchpad-master | launchpad/program_stopper/program_stopper.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| launchpad-master | launchpad/program_stopper/__init__.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for pip package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from setuptools import find_namespace_packages
from setuptools import setup
def _parse_requirements(requirements_txt_path):
with open(requirements_txt_path) as fp:
return fp.read().splitlines()
def test_suite():
test_loader = unittest.TestLoader()
all_tests = test_loader.discover('jax_verify/tests',
pattern='*_test.py')
return all_tests
setup(
name='jax_verify',
version='1.0',
description='A library for neural network verification.',
url='https://github.com/deepmind/jax_verify',
author='DeepMind',
author_email='[email protected]',
# Contained modules and scripts.
packages=find_namespace_packages(exclude=['*_test.py']),
install_requires=_parse_requirements('requirements.txt'),
    python_requires='>=3.6',
platforms=['any'],
license='Apache 2.0',
test_suite='setup.test_suite',
include_package_data=True,
zip_safe=False,
)
| jax_verify-master | setup.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Configuration file for the Sphinx documentation builder."""
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# pylint: disable=g-bad-import-order
# pylint: disable=g-import-not-at-top
import inspect
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
import jax_verify
# -- Project information -----------------------------------------------------
project = 'jax_verify'
copyright = '2020, DeepMind' # pylint: disable=redefined-builtin
author = 'DeepMind'
# -- General configuration ---------------------------------------------------
master_doc = 'index'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.linkcode',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for autodoc -----------------------------------------------------
autodoc_default_options = {
'member-order': 'bysource',
'special-members': True,
'exclude-members': '__repr__, __str__, __weakref__',
}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_options = {
# 'collapse_navigation': False,
# 'sticky_navigation': False,
}
# -- Source code links -------------------------------------------------------
def linkcode_resolve(domain, info):
"""Resolve a GitHub URL corresponding to Python object."""
if domain != 'py':
return None
try:
mod = sys.modules[info['module']]
except ImportError:
return None
obj = mod
try:
for attr in info['fullname'].split('.'):
obj = getattr(obj, attr)
except AttributeError:
return None
else:
obj = inspect.unwrap(obj)
try:
filename = inspect.getsourcefile(obj)
except TypeError:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except OSError:
return None
# TODO: support tags after we release an initial version.
  return 'https://github.com/deepmind/jax_verify/blob/master/jax_verify/%s#L%d-L%d' % (
os.path.relpath(filename, start=os.path.dirname(
jax_verify.__file__)), lineno, lineno + len(source) - 1)
| jax_verify-master | docs/conf.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bound propagation example usage: IBP, Fastlin, CROWN, CROWN-IBP.
Examples:
python3 run_boundprop.py
python3 run_boundprop.py --model=cnn
python3 run_boundprop.py --boundprop_method=fastlin_bound_propagation
"""
import functools
import pickle
from absl import app
from absl import flags
from absl import logging
import jax.numpy as jnp
import jax_verify
from jax_verify.extensions.sdp_verify import utils
import numpy as np
MLP_PATH = 'models/raghunathan18_pgdnn.pkl'
CNN_PATH = 'models/mnist_wongsmall_eps_10_adv.pkl'
ALL_BOUNDPROP_METHODS = (
jax_verify.interval_bound_propagation,
jax_verify.forward_fastlin_bound_propagation,
jax_verify.backward_fastlin_bound_propagation,
jax_verify.ibpforwardfastlin_bound_propagation,
jax_verify.forward_crown_bound_propagation,
jax_verify.backward_crown_bound_propagation,
jax_verify.crownibp_bound_propagation,
)
flags.DEFINE_string('model', 'mlp', 'mlp or cnn')
flags.DEFINE_string('boundprop_method', '',
                    'Any boundprop method, such as '
                    '`interval_bound_propagation`, '
                    '`forward_fastlin_bound_propagation` or '
                    '`backward_crown_bound_propagation`. '
                    'Empty string defaults to IBP.')
FLAGS = flags.FLAGS
def load_model(model_name):
"""Load model parameters and prediction function."""
# Choose appropriate prediction function
if model_name == 'mlp':
model_path = MLP_PATH
def model_fn(params, inputs):
inputs = np.reshape(inputs, (inputs.shape[0], -1))
return utils.predict_mlp(params, inputs)
elif model_name == 'cnn':
model_path = CNN_PATH
model_fn = utils.predict_cnn
else:
    raise ValueError(f'Unknown model name: {model_name}')
# Load parameters from file
with jax_verify.open_file(model_path, 'rb') as f:
params = pickle.load(f)
return model_fn, params
def main(unused_args):
# Load some test samples
with jax_verify.open_file('mnist/x_test_first100.npy', 'rb') as f:
inputs = np.load(f)
# Load the parameters of an existing model.
model_pred, params = load_model(FLAGS.model)
# Evaluation of the model on unperturbed images.
clean_preds = model_pred(params, inputs)
# Define initial bound
eps = 0.1
initial_bound = jax_verify.IntervalBound(
jnp.minimum(jnp.maximum(inputs - eps, 0.0), 1.0),
jnp.minimum(jnp.maximum(inputs + eps, 0.0), 1.0))
# Because our function `model_pred` takes as inputs both the parameters
# `params` and the `inputs`, we need to wrap it such that it only takes
# `inputs` as parameters.
logits_fn = functools.partial(model_pred, params)
# Apply bound propagation. All boundprop methods take as an input the model
# `function`, and the inital bounds, and return final bounds with the same
# structure as the output of `function`. Internally, these methods work by
# replacing each operation with its boundprop equivalent - see
# bound_propagation.py for details.
boundprop_method = (
jax_verify.interval_bound_propagation if not FLAGS.boundprop_method else
getattr(jax_verify, FLAGS.boundprop_method))
assert boundprop_method in ALL_BOUNDPROP_METHODS, 'unsupported method'
final_bound = boundprop_method(logits_fn, initial_bound)
logging.info('Lower bound: %s', final_bound.lower)
logging.info('Upper bound: %s', final_bound.upper)
logging.info('Clean predictions: %s', clean_preds)
assert jnp.all(final_bound.lower <= clean_preds), 'Invalid lower bounds'
assert jnp.all(final_bound.upper >= clean_preds), 'Invalid upper bounds'
if __name__ == '__main__':
app.run(main)
| jax_verify-master | examples/run_boundprop.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Run SDP verification for adversarial robustness specification.
Example launch commands which achieve good results:
CIFAR10 CNN-Mix:
python3 run_sdp_verify.py --model_name=models/cifar10_wongsmall_eps2_mix.pkl \
--anneal_lengths=30000,30000,30000
MNIST CNN-Adv:
python3 run_sdp_verify.py --model_name=models/mnist_wongsmall_eps_10_adv.pkl \
--epsilon=0.1 --dataset=mnist \
--anneal_lengths=20000,20000,20000 --opt_name=adam --anneal_factor=0.03 \
--n_iter_lanczos=300
MNIST Adv-MLP:
python3 run_sdp_verify.py --epsilon=0.1 --dataset=mnist \
--model_name=models/raghunathan18_pgdnn.pkl --use_exact_eig_train=True \
--use_exact_eig_eval=True --opt_name=adam --lam_coeff=0.1 --nu_coeff=0.03 \
--custom_kappa_coeff=10000 --anneal_lengths=10000,4000,1000 \
--kappa_zero_after=2000
"""
import functools
import os
import pickle
from absl import app
from absl import flags
import jax
import jax.numpy as jnp
import jax_verify
from jax_verify import sdp_verify
from jax_verify.extensions.sdp_verify import boundprop_utils
from jax_verify.extensions.sdp_verify import problem
from jax_verify.extensions.sdp_verify import utils
import numpy as np
flags.DEFINE_integer('dataset_idx', 1, 'i^th example in dataset')
flags.DEFINE_integer('target_label_idx', 0, 'which class to target?')
flags.DEFINE_float('epsilon', 2. / 255, 'attack radius')
flags.DEFINE_string('dataset', 'cifar10', 'dataset, mnist or cifar')
flags.DEFINE_string('model_name', 'models/cifar10_wongsmall_eps2_mix.pkl',
'model name specifying Pickle file with network weights')
flags.DEFINE_boolean('inception_preprocess', False,
'Use inception_preprocessing i.e. [-1,1]-scaled inputs')
flags.DEFINE_string('boundprop_type', 'crown_ibp',
'Method for obtaining initial activation bounds. '
                    'E.g. "crown_ibp", "nonconvex" or "ibp".')
flags.DEFINE_float('lam_coeff', 1.0, 'Coeff for dual variables')
flags.DEFINE_float('nu_coeff', 0.03, 'Coeff for dual variables')
flags.DEFINE_float('custom_kappa_coeff', -1,
'if >0, scale LR for top-left kappa')
flags.DEFINE_string('anneal_lengths', '15,5',
'comma-separated integers with # of steps per epoch')
# Flags passed directly to solver
flags.DEFINE_boolean('use_exact_eig_train', False,
'Use exact eigendecomposition for training')
flags.DEFINE_boolean('use_exact_eig_eval', False,
'Use exact eigendecomposition for evaluation')
flags.DEFINE_integer('n_iter_lanczos', 200, '# of Lanczos iters per step')
flags.DEFINE_float('eval_every', 1000, 'Iterations per log.')
flags.DEFINE_float('lr_init', 1e-3, 'initial learning rate')
flags.DEFINE_float('anneal_factor', 0.1, 'learning rate anneal factor')
flags.DEFINE_string('opt_name', 'rmsprop',
'Optix class: "adam" "sgd" or "rmsprop"')
flags.DEFINE_float('kappa_zero_after', 1e9, 'zero kappa_{1:n} after N steps')
flags.DEFINE_float('kappa_reg_weight', -1, '-1 disables kappa regularization')
FLAGS = flags.FLAGS
def _load_dataset(dataset):
"""Loads the 10000 MNIST (CIFAR) test set examples, saved as numpy arrays."""
assert dataset in ('mnist', 'cifar10'), 'invalid dataset name'
with jax_verify.open_file(os.path.join(dataset, 'x_test.npy'), 'rb') as f:
xs = np.load(f)
with jax_verify.open_file(os.path.join(dataset, 'y_test.npy'), 'rb') as f:
ys = np.load(f)
return xs, ys
def _load_weights(path):
with jax_verify.open_file(path, 'rb') as f:
data = pickle.load(f)
return data
def get_verif_instance(params, x, label, target_label, epsilon,
input_bounds=(0., 1.)):
"""Creates verif instance."""
if FLAGS.boundprop_type == 'ibp':
bounds = utils.boundprop(
params, utils.init_bound(x, epsilon, input_bounds=input_bounds))
else:
bounds = boundprop_utils.boundprop(
params, np.expand_dims(x, axis=0), epsilon, input_bounds,
FLAGS.boundprop_type)
verif_instance = utils.make_relu_robust_verif_instance(
params, bounds, target_label=target_label, label=label,
input_bounds=input_bounds)
return verif_instance
def _opt_multiplier_fn(path, kappa_index, kappa_dim=None):
"""Set adaptive learning rates."""
if FLAGS.custom_kappa_coeff > 0:
kappa_lr_mul = FLAGS.custom_kappa_coeff
if kappa_index in path:
onehot = jax.nn.one_hot([0], kappa_dim)
return onehot.at[(0, 0)].set(kappa_lr_mul)
if 'lam' in path:
return FLAGS.lam_coeff
if path == (kappa_index - 1, 'nu'):
return FLAGS.nu_coeff
return 1.0
def verify_cnn_single_dual(verif_instance):
"""Run verification for a CNN on a single MNIST/CIFAR problem."""
verif_instance = problem.make_sdp_verif_instance(verif_instance)
solver_params = dict(
use_exact_eig_train=FLAGS.use_exact_eig_train,
use_exact_eig_eval=FLAGS.use_exact_eig_eval,
n_iter_lanczos=FLAGS.n_iter_lanczos,
eval_every=FLAGS.eval_every,
opt_name=FLAGS.opt_name,
anneal_factor=FLAGS.anneal_factor,
lr_init=FLAGS.lr_init,
kappa_zero_after=FLAGS.kappa_zero_after,
kappa_reg_weight=FLAGS.kappa_reg_weight,
)
# Set schedule
steps_per_anneal = [int(x) for x in FLAGS.anneal_lengths.split(',')]
num_steps = sum(steps_per_anneal)
solver_params['steps_per_anneal'] = steps_per_anneal[:-1] + [int(1e9)]
# Set learning rate multipliers
kappa_shape = verif_instance.dual_shapes[-1]
kappa_index = len(verif_instance.dual_shapes) - 1
assert len(kappa_shape) == 2 and kappa_shape[0] == 1
opt_multiplier_fn = functools.partial(
_opt_multiplier_fn, kappa_index=kappa_index, kappa_dim=kappa_shape[1])
# Call solver
obj_value, info = sdp_verify.solve_sdp_dual(
verif_instance,
num_steps=num_steps,
verbose=True,
opt_multiplier_fn=opt_multiplier_fn,
**solver_params)
info['final_dual_vars'] = jax.tree_map(np.array, info['final_dual_vars'])
return float(obj_value), info
class PickleWriter:
def write(self, d):
with open('/tmp/run_sdp_verify_results.pkl', 'wb') as f:
pickle.dump(d, f)
def main(unused_argv):
run_verification(PickleWriter())
def run_verification(writer):
"""Run verification."""
xs, ys = _load_dataset(FLAGS.dataset)
dataset_idx = FLAGS.dataset_idx
if FLAGS.dataset == 'cifar10':
x = utils.preprocess_cifar(xs[dataset_idx])
epsilon, input_bounds = utils.preprocessed_cifar_eps_and_input_bounds(
shape=x.shape, epsilon=FLAGS.epsilon,
inception_preprocess=FLAGS.inception_preprocess)
else:
x = xs[dataset_idx]
epsilon = FLAGS.epsilon
input_bounds = (0., 1.)
true_label = ys[dataset_idx]
target_label = FLAGS.target_label_idx
params = _load_weights(FLAGS.model_name)
if isinstance(params[0], dict):
params[0]['input_shape'] = x.shape[0]
verif_instance = get_verif_instance(
params, x, label=true_label, target_label=target_label,
epsilon=epsilon, input_bounds=input_bounds)
# Report initial bound from interval bounds.
ibp_bound = utils.ibp_bound_elided(verif_instance)
print('IBP bound:', ibp_bound)
if true_label == target_label:
return
# Run dual SDP verification.
verified_ub, info = verify_cnn_single_dual(verif_instance)
# Run FGSM eval.
model_fn = lambda x: utils.predict_cnn(params, jnp.expand_dims(x, axis=0))
x_adv = utils.fgsm_single(
model_fn, x, true_label, target_label, input_bounds=input_bounds,
epsilon=epsilon, num_steps=100, step_size=0.001)
adv_objective = float(
utils.adv_objective(model_fn, x_adv, true_label, target_label))
print('adv_objective :', adv_objective)
output_dict = {
'dataset_idx': dataset_idx,
'true_label': true_label,
'target_label': target_label,
'epsilon': FLAGS.epsilon,
'verified_ub': verified_ub,
'adv_lb': adv_objective,
'adv_success': adv_objective > 0.0,
'ibp_bound': ibp_bound,
}
output_dict.update(info)
jax_to_np = lambda x: np.array(x) if isinstance(x, jax.Array) else x
output_dict = jax.tree_map(jax_to_np, output_dict)
writer.write(output_dict)
if __name__ == '__main__':
app.run(main)
| jax_verify-master | examples/run_sdp_verify.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run verification with out-of-the-box LP solver.
This example uses jax_verify to generate Linear Program (LP) constraints
expressed in CVXPY, which is then solved with a generic LP solver.
Note that this CVXPY example is purely illustrative - it incurs a large overhead
for defining the problem, since CVXPY struggles with the large number of
constraints, particularly with convolutional layers. We will release more
performant implementations with other LP solvers in the future. We also welcome
contributions.
"""
import functools
import pickle
from absl import app
from absl import flags
from absl import logging
import jax.numpy as jnp
import jax_verify
from jax_verify.extensions.sdp_verify import utils
from jax_verify.src.linear import forward_linear_bounds
from jax_verify.src.mip_solver.solve_relaxation import solve_planet_relaxation
import numpy as np
MLP_PATH = 'models/raghunathan18_pgdnn.pkl'
CNN_PATH = 'models/mnist_wongsmall_eps_10_adv.pkl'
flags.DEFINE_string('model', 'mlp', 'mlp or cnn or toy')
flags.DEFINE_string('boundprop_method', 'ibp', 'ibp or fastlin')
FLAGS = flags.FLAGS
def load_model(model_name):
"""Load model parameters and prediction function."""
# Choose appropriate prediction function
if model_name in ('mlp', 'toy'):
model_path = MLP_PATH
def model_fn(params, inputs):
inputs = np.reshape(inputs, (inputs.shape[0], -1))
return utils.predict_mlp(params, inputs)
elif model_name == 'cnn':
model_path = CNN_PATH
model_fn = utils.predict_cnn
else:
    raise ValueError(f'Unknown model name: {model_name}')
# Get parameters
if model_name == 'toy':
params = [
(np.random.normal(size=(784, 2)), np.random.normal(size=(2,))),
(np.random.normal(size=(2, 10)), np.random.normal(size=(10,))),
]
else:
with jax_verify.open_file(model_path, 'rb') as f:
params = pickle.load(f)
return model_fn, params
def main(unused_args):
# Load the parameters of an existing model.
model_pred, params = load_model(FLAGS.model)
logits_fn = functools.partial(model_pred, params)
# Load some test samples
with jax_verify.open_file('mnist/x_test_first100.npy', 'rb') as f:
inputs = np.load(f)
# Compute boundprop bounds
eps = 0.1
lower_bound = jnp.minimum(jnp.maximum(inputs[:2, ...] - eps, 0.0), 1.0)
upper_bound = jnp.minimum(jnp.maximum(inputs[:2, ...] + eps, 0.0), 1.0)
init_bound = jax_verify.IntervalBound(lower_bound, upper_bound)
if FLAGS.boundprop_method == 'forwardfastlin':
final_bound = jax_verify.forward_fastlin_bound_propagation(logits_fn,
init_bound)
boundprop_transform = forward_linear_bounds.forward_fastlin_transform
elif FLAGS.boundprop_method == 'ibp':
final_bound = jax_verify.interval_bound_propagation(logits_fn, init_bound)
boundprop_transform = jax_verify.ibp_transform
else:
    raise NotImplementedError('Only ibp/fastlin boundprop are '
                              'currently supported')
dummy_output = model_pred(params, inputs)
# Run LP solver
objective = jnp.where(jnp.arange(dummy_output[0, ...].size) == 0,
jnp.ones_like(dummy_output[0, ...]),
jnp.zeros_like(dummy_output[0, ...]))
objective_bias = 0.
value, _, status = solve_planet_relaxation(
logits_fn, init_bound, boundprop_transform, objective,
objective_bias, index=0)
logging.info('Relaxation LB is : %f, Status is %s', value, status)
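  # Maximizing the objective is done by minimizing its negation, so the upper
  # bound is recovered by negating the optimal value returned below.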
value, _, status = solve_planet_relaxation(
logits_fn, init_bound, boundprop_transform, -objective,
objective_bias, index=0)
logging.info('Relaxation UB is : %f, Status is %s', -value, status)
logging.info('Boundprop LB is : %f', final_bound.lower[0, 0])
logging.info('Boundprop UB is : %f', final_bound.upper[0, 0])
if __name__ == '__main__':
app.run(main)
| jax_verify-master | examples/run_lp_solver.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library to perform verification on Neural Networks."""
from jax_verify.src.bound_propagation import IntervalBound
from jax_verify.src.ibp import bound_transform as ibp_transform
from jax_verify.src.ibp import interval_bound_propagation
from jax_verify.src.intersection import IntersectionBoundTransform
from jax_verify.src.linear.backward_crown import backward_crown_bound_propagation
from jax_verify.src.linear.backward_crown import backward_fastlin_bound_propagation
from jax_verify.src.linear.backward_crown import crownibp_bound_propagation
from jax_verify.src.linear.forward_linear_bounds import forward_crown_bound_propagation
from jax_verify.src.linear.forward_linear_bounds import forward_fastlin_bound_propagation
from jax_verify.src.linear.forward_linear_bounds import ibpforwardfastlin_bound_propagation
from jax_verify.src.nonconvex.methods import nonconvex_constopt_bound_propagation
from jax_verify.src.nonconvex.methods import nonconvex_ibp_bound_propagation
from jax_verify.src.utils import open_file
| jax_verify-master | jax_verify/__init__.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for SDP verification of neural networks."""
from jax_verify.extensions.sdp_verify.sdp_verify import dual_fun
from jax_verify.extensions.sdp_verify.sdp_verify import solve_sdp_dual
from jax_verify.extensions.sdp_verify.sdp_verify import solve_sdp_dual_simple
from jax_verify.extensions.sdp_verify.utils import SdpDualVerifInstance
__all__ = (
"dual_fun",
"SdpDualVerifInstance",
"solve_sdp_dual",
"solve_sdp_dual_simple",
)
| jax_verify-master | jax_verify/sdp_verify.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for the support of branching constraints."""
from absl.testing import absltest
import jax
from jax import numpy as jnp
import jax_verify
from jax_verify.src import bound_propagation
from jax_verify.src import bound_utils
from jax_verify.src import concretization
from jax_verify.src import optimizers
from jax_verify.src import synthetic_primitives
from jax_verify.src.branching import branch_selection
from jax_verify.src.linear import backward_crown
from jax_verify.src.linear import backward_linearbounds_with_branching as blwb
from jax_verify.src.linear import linear_relaxations
from jax_verify.tests import test_utils
import numpy as np
import optax
def _find_linear_layers_indexes(fun, input_bound):
"""Find the index of linear layers.
We don't require the branching to be on those, but it is easy to reason about
them so that's what the tests are doing.
Args:
fun: Function to be bounded.
input_bound: Input bound to the function
Returns:
linear_layers_indexes: Indexes of all the linear layers.
"""
graph_inspector = bound_utils.GraphInspector()
inspector_algorithm = bound_propagation.ForwardPropagationAlgorithm(
graph_inspector)
bound_propagation.bound_propagation(inspector_algorithm, fun, input_bound)
return [node.index for node in graph_inspector.nodes.values()
if node.primitive == synthetic_primitives.linear_p]
class LinearBoundBranchingTest(absltest.TestCase):
def test_linear_network(self):
"""Test the imposition of constraints.
    We are going to take a network that computes the sum of the inputs.
    All inputs are contained in [-1, 1].
    We will impose, as branching constraints, that the output is greater than
    zero and smaller than 2, and check that this tightens the bounds.
"""
def lin_network(x):
return x.sum()
input_bound = jax_verify.IntervalBound(-jnp.ones((3,)), jnp.ones((3,)))
# Let's first check we get the expected bounds.
unbranched_bounds = jax_verify.backward_crown_bound_propagation(
lin_network, input_bound)
self.assertAlmostEqual(unbranched_bounds.lower, -3.)
self.assertAlmostEqual(unbranched_bounds.upper, 3.)
lin_layer_indexes = _find_linear_layers_indexes(lin_network, input_bound)
self.assertLen(lin_layer_indexes, 1)
layer_index = lin_layer_indexes[0]
## First block of test: imposing restrictive constraints.
tighter_branching_decisions_list = [
# In layer `layer_index`, neuron 0 is greater than 0.
branch_selection.BranchDecision(layer_index, 0, 0., 1),
# In layer `layer_index`, neuron 0 is smaller than 2.
branch_selection.BranchDecision(layer_index, 0, 2., -1),
]
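    # BranchDecision fields, as used here: node index in the computation
    # graph, neuron index within that node, branching threshold, and the side
    # of the split (1 for "greater than", -1 for "smaller than").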
nb_steps = 100
slope_ss_schedule = optax.exponential_decay(1e-2, 1, 0.95)
lag_ss_schedule = optax.exponential_decay(1, 1, 0.95)
slope_opt = optax.adam(slope_ss_schedule)
lag_opt = optax.adam(lag_ss_schedule)
tighter_branching_decisions_tensors = branch_selection.branching_decisions_tensors(
tighter_branching_decisions_list, 5, 4)
tighter_branched_bounds = blwb.lagrangian_backward_linear_compute_bounds(
slope_opt, lag_opt, nb_steps, lin_network,
tighter_branching_decisions_tensors, input_bound)
# Let's check that the bounds have improved.
self.assertGreater(tighter_branched_bounds.lower, unbranched_bounds.lower)
self.assertLess(tighter_branched_bounds.upper, unbranched_bounds.upper)
# Let's check that the bounds have not become better than they should be.
self.assertLessEqual(tighter_branched_bounds.lower, 0.)
self.assertGreaterEqual(tighter_branched_bounds.upper, 2.)
## Second block of test: imposing useless constraints.
useless_branching_decisions_list = [
# In layer `layer_index`, neuron 0 is greater than -5.
branch_selection.BranchDecision(layer_index, 0, -5., 1),
# In layer `layer_index`, neuron 0 is smaller than 5.
branch_selection.BranchDecision(layer_index, 0, 5., -1),
]
useless_branching_decisions_tensors = branch_selection.branching_decisions_tensors(
useless_branching_decisions_list, 5, 4)
useless_branched_bounds = blwb.lagrangian_backward_linear_compute_bounds(
slope_opt, lag_opt, nb_steps, lin_network,
useless_branching_decisions_tensors, input_bound)
# Let's check that the bounds are still valid.
self.assertLessEqual(useless_branched_bounds.lower, -3.)
self.assertGreaterEqual(useless_branched_bounds.upper, 3.)
## Third block of test: Verifying that even with a bad optimizer, we keep
## valid bounds.
bad_opt = optax.adam(-10)
bad_opt_bounds = blwb.lagrangian_backward_linear_compute_bounds(
bad_opt, bad_opt, nb_steps, lin_network,
useless_branching_decisions_tensors, input_bound)
# Let's check that the bounds are still valid.
self.assertLessEqual(bad_opt_bounds.lower, -3.)
self.assertGreaterEqual(bad_opt_bounds.upper, 3.)
def test_relu_network(self):
"""Test the imposition of constraints in an intermediate ReLU layer.
We're going to design the network in a way to be able to reason easily
about bounds and branching decisions.
2 inputs: x, y
    4 hidden units: (x-y, x-y, y-x, y-x)
The four hidden units go through a ReLU
Final layer weights of (1, -1, 1, -1)
The output of the network is always 0, and the hidden units are all
correlated. However, if you do a convex or linear relaxation of the network,
    you are not going to get tight bounds (the ReLUs will have to be relaxed,
    and given that they have different signs on their output, some will be set
    at their lower bound while some at their upper bound, making the bound
    non-zero).
"""
def relu_net(inp):
lin_1_weight = jnp.array([[1., -1.],
[1., -1.],
[-1., 1.],
[-1., 1.]])
lin_2_weight = jnp.array([[1., -1., 1., -1.]])
return lin_2_weight @ (jax.nn.relu(lin_1_weight @ inp))
input_bound = jax_verify.IntervalBound(-jnp.ones((2,)),
jnp.ones((2,)))
# Let's first check we get the expected bounds.
unbranched_bounds = jax_verify.backward_crown_bound_propagation(
relu_net, input_bound)
self.assertGreater(unbranched_bounds.upper, 1.)
self.assertLess(unbranched_bounds.lower, -1.)
lin_layer_indexes = _find_linear_layers_indexes(relu_net, input_bound)
self.assertLen(lin_layer_indexes, 2)
ini_layer_index = lin_layer_indexes[0]
upper_branching_decisions_list = [
# In layer `ini_layer_index`, neuron 0 is greater than 0.
branch_selection.BranchDecision(ini_layer_index, 0, 0., 1),
]
    # As we impose this constraint, it should ideally also impose the
    # constraint that the second neuron is greater than 0 (they have the same
    # coefficients). Similarly, it should force the other neurons (3 and 4) to
    # be smaller than 0.
# As a result, all the ReLUs would be fixed, which means that there would be
# no looseness, and the bound verification method should obtain a tight
# bound.
nb_steps = 100
slope_ss_schedule = optax.exponential_decay(1e-2, 1, 0.95)
lag_ss_schedule = optax.exponential_decay(1, 1, 0.95)
slope_opt = optax.adam(slope_ss_schedule)
lag_opt = optax.adam(lag_ss_schedule)
upper_branching_decisions_tensors = branch_selection.branching_decisions_tensors(
upper_branching_decisions_list, 3, 4)
upper_branched_bounds = blwb.lagrangian_backward_linear_compute_bounds(
slope_opt, lag_opt, nb_steps, relu_net,
upper_branching_decisions_tensors, input_bound)
self.assertAlmostEqual(upper_branched_bounds.lower, 0., delta=1e-4)
self.assertAlmostEqual(upper_branched_bounds.upper, 0., delta=1e-4)
# Let's also impose the lower equivalent constraint (as if we were doing a
# branch-and bound process.)
lower_branching_decisions_list = [
# In layer `ini_layer_index`, neuron 0 is smaller than 0.
branch_selection.BranchDecision(ini_layer_index, 0, 0., -1),
]
lower_branching_decisions_tensors = branch_selection.branching_decisions_tensors(
lower_branching_decisions_list, 3, 4)
lower_branched_bounds = blwb.lagrangian_backward_linear_compute_bounds(
slope_opt, lag_opt, nb_steps, relu_net,
lower_branching_decisions_tensors, input_bound)
self.assertAlmostEqual(lower_branched_bounds.lower, 0., delta=1e-4)
self.assertAlmostEqual(lower_branched_bounds.upper, 0., delta=1e-4)
    # Let's also test what happens when we add unsatisfiable constraints.
    # In practice, this should result in the lower bounds going to
# +infinity and the upper bounds going to -infinity.
# This can be observed by them crossing.
impossible_branching_decisions_list = [
        # In layer `ini_layer_index`, neuron 0 (x-y) is greater than 1.
branch_selection.BranchDecision(ini_layer_index, 0, 1., 1),
        # In layer `ini_layer_index`, neuron 1 (x-y) is smaller than -1.
branch_selection.BranchDecision(ini_layer_index, 1, -1., -1),
]
impossible_branching_decisions_tensors = branch_selection.branching_decisions_tensors(
impossible_branching_decisions_list, 2, 4)
nb_steps_bounds = blwb.lagrangian_backward_linear_compute_bounds(
slope_opt, lag_opt, nb_steps, relu_net,
impossible_branching_decisions_tensors, input_bound)
nb_steps_times_2_bounds = blwb.lagrangian_backward_linear_compute_bounds(
slope_opt, lag_opt, 2 * nb_steps, relu_net,
impossible_branching_decisions_tensors, input_bound)
# Observe that the bounds are crossing.
self.assertGreater(nb_steps_bounds.lower, nb_steps_bounds.upper)
self.assertGreater(nb_steps_times_2_bounds.lower,
nb_steps_times_2_bounds.upper)
# Make sure that the bounds are diverging.
self.assertGreater(nb_steps_times_2_bounds.lower, nb_steps_bounds.lower)
self.assertLess(nb_steps_times_2_bounds.upper, nb_steps_bounds.upper)
def test_nobranch_noimpact(self):
"""When we impose no branching decisions, we should match alpha-crown."""
architecture = [16, 8, 8, 2]
problem_key = jax.random.PRNGKey(42)
fun, (lb, ub) = test_utils.set_up_toy_problem(problem_key, 2, architecture)
nb_steps = 100
slope_ss_schedule = optax.exponential_decay(1e-2, 1, 0.95)
lag_ss_schedule = optax.exponential_decay(1, 1, 0.95)
slope_opt = optax.adam(slope_ss_schedule)
lag_opt = optax.adam(lag_ss_schedule)
slope_optimizer = optimizers.OptaxOptimizer(slope_opt, num_steps=nb_steps)
branch_optimizer = optimizers.OptaxOptimizer(
blwb.slope_and_lagrangian_optimizer(slope_opt, lag_opt),
num_steps=nb_steps)
reference_transform = backward_crown.OptimizingLinearBoundBackwardTransform(
linear_relaxations.parameterized_relaxer,
backward_crown.CONCRETIZE_ARGS_PRIMITIVE, slope_optimizer)
reference_concretizer = concretization.ChunkedBackwardConcretizer(
reference_transform, max_chunk_size=0)
reference_algorithm = concretization.BackwardConcretizingAlgorithm(
reference_concretizer)
branching_decisions_list = []
branching_decisions_tensor = branch_selection.branching_decisions_tensors(
branching_decisions_list, 2, 4)
nobranch_transform = blwb.BranchedOptimizingLinearBoundBackwardTransform(
branching_decisions_tensor, linear_relaxations.parameterized_relaxer,
backward_crown.CONCRETIZE_ARGS_PRIMITIVE, branch_optimizer)
nobranch_concretizer = concretization.ChunkedBackwardConcretizer(
nobranch_transform, max_chunk_size=0)
nobranch_algorithm = concretization.BackwardConcretizingAlgorithm(
nobranch_concretizer)
inp_bound = jax_verify.IntervalBound(lb, ub)
reference_bound, _ = bound_propagation.bound_propagation(
reference_algorithm, fun, inp_bound)
nobranch_bound, _ = bound_propagation.bound_propagation(
nobranch_algorithm, fun, inp_bound)
np.testing.assert_array_equal(reference_bound.upper, nobranch_bound.upper) # pytype: disable=attribute-error # jax-ndarray
np.testing.assert_array_equal(reference_bound.lower, nobranch_bound.lower) # pytype: disable=attribute-error # jax-ndarray
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/backward_linearbounds_with_branching_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for CrownIBP."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
import jax
import jax.numpy as jnp
import jax_verify
class CrownIBPBoundTest(parameterized.TestCase):
def assertArrayAlmostEqual(self, lhs, rhs):
diff = jnp.abs(lhs - rhs).max()
self.assertAlmostEqual(diff, 0., delta=1e-5)
def test_fc_crownibp(self):
@hk.without_apply_rng
@hk.transform
def linear_model(inp):
return hk.Linear(1)(inp)
z = jnp.array([[1., 2., 3.]])
params = {'linear':
{'w': jnp.ones((3, 1), dtype=jnp.float32),
'b': jnp.array([2.])}}
input_bounds = jax_verify.IntervalBound(z-1., z+1.)
fun = functools.partial(linear_model.apply, params)
output_bounds = jax_verify.crownibp_bound_propagation(
fun, input_bounds)
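    # With unit weights and bias 2, the exact output range is
    # [(0 + 1 + 2) + 2, (2 + 3 + 4) + 2] = [5, 11].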
self.assertAlmostEqual(5., output_bounds.lower)
self.assertAlmostEqual(11., output_bounds.upper)
def test_conv2d_crownibp(self):
@hk.without_apply_rng
@hk.transform
def conv2d_model(inp):
return hk.Conv2D(output_channels=1, kernel_shape=(2, 2),
padding='VALID', stride=1, with_bias=True)(inp)
z = jnp.array([1., 2., 3., 4.])
z = jnp.reshape(z, [1, 2, 2, 1])
params = {'conv2_d':
{'w': jnp.ones((2, 2, 1, 1), dtype=jnp.float32),
'b': jnp.array([2.])}}
fun = functools.partial(conv2d_model.apply, params)
input_bounds = jax_verify.IntervalBound(z - 1., z + 1.)
output_bounds = jax_verify.crownibp_bound_propagation(
fun, input_bounds)
self.assertAlmostEqual(8., output_bounds.lower)
self.assertAlmostEqual(16., output_bounds.upper)
def test_relu_crownibp(self):
def relu_model(inp):
return jax.nn.relu(inp)
z = jnp.array([[-2., 3.]])
input_bounds = jax_verify.IntervalBound(z - 1., z + 1.)
output_bounds = jax_verify.crownibp_bound_propagation(
relu_model, input_bounds)
self.assertArrayAlmostEqual(jnp.array([[0., 2.]]), output_bounds.lower)
self.assertArrayAlmostEqual(jnp.array([[0., 4.]]), output_bounds.upper)
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/crownibp_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils functions for writing jax_verify tests."""
import functools
from typing import Tuple
import jax
import jax.numpy as jnp
from jax_verify.extensions.sdp_verify import utils
from jax_verify.src import opt_utils
from jax_verify.tests.sdp_verify import test_utils as sdp_test_utils
def sample_bounds(key: jnp.ndarray,
shape: Tuple[int, ...],
minval: float = -2.,
maxval: float = 2.) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Sample some bounds of the required shape.
Args:
key: Random number generator.
shape: Shape of the bounds to generate.
minval: Optional, smallest value that the bounds could take.
maxval: Optional, largest value that the bounds could take.
Returns:
lb, ub: Lower and upper bound tensor
"""
key_0, key_1 = jax.random.split(key)
bound_1 = jax.random.uniform(key_0, shape, minval=minval, maxval=maxval)
bound_2 = jax.random.uniform(key_1, shape, minval=minval, maxval=maxval)
lb = jnp.minimum(bound_1, bound_2)
ub = jnp.maximum(bound_1, bound_2)
return lb, ub
def sample_bounded_points(key: jnp.ndarray,
bounds: Tuple[jnp.ndarray, jnp.ndarray],
nb_points: int,
axis: int = 0) -> jnp.ndarray:
"""Sample uniformly some point respecting the bounds.
Args:
key: Random number generator
bounds: Tuple containing [lower bound, upper bound]
nb_points: How many points to sample.
axis: Which dimension to add to correspond to the number of points.
Returns:
points: Points contained between the given bounds.
"""
lb, ub = bounds
act_shape = lb.shape
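  # Insert a sampling dimension of size nb_points at position `axis`, e.g.
  # axis=0 turns bounds of shape (4, 7) into samples of shape (nb_points, 4, 7).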
to_sample_shape = act_shape[:axis] + (nb_points,) + act_shape[axis:]
unif_samples = jax.random.uniform(key, to_sample_shape)
broad_lb = jnp.expand_dims(lb, axis)
broad_ub = jnp.expand_dims(ub, axis)
bound_range = broad_ub - broad_lb
return broad_lb + unif_samples * bound_range
def sample_bounded_simplex_points(key: jnp.ndarray,
bounds: Tuple[jnp.ndarray, jnp.ndarray],
simplex_sum: float,
nb_points: int) -> jnp.ndarray:
"""Sample some points respecting the bounds as well as a simplex constraint.
Args:
key: Random number generator
bounds: Tuple containing [lower bound, upper bound].
simplex_sum: Value that each datapoint should sum to.
nb_points: How many points to sample.
Returns:
    Points contained between the given bounds and summing to simplex_sum.
"""
lb, ub = bounds
points = sample_bounded_points(key, bounds, nb_points)
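  # Project each sampled point back onto the intersection of the [lb, ub] box
  # and the simplex {x: sum(x) = simplex_sum}.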
project_fun = functools.partial(opt_utils.project_onto_interval_simplex,
lb, ub, simplex_sum)
batch_project_fun = jax.vmap(project_fun)
return batch_project_fun(points)
def set_up_toy_problem(rng_key, batch_size, architecture):
key_1, key_2 = jax.random.split(rng_key)
params = sdp_test_utils.make_mlp_params(architecture, key_2)
inputs = jax.random.uniform(key_1, (batch_size, architecture[0]))
eps = 0.1
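  # The input bounds form an eps-ball around the sampled inputs, clipped to
  # [0, 1].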
lb = jnp.maximum(jnp.minimum(inputs - eps, 1.), 0.)
ub = jnp.maximum(jnp.minimum(inputs + eps, 1.), 0.)
fun = functools.partial(utils.predict_cnn, params)
return fun, (lb, ub)
| jax_verify-master | jax_verify/tests/test_utils.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for propagating bounds through the network."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import jax_verify
from jax_verify.src import bound_propagation
import tree
@hk.without_apply_rng
@hk.transform
def sequential_model(inp):
net = hk.Sequential([
hk.Linear(5), jax.nn.relu,
hk.Linear(2), jax.nn.relu,
hk.Linear(1)
])
return net(inp)
def residual_model_all_act(inp):
mod1 = hk.Linear(5)
mod2 = hk.Linear(5)
mod3 = hk.Linear(1)
act_0 = inp
act_1 = mod1(act_0)
act_2 = jax.nn.relu(act_1)
act_3 = mod2(act_2)
act_4 = jax.nn.relu(act_3)
act_5 = act_2 + act_4 # Residual
final_act = mod3(act_5)
return [act_0, act_1, act_2, act_3, act_4, act_5, final_act]
@hk.without_apply_rng
@hk.transform
def single_element_list_model(inp):
all_acts = residual_model_all_act(inp)
return [all_acts[-1]]
@hk.without_apply_rng
@hk.transform
def dict_output_model(inp):
all_acts = residual_model_all_act(inp)
return {i: act for i, act in enumerate(all_acts)}
@hk.without_apply_rng
@hk.transform
def residual_model_intermediate(inp):
return residual_model_all_act(inp)
@hk.without_apply_rng
@hk.transform
def residual_model(inp):
all_acts = residual_model_all_act(inp)
return all_acts[-1]
class BoundPropagationTest(parameterized.TestCase):
@parameterized.named_parameters(
('Sequential', sequential_model),
('Residual', residual_model)
)
def test_model_structure_nostate(self, model):
z = jnp.array([[1., 2., 3.]])
params = model.init(jax.random.PRNGKey(1), z)
input_bounds = jax_verify.IntervalBound(z - 1.0, z + 1.0)
fun_to_prop = functools.partial(model.apply, params)
output_bounds = jax_verify.interval_bound_propagation(
fun_to_prop, input_bounds)
self.assertTrue(all(output_bounds.upper >= output_bounds.lower))
def test_multioutput_model(self):
z = jnp.array([[1., 2., 3.]])
fun = hk.without_apply_rng(
hk.transform(residual_model_all_act))
params = fun.init(jax.random.PRNGKey(1), z)
input_bounds = jax_verify.IntervalBound(z - 1.0, z + 1.0)
fun_to_prop = functools.partial(fun.apply, params)
output_bounds = jax_verify.interval_bound_propagation(
fun_to_prop, input_bounds)
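    # residual_model_all_act returns 7 activations: the input, 5 intermediate
    # activations and the final output.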
self.assertLen(output_bounds, 7)
@parameterized.named_parameters(
('Sequential', sequential_model),
('Residual', residual_model)
)
def test_tight_bounds_nostate(self, model):
z = jnp.array([[1., 2., 3.]])
params = model.init(jax.random.PRNGKey(1), z)
tight_input_bounds = jax_verify.IntervalBound(z, z)
fun_to_prop = functools.partial(model.apply, params)
tight_output_bounds = jax_verify.interval_bound_propagation(
fun_to_prop, tight_input_bounds)
model_eval = model.apply(params, z)
# Because the input lower bound is equal to the input upper bound, the value
# of the output bounds should be the same and correspond to the value of the
# forward pass.
self.assertAlmostEqual(jnp.abs(tight_output_bounds.upper
- tight_output_bounds.lower).max(), 0.,
delta=1e-6)
self.assertAlmostEqual(jnp.abs(tight_output_bounds.lower
- model_eval).max(), 0.,
delta=1e-6)
@parameterized.named_parameters(
('Sequential', sequential_model),
('Residual', residual_model),
('ResidualAll', residual_model_intermediate),
('1elt_list', single_element_list_model),
('dict_output', dict_output_model)
)
def test_matching_output_structure(self, model):
def _check_matching_structures(output_tree, bound_tree):
"""Replace all bounds/arrays with True, then compare pytrees."""
output_struct = tree.traverse(
lambda x: True if isinstance(x, jnp.ndarray) else None, output_tree)
bound_struct = tree.traverse(
lambda x: True if isinstance(x, bound_propagation.Bound) else None,
bound_tree)
tree.assert_same_structure(output_struct, bound_struct)
z = jnp.array([[1., 2., 3.]])
params = model.init(jax.random.PRNGKey(1), z)
input_bounds = jax_verify.IntervalBound(z - 1.0, z + 1.0)
model_output = model.apply(params, z)
fun_to_prop = functools.partial(model.apply, params)
for boundprop_method in [
jax_verify.interval_bound_propagation,
jax_verify.forward_crown_bound_propagation,
jax_verify.backward_crown_bound_propagation,
jax_verify.forward_fastlin_bound_propagation,
jax_verify.backward_fastlin_bound_propagation,
jax_verify.ibpforwardfastlin_bound_propagation,
]:
output_bounds = boundprop_method(fun_to_prop, input_bounds)
_check_matching_structures(model_output, output_bounds)
def test_jittable_input_bounds(self):
model = sequential_model
z = jnp.array([[1., 2., 3.]])
params = model.init(jax.random.PRNGKey(1), z)
fun_to_prop = functools.partial(model.apply, params)
non_jittable_bounds = jax_verify.IntervalBound(z - 1.0, z + 1.0)
jittable_input_bounds = non_jittable_bounds.to_jittable()
@jax.jit
def bound_prop_fun(inp_bound):
inp_bound, = bound_propagation.unjit_inputs(inp_bound)
bounds = jax_verify.interval_bound_propagation(fun_to_prop, inp_bound)
return bounds.lower, bounds.upper
# check that we can jit the bound prop and pass in jittable bounds.
out_lb, out_ub = bound_prop_fun(jittable_input_bounds)
self.assertTrue(all(out_ub >= out_lb))
# Check that this gives the same result as without the jit
bounds = jax_verify.interval_bound_propagation(fun_to_prop,
non_jittable_bounds)
chex.assert_trees_all_close(out_lb, bounds.lower)
chex.assert_trees_all_close(out_ub, bounds.upper)
class StaticArgumentModel(hk.Module):
def __init__(self):
super().__init__()
self.lin_1 = hk.Linear(5)
self.lin_2 = hk.Linear(10)
def __call__(self, inputs, use_2=True):
if use_2:
return self.lin_2(inputs)
else:
return self.lin_1(inputs)
class StaticArgumentsModelTest(parameterized.TestCase):
def test_staticargument_last(self):
@hk.without_apply_rng
@hk.transform
def forward(inputs, use_2):
model = StaticArgumentModel()
return model(inputs, use_2)
z = jnp.array([[1., 2., 3.]])
params = forward.init(jax.random.PRNGKey(1), z, True)
input_bounds = jax_verify.IntervalBound(z-1.0, z+1.0)
def fun_to_prop(inputs):
return forward.apply(params, inputs, True)
output_bounds = jax_verify.interval_bound_propagation(
fun_to_prop, input_bounds)
self.assertTrue((output_bounds.upper >= output_bounds.lower).all())
def test_staticargument_first(self):
@hk.without_apply_rng
@hk.transform
def forward(use_2, inputs):
model = StaticArgumentModel()
return model(inputs, use_2)
z = jnp.array([[1., 2., 3.]])
params = forward.init(jax.random.PRNGKey(1), True, z)
input_bounds = jax_verify.IntervalBound(z-1.0, z+1.0)
fun_to_prop = functools.partial(forward.apply, params, True)
output_bounds = jax_verify.interval_bound_propagation(
fun_to_prop, input_bounds)
self.assertTrue((output_bounds.upper >= output_bounds.lower).all())
def test_keywords_argument(self):
@hk.without_apply_rng
@hk.transform
def forward(inputs, use_2=False):
model = StaticArgumentModel()
return model(inputs, use_2)
z = jnp.array([[1., 2., 3.]])
params = forward.init(jax.random.PRNGKey(1), z, use_2=True)
input_bounds = jax_verify.IntervalBound(z-1.0, z+1.0)
fun_to_prop = functools.partial(forward.apply, params, use_2=True)
output_bounds = jax_verify.interval_bound_propagation(
fun_to_prop, input_bounds)
self.assertTrue((output_bounds.upper >= output_bounds.lower).all())
class ModelWithState(hk.Module):
def __init__(self):
super().__init__()
self.lin_1 = hk.Linear(5)
self.bn_1 = hk.BatchNorm(True, True, decay_rate=0.999)
def __call__(self, inputs, is_training, test_local_stats=False):
act = self.lin_1(inputs)
bn_act = self.bn_1(act, is_training, test_local_stats)
return bn_act
class StatefulModelTest(parameterized.TestCase):
def test_stateful_model(self):
@hk.transform_with_state
def forward(inputs, is_training, test_local_stats=False):
model = ModelWithState()
return model(inputs, is_training, test_local_stats)
z = jnp.array([[1., 2., 3.]])
params, state = forward.init(jax.random.PRNGKey(1), z, True, False)
def fun_to_prop(inputs):
outs = forward.apply(params, state, jax.random.PRNGKey(1),
inputs, False, False)
# Ignore the outputs that are not the network outputs.
return outs[0]
input_bounds = jax_verify.IntervalBound(z-1.0, z+1.0)
# Consider as static the state, the random generator, and the flags
output_bounds = jax_verify.interval_bound_propagation(
fun_to_prop, input_bounds)
self.assertTrue((output_bounds.upper >= output_bounds.lower).all())
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/bound_propagation_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Forward Linear Bounds."""
import functools
from typing import Callable
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
import jax
import jax.numpy as jnp
import jax_verify
from jax_verify.src import bound_propagation
from jax_verify.src.linear import forward_linear_bounds
from jax_verify.src.linear import linear_relaxations
from jax_verify.tests import test_utils
def get_boundprop(name: str, elision: bool
) -> Callable[..., forward_linear_bounds.LinearBound]:
  if name == 'fastlin':
    relaxer = linear_relaxations.fastlin_rvt_relaxer
  elif name == 'crown':
    relaxer = linear_relaxations.crown_rvt_relaxer
  else:
    raise ValueError(f'Unknown relaxer name: {name}')
transform = forward_linear_bounds.ConcretizingForwardLinearBoundTransform(
relaxer, elision)
algorithm = bound_propagation.ForwardPropagationAlgorithm(transform)
def bound_prop(function, *bounds) -> forward_linear_bounds.LinearBound:
output_bound, _ = bound_propagation.bound_propagation(
algorithm, function, *bounds)
return output_bound # pytype: disable=bad-return-type # jax-ndarray
return bound_prop
class ForwardLinBoundTest(parameterized.TestCase):
def assertArrayAlmostEqual(self, lhs, rhs):
diff = jnp.abs(lhs - rhs).max()
self.assertAlmostEqual(diff, 0.)
def assertArrayGreaterEqual(self, lhs, rhs):
diff = (lhs-rhs).min()
self.assertGreaterEqual(diff, 0.)
@parameterized.named_parameters(
('fastlin_noelision', 'fastlin', False),
('fastlin_elision', 'fastlin', True),
('crown_noelison', 'crown', False),
('crown_elision', 'crown', True))
def test_fc_fastlin(self, name, elision):
@hk.without_apply_rng
@hk.transform
def linear_model(inp):
return hk.Linear(1)(inp)
z = jnp.array([[1., 2., 3.]])
params = {'linear':
{'w': jnp.ones((3, 1), dtype=jnp.float32),
'b': jnp.array([2.])}}
input_bounds = jax_verify.IntervalBound(z-1., z+1.)
fun = functools.partial(linear_model.apply, params)
bound_prop = get_boundprop(name, elision)
output_bounds = bound_prop(fun, input_bounds)
all_linear_functions = list(output_bounds.linear_functions())
self.assertLen(all_linear_functions, 1)
linear_fun = all_linear_functions[0]
self.assertTrue(jnp.all(linear_fun.lower_lin.lin_coeffs == 1.))
self.assertTrue(jnp.all(linear_fun.lower_lin.offset == 2.))
self.assertTrue(jnp.all(linear_fun.upper_lin.lin_coeffs == 1.))
self.assertTrue(jnp.all(linear_fun.upper_lin.offset == 2.))
self.assertArrayAlmostEqual(jnp.array([[0., 1., 2.]]),
linear_fun.reference_bound.bound.lower)
self.assertArrayAlmostEqual(jnp.array([[2., 3., 4.]]),
linear_fun.reference_bound.bound.upper)
self.assertAlmostEqual(5., output_bounds.lower)
self.assertAlmostEqual(11., output_bounds.upper)
@parameterized.named_parameters(
('fastlin_noelision', 'fastlin', False),
('fastlin_elision', 'fastlin', True),
('crown_noelison', 'crown', False),
('crown_elision', 'crown', True))
def test_conv2d_fastlin(self, name, elision):
@hk.without_apply_rng
@hk.transform
def conv2d_model(inp):
return hk.Conv2D(output_channels=1, kernel_shape=(2, 2),
padding='VALID', stride=1, with_bias=True)(inp)
z = jnp.array([1., 2., 3., 4.])
z = jnp.reshape(z, [1, 2, 2, 1])
params = {'conv2_d':
{'w': jnp.ones((2, 2, 1, 1), dtype=jnp.float32),
'b': jnp.array([2.])}}
fun = functools.partial(conv2d_model.apply, params)
input_bounds = jax_verify.IntervalBound(z - 1., z + 1.)
bound_prop = get_boundprop(name, elision)
output_bounds = bound_prop(fun, input_bounds)
self.assertAlmostEqual(8., output_bounds.lower)
self.assertAlmostEqual(16., output_bounds.upper)
@parameterized.named_parameters(
('fastlin_noelision', 'fastlin', False),
('fastlin_elision', 'fastlin', True),
('crown_noelison', 'crown', False),
('crown_elision', 'crown', True))
def test_conv1d_fastlin(self, name, elision):
@hk.without_apply_rng
@hk.transform
def conv1d_model(inp):
return hk.Conv1D(output_channels=1, kernel_shape=2,
padding='VALID', stride=1, with_bias=True)(inp)
z = jnp.array([3., 4.])
z = jnp.reshape(z, [1, 2, 1])
params = {'conv1_d':
{'w': jnp.ones((2, 1, 1), dtype=jnp.float32),
'b': jnp.array([2.])}}
fun = functools.partial(conv1d_model.apply, params)
input_bounds = jax_verify.IntervalBound(z - 1., z + 1.)
bound_prop = get_boundprop(name, elision)
output_bounds = bound_prop(fun, input_bounds)
self.assertAlmostEqual(7., output_bounds.lower, delta=1e-5)
self.assertAlmostEqual(11., output_bounds.upper, delta=1e-5)
@parameterized.named_parameters(
('fastlin_noelision', 'fastlin', False),
('fastlin_elision', 'fastlin', True),
('crown_noelison', 'crown', False),
('crown_elision', 'crown', True))
def test_multiinput_add_fastlin(self, name, elision):
def add_model(inp_1, inp_2):
interm = inp_1 + inp_2
return interm.sum(axis=1)
z_1 = jnp.array([[-1., 1.]])
z_2 = jnp.array([[-1., 1.]])
bound_1 = jax_verify.IntervalBound(z_1 - 1., z_1 + 1.)
bound_2 = jax_verify.IntervalBound(z_2 - 1., z_2 + 1.)
out_lower = (z_1 + z_2 - 2.).sum()
out_upper = (z_1 + z_2 + 2.).sum()
bound_prop = get_boundprop(name, elision)
output_bounds = bound_prop(add_model, bound_1, bound_2)
self.assertArrayAlmostEqual(out_lower, output_bounds.lower)
self.assertArrayAlmostEqual(out_upper, output_bounds.upper)
@parameterized.named_parameters(
('fastlin_noelision', 'fastlin', False),
('fastlin_elision', 'fastlin', True),
('crown_noelison', 'crown', False),
('crown_elision', 'crown', True))
def test_multiinput_sub_fastlin(self, name, elision):
def sub_model(inp_1, inp_2):
interm = inp_1 - inp_2
return interm.sum(axis=1)
z_1 = jnp.array([[-1., 1.]])
z_2 = jnp.array([[-1., 1.]])
bound_1 = jax_verify.IntervalBound(z_1 - 1., z_1 + 1.)
bound_2 = jax_verify.IntervalBound(z_2 - 1., z_2 + 1.)
out_lower = (z_1 - z_2 - 2.).sum()
out_upper = (z_1 - z_2 + 2.).sum()
bound_prop = get_boundprop(name, elision)
output_bounds = bound_prop(sub_model, bound_1, bound_2)
self.assertArrayAlmostEqual(out_lower, output_bounds.lower)
self.assertArrayAlmostEqual(out_upper, output_bounds.upper)
@parameterized.named_parameters(
('fastlin_noelision', 'fastlin', False),
('fastlin_elision', 'fastlin', True),
('crown_noelison', 'crown', False),
('crown_elision', 'crown', True))
def test_multiinput_concatenate_fastlin(self, name, elision):
def concatenate_and_sum_model(inp_1, inp_2):
interm = jnp.concatenate((inp_1, inp_2), axis=1)
return interm.sum(axis=1)
z_1 = jnp.array([[-1., 1.]])
z_2 = jnp.array([[-1., 1.]])
bound_1 = jax_verify.IntervalBound(z_1 - 1., z_1 + 1.)
bound_2 = jax_verify.IntervalBound(z_2 - 1., z_2 + 1.)
out_lower = (z_1 + z_2 - 2.).sum()
    out_upper = (z_1 + z_2 + 2.).sum()
bound_prop = get_boundprop(name, elision)
output_bounds = bound_prop(concatenate_and_sum_model, bound_1, bound_2)
self.assertArrayAlmostEqual(out_lower, output_bounds.lower)
self.assertArrayAlmostEqual(out_upper, output_bounds.upper)
def test_relu_fixed_fastlin(self):
def relu_model(inp):
return jax.nn.relu(inp)
z = jnp.array([[-2., 3.]])
input_bounds = jax_verify.IntervalBound(z - 1., z + 1.)
output_bounds = jax_verify.forward_fastlin_bound_propagation(relu_model,
input_bounds)
self.assertArrayAlmostEqual(jnp.array([[0., 2.]]), output_bounds.lower)
self.assertArrayAlmostEqual(jnp.array([[0., 4.]]), output_bounds.upper)
def test_relu_random_fastlin(self):
def relu_model(inp):
return jax.nn.relu(inp)
relu_inp_shape = (4, 7)
lb, ub = test_utils.sample_bounds(
jax.random.PRNGKey(0), relu_inp_shape, minval=-10., maxval=10.)
input_bounds = jax_verify.IntervalBound(lb, ub)
output_bounds = jax_verify.forward_fastlin_bound_propagation(
relu_model, input_bounds)
uniform_inps = test_utils.sample_bounded_points(jax.random.PRNGKey(1),
(lb, ub), 100)
uniform_outs = jax.vmap(relu_model)(uniform_inps)
empirical_min = uniform_outs.min(axis=0)
empirical_max = uniform_outs.max(axis=0)
self.assertGreaterEqual((output_bounds.upper - empirical_max).min(), 0.,
'Invalid upper bound for ReLU. The gap '
'between upper bound and empirical max is < 0')
self.assertGreaterEqual((empirical_min - output_bounds.lower).min(), 0.,
                            'Invalid lower bound for ReLU. The gap '
'between emp. min and lower bound is negative.')
def test_exp_fastlin(self):
def exp_model(inp):
return jnp.exp(inp)
exp_inp_shape = (4, 7)
lb, ub = test_utils.sample_bounds(
jax.random.PRNGKey(0), exp_inp_shape, minval=-10., maxval=10.)
input_bounds = jax_verify.IntervalBound(lb, ub)
output_bounds = jax_verify.forward_fastlin_bound_propagation(
exp_model, input_bounds)
uniform_inps = test_utils.sample_bounded_points(jax.random.PRNGKey(1),
(lb, ub), 100)
uniform_outs = jax.vmap(exp_model)(uniform_inps)
empirical_min = uniform_outs.min(axis=0)
empirical_max = uniform_outs.max(axis=0)
self.assertGreaterEqual((output_bounds.upper - empirical_max).min(), 0.,
'Invalid upper bound for Exponential. The gap '
'between upper bound and empirical max is < 0')
self.assertGreaterEqual((empirical_min - output_bounds.lower).min(), 0.,
                            'Invalid lower bound for Exponential. The gap '
'between emp. min and lower bound is negative.')
def test_multiply_fastlin(self):
def multiply_model(lhs, rhs):
return lhs * rhs
mul_inp_shape = (4, 7)
lhs_lb, lhs_ub = test_utils.sample_bounds(
jax.random.PRNGKey(0), mul_inp_shape, minval=-10., maxval=10.)
rhs_lb, rhs_ub = test_utils.sample_bounds(
jax.random.PRNGKey(1), mul_inp_shape, minval=-10., maxval=10.)
lhs_bounds = jax_verify.IntervalBound(lhs_lb, lhs_ub)
rhs_bounds = jax_verify.IntervalBound(rhs_lb, rhs_ub)
output_bounds = jax_verify.forward_fastlin_bound_propagation(
multiply_model, lhs_bounds, rhs_bounds)
uniform_lhs_inps = test_utils.sample_bounded_points(jax.random.PRNGKey(2),
(lhs_lb, lhs_ub), 100)
uniform_rhs_inps = test_utils.sample_bounded_points(jax.random.PRNGKey(3),
(rhs_lb, rhs_ub), 100)
uniform_outs = jax.vmap(multiply_model)(uniform_lhs_inps, uniform_rhs_inps)
empirical_min = uniform_outs.min(axis=0)
empirical_max = uniform_outs.max(axis=0)
self.assertGreaterEqual((output_bounds.upper - empirical_max).min(), 0.,
'Invalid upper bound for Multiply. The gap '
'between upper bound and empirical max is negative')
self.assertGreaterEqual((empirical_min - output_bounds.lower).min(), 0.,
                            'Invalid lower bound for Multiply. The gap '
'between emp. min and lower bound is negative.')
def test_nobatch_batch_inputs(self):
batch_shape = (3, 2)
unbatch_shape = (2, 4)
def bilinear_model(inp_1, inp_2):
return jnp.einsum('bh,hH->bH', inp_1, inp_2)
lb_1, ub_1 = test_utils.sample_bounds(jax.random.PRNGKey(0), batch_shape,
minval=-10, maxval=10.)
lb_2, ub_2 = test_utils.sample_bounds(jax.random.PRNGKey(1), unbatch_shape,
minval=-10, maxval=10.)
bound_1 = jax_verify.IntervalBound(lb_1, ub_1)
bound_2 = jax_verify.IntervalBound(lb_2, ub_2)
output_bounds = jax_verify.forward_fastlin_bound_propagation(
bilinear_model, bound_1, bound_2)
uniform_1 = test_utils.sample_bounded_points(jax.random.PRNGKey(2),
(lb_1, ub_1), 100)
uniform_2 = test_utils.sample_bounded_points(jax.random.PRNGKey(3),
(lb_2, ub_2), 100)
uniform_outs = jax.vmap(bilinear_model)(uniform_1, uniform_2)
empirical_min = uniform_outs.min(axis=0)
empirical_max = uniform_outs.max(axis=0)
self.assertGreaterEqual((output_bounds.upper - empirical_max).min(), 0.,
                            'Invalid upper bound for mix of batched/unbatched '
'input bounds.')
self.assertGreaterEqual((empirical_min - output_bounds.lower).min(), 0.,
                            'Invalid lower bound for mix of batched/unbatched '
'input bounds.')
class IBPFastLinBoundTest(parameterized.TestCase):
def assertArrayAlmostEqual(self, lhs, rhs):
diff = jnp.abs(lhs - rhs).max()
self.assertAlmostEqual(diff, 0.)
def test_fc_fastlin(self):
@hk.without_apply_rng
@hk.transform
def linear_model(inp):
return hk.Linear(1)(inp)
z = jnp.array([[1., 2., 3.]])
params = {'linear':
{'w': jnp.ones((3, 1), dtype=jnp.float32),
'b': jnp.array([2.])}}
input_bounds = jax_verify.IntervalBound(z-1., z+1.)
fun = functools.partial(linear_model.apply, params)
output_bounds = jax_verify.ibpforwardfastlin_bound_propagation(fun,
input_bounds)
self.assertAlmostEqual(5., output_bounds.lower)
self.assertAlmostEqual(11., output_bounds.upper)
def test_conv2d_fastlin(self):
@hk.without_apply_rng
@hk.transform
def conv2d_model(inp):
return hk.Conv2D(output_channels=1, kernel_shape=(2, 2),
padding='VALID', stride=1, with_bias=True)(inp)
z = jnp.array([1., 2., 3., 4.])
z = jnp.reshape(z, [1, 2, 2, 1])
params = {'conv2_d':
{'w': jnp.ones((2, 2, 1, 1), dtype=jnp.float32),
'b': jnp.array([2.])}}
fun = functools.partial(conv2d_model.apply, params)
input_bounds = jax_verify.IntervalBound(z-1., z+1.)
output_bounds = jax_verify.ibpforwardfastlin_bound_propagation(fun,
input_bounds)
self.assertAlmostEqual(8., output_bounds.lower)
self.assertAlmostEqual(16., output_bounds.upper)
def test_conv1d_fastlin(self):
@hk.without_apply_rng
@hk.transform
def conv1d_model(inp):
return hk.Conv1D(output_channels=1, kernel_shape=2,
padding='VALID', stride=1, with_bias=True)(inp)
z = jnp.array([3., 4.])
z = jnp.reshape(z, [1, 2, 1])
params = {'conv1_d':
{'w': jnp.ones((2, 1, 1), dtype=jnp.float32),
'b': jnp.array([2.])}}
fun = functools.partial(conv1d_model.apply, params)
input_bounds = jax_verify.IntervalBound(z-1., z+1.)
output_bounds = jax_verify.ibpforwardfastlin_bound_propagation(fun,
input_bounds)
self.assertAlmostEqual(7., output_bounds.lower, delta=1e-5)
self.assertAlmostEqual(11., output_bounds.upper, delta=1e-5)
def test_relu_fastlin(self):
def relu_model(inp):
return jax.nn.relu(inp)
z = jnp.array([[-2., 3.]])
input_bounds = jax_verify.IntervalBound(z-1., z+1.)
output_bounds = jax_verify.ibpforwardfastlin_bound_propagation(relu_model,
input_bounds)
self.assertArrayAlmostEqual(jnp.array([[0., 2.]]), output_bounds.lower)
self.assertArrayAlmostEqual(jnp.array([[0., 4.]]), output_bounds.upper)
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/forward_linear_bounds_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for backpropagation of sensitivity values."""
from absl.testing import absltest
import chex
import jax
import jax.numpy as jnp
import jax_verify
from jax_verify.src import bound_propagation
from jax_verify.src.branching import backpropagation
class BackpropagationTest(chex.TestCase):
def test_identity_network_leaves_sensitivities_unchanged(self):
# Set up an identity network.
def logits_fn(x):
return x
input_bounds = jax_verify.IntervalBound(
lower_bound=jnp.array([-1., 0., 1.]),
upper_bound=jnp.array([2., 3., 4.]))
# Backpropagation
output_sensitivities = jnp.array([.1, .2, -.3])
sensitivity_computation = backpropagation.SensitivityAlgorithm(
jax_verify.ibp_transform, [(0,)], output_sensitivities)
bound_propagation.bound_propagation(sensitivity_computation, # pytype: disable=wrong-arg-types # jax-ndarray
logits_fn, input_bounds)
input_sensitivities, = sensitivity_computation.target_sensitivities
chex.assert_trees_all_close(input_sensitivities, jnp.array([.1, .2, -.3]))
def test_relu_network_applies_chord_slopes_to_sensitivities(self):
# Set up some ReLUs, with a variety of input bounds:
# 1 blocking, 1 passing, and 3 'ambiguous' (straddling zero).
def logits_fn(x):
return jax.nn.relu(x)
input_bounds = jax_verify.IntervalBound(
lower_bound=jnp.array([-2., 1., -1., -4., -2.]),
upper_bound=jnp.array([-1., 2., 1., 1., 3.]))
# Backpropagation.
output_sensitivities = jnp.array([10., 10., 10., 10., 10.])
sensitivity_computation = backpropagation.SensitivityAlgorithm(
jax_verify.ibp_transform, [(0,)], output_sensitivities)
bound_propagation.bound_propagation(sensitivity_computation, # pytype: disable=wrong-arg-types # jax-ndarray
logits_fn, input_bounds)
input_sensitivities, = sensitivity_computation.target_sensitivities
# Expect blocking neurons to have no sensitivity, passing neurons to have
# full sensitivity, and ambiguous neurons to interpolate between the two.
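    # For an ambiguous ReLU with bounds [l, u], the chord slope is u / (u - l):
    # here 1/2, 1/5 and 3/5, scaling the output sensitivity of 10 to 5, 2 and 6.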
chex.assert_trees_all_close(
input_sensitivities, jnp.array([0., 10., 5., 2., 6.]))
def test_affine_network_applies_transpose_to_sensitivites(self):
# Set up a matmul with bias.
w = jnp.array([[1., 4., -5.], [2., -3., 6.]])
b = jnp.array([20., 30., 40.])
def logits_fn(x):
return x @ w + b
input_bounds = jax_verify.IntervalBound(
lower_bound=jnp.zeros(shape=(1, 2)),
upper_bound=jnp.ones(shape=(1, 2)))
# Backpropagation.
output_sensitivities = jnp.array([[1., 0., -1.]])
sensitivity_computation = backpropagation.SensitivityAlgorithm(
jax_verify.ibp_transform, [(0,)], output_sensitivities)
bound_propagation.bound_propagation(sensitivity_computation, # pytype: disable=wrong-arg-types # jax-ndarray
logits_fn, input_bounds)
input_sensitivities, = sensitivity_computation.target_sensitivities
# Expect the transpose of w to have been applied to the sensitivities.
# The bias will be ignored.
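    # w @ [1, 0, -1]^T = [1*1 + 4*0 - 5*(-1), 2*1 - 3*0 + 6*(-1)] = [6, -4].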
chex.assert_trees_all_close(
input_sensitivities, jnp.array([[6., -4.]]))
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/backpropagation_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for propagating bounds through the networks defined in the Model Zoo.
We do not perform any check on the returned values but simply ensure that the
bound propagation can be performed on those networks.
"""
import pickle
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
import jax
import jax.numpy as jnp
import jax_verify
from jax_verify.src import bound_propagation
from jax_verify.src.mip_solver import cvxpy_relaxation_solver
from jax_verify.src.mip_solver import relaxation
from jax_verify.tests import model_zoo
import numpy as np
class ModelZooModelTests(parameterized.TestCase):
@parameterized.named_parameters(
('SmallResidualModel', model_zoo.SmallResidualModel),
('TinyModel', model_zoo.TinyModel)
)
def test_ibp(self, model_cls):
@hk.transform_with_state
def model_pred(inputs, is_training, test_local_stats=False):
model = model_cls()
return model(inputs, is_training, test_local_stats)
inps = jnp.zeros((4, 28, 28, 1), dtype=jnp.float32)
params, state = model_pred.init(jax.random.PRNGKey(42), inps,
is_training=True)
def logits_fun(inputs):
return model_pred.apply(params, state, None, inputs,
False, test_local_stats=False)[0]
input_bounds = jax_verify.IntervalBound(inps - 1.0, inps + 1.0)
jax_verify.interval_bound_propagation(logits_fun, input_bounds)
@parameterized.named_parameters(
('SmallResidualModel', model_zoo.SmallResidualModel),
('TinyModel', model_zoo.TinyModel)
)
def test_fastlin(self, model_cls):
@hk.transform_with_state
def model_pred(inputs, is_training, test_local_stats=False):
model = model_cls()
return model(inputs, is_training, test_local_stats)
inps = jnp.zeros((4, 28, 28, 1), dtype=jnp.float32)
params, state = model_pred.init(jax.random.PRNGKey(42), inps,
is_training=True)
def logits_fun(inputs):
return model_pred.apply(params, state, None, inputs,
False, test_local_stats=False)[0]
input_bounds = jax_verify.IntervalBound(inps - 1.0, inps + 1.0)
jax_verify.forward_fastlin_bound_propagation(logits_fun, input_bounds)
@parameterized.named_parameters(
('SmallResidualModel', model_zoo.SmallResidualModel),
('TinyModel', model_zoo.TinyModel)
)
def test_ibpfastlin(self, model_cls):
@hk.transform_with_state
def model_pred(inputs, is_training, test_local_stats=False):
model = model_cls()
return model(inputs, is_training, test_local_stats)
inps = jnp.zeros((4, 28, 28, 1), dtype=jnp.float32)
params, state = model_pred.init(jax.random.PRNGKey(42), inps,
is_training=True)
def logits_fun(inputs):
return model_pred.apply(params, state, None, inputs,
False, test_local_stats=False)[0]
input_bounds = jax_verify.IntervalBound(inps - 1.0, inps + 1.0)
jax_verify.ibpforwardfastlin_bound_propagation(logits_fun, input_bounds)
@parameterized.named_parameters(
('SmallResidualModel', model_zoo.SmallResidualModel),
('TinyModel', model_zoo.TinyModel)
)
def test_backward_crown(self, model_cls):
@hk.transform_with_state
def model_pred(inputs, is_training, test_local_stats=False):
model = model_cls()
return model(inputs, is_training, test_local_stats)
inps = jnp.zeros((4, 28, 28, 1), dtype=jnp.float32)
params, state = model_pred.init(jax.random.PRNGKey(42), inps,
is_training=True)
def logits_fun(inputs):
return model_pred.apply(params, state, None, inputs,
False, test_local_stats=False)[0]
input_bounds = jax_verify.IntervalBound(inps - 1.0, inps + 1.0)
jax_verify.backward_crown_bound_propagation(logits_fun, input_bounds)
@parameterized.named_parameters(
('SmallResidualModel', model_zoo.SmallResidualModel),
('TinyModel', model_zoo.TinyModel)
)
def test_crownibp(self, model_cls):
@hk.transform_with_state
def model_pred(inputs, is_training, test_local_stats=False):
model = model_cls()
return model(inputs, is_training, test_local_stats)
inps = jnp.zeros((4, 28, 28, 1), dtype=jnp.float32)
params, state = model_pred.init(jax.random.PRNGKey(42), inps,
is_training=True)
def logits_fun(inputs):
return model_pred.apply(params, state, None, inputs,
False, test_local_stats=False)[0]
input_bounds = jax_verify.IntervalBound(inps - 1.0, inps + 1.0)
jax_verify.crownibp_bound_propagation(logits_fun, input_bounds)
@parameterized.named_parameters(
('SmallResidualModel', model_zoo.SmallResidualModel),
('TinyModel', model_zoo.TinyModel))
def test_nonconvex(self, model_cls):
@hk.transform_with_state
def model_pred(inputs, is_training, test_local_stats=False):
model = model_cls()
return model(inputs, is_training, test_local_stats)
inps = jnp.zeros((4, 28, 28, 1), dtype=jnp.float32)
params, state = model_pred.init(jax.random.PRNGKey(42), inps,
is_training=True)
def logits_fun(inputs):
return model_pred.apply(params, state, None, inputs,
False, test_local_stats=False)[0]
input_bounds = jax_verify.IntervalBound(inps - 1.0, inps + 1.0)
# Test with IBP for intermediate bounds
jax_verify.nonconvex_ibp_bound_propagation(logits_fun, input_bounds)
# Test with nonconvex bound evaluation for intermediate bounds
jax_verify.nonconvex_constopt_bound_propagation(logits_fun, input_bounds)
@parameterized.named_parameters(
('SmallResidualModel', model_zoo.SmallResidualModel),
('TinyModel', model_zoo.TinyModel))
def test_cvxpy_relaxation(self, model_cls):
@hk.transform_with_state
def model_pred(inputs, is_training, test_local_stats=False):
model = model_cls()
return model(inputs, is_training, test_local_stats)
inps = jnp.zeros((4, 28, 28, 1), dtype=jnp.float32)
params, state = model_pred.init(jax.random.PRNGKey(42), inps,
is_training=True)
def logits_fun(inputs):
return model_pred.apply(params, state, None, inputs,
False, test_local_stats=False)[0]
output = logits_fun(inps)
input_bounds = jax_verify.IntervalBound(inps - 1.0, inps + 1.0)
boundprop_transform = jax_verify.ibp_transform
relaxation_transform = relaxation.RelaxationTransform(boundprop_transform)
var, env = bound_propagation.bound_propagation(
bound_propagation.ForwardPropagationAlgorithm(relaxation_transform),
logits_fun, input_bounds)
objective_bias = 0.
objective = jnp.zeros(output.shape[1:]).at[0].set(1)
index = 0
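    # The objective selects the first logit of example `index`; the relaxation
    # yields a lower bound, which cannot exceed the nominal network output.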
lower_bound, _, _ = relaxation.solve_relaxation(
cvxpy_relaxation_solver.CvxpySolver, objective, objective_bias,
var, env, index)
self.assertLessEqual(lower_bound, output[index, 0])
def _predict_mlp(params, inputs):
# pylint: disable=invalid-name
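  # Flattens the inputs and applies a ReLU MLP; params is a list of (W, b)
  # pairs, with no activation after the last layer.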
inputs = np.reshape(inputs, (inputs.shape[0], -1))
for W, b in params[:-1]:
outputs = jnp.dot(inputs, W) + b
inputs = jnp.maximum(outputs, 0)
W, b = params[-1]
return jnp.dot(inputs, W) + b
class SavedModelTests(parameterized.TestCase):
@parameterized.named_parameters(
('PGDNN', 'models/raghunathan18_pgdnn.pkl', 20, 19),
)
def test_mnist_mlp(self, model_name, num_examples, expected_correct):
with jax_verify.open_file('mnist/x_test_first100.npy', 'rb') as f:
mnist_x = np.load(f)
with jax_verify.open_file('mnist/y_test.npy', 'rb') as f:
mnist_y = np.load(f)
with jax_verify.open_file(model_name, 'rb') as f:
params = pickle.load(f) # pytype: disable=wrong-arg-types # due to GFile
logits = np.array(_predict_mlp(params, mnist_x[:num_examples]))
pred_labels = np.argmax(logits, axis=1)
num_correct = np.sum(np.equal(mnist_y[:num_examples], pred_labels))
print(num_correct)
assert num_correct == expected_correct, f'Number correct: {num_correct}'
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/model_zoo_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example architecture to test functions.
"""
import haiku as hk
import jax
class SmallResidualModel(hk.Module):
"""Small network with residual connections.
Smaller version of ResidualModel.
"""
def __init__(self):
super().__init__()
bn_config = {'create_scale': True,
'create_offset': True,
'decay_rate': 0.999}
# Definition of the modules.
self.conv_block = hk.Sequential([
hk.Conv2D(1, (3, 3), stride=3, rate=1), jax.nn.relu,
hk.Conv2D(1, (3, 3), stride=3, rate=1), jax.nn.relu,
])
self.conv_res_block = hk.Sequential([
hk.Conv2D(1, (1, 1), stride=1, rate=1), jax.nn.relu,
hk.Conv2D(1, (1, 1), stride=1, rate=1), jax.nn.relu,
])
self.reshape_mod = hk.Flatten()
self.lin_res_block = [
(hk.Linear(16), hk.BatchNorm(name='lin_batchnorm_0', **bn_config))
]
self.final_linear = hk.Linear(10)
def call_all_act(self, inputs, is_training, test_local_stats=False):
"""Evaluate the model, returning its intermediate activations.
Args:
inputs: BHWC array of images.
is_training: Boolean flag, whether this is during training.
      test_local_stats: Boolean flag, whether local stats are used
when is_training=False (for batchnorm).
Returns:
all_acts: List with the intermediate activations of interest.
"""
all_acts = []
all_acts.append(inputs)
## Forward propagation.
    # First conv block.
act = self.conv_block(inputs)
all_acts.append(act)
# Convolutional residual block.
act = act + self.conv_res_block(act)
all_acts.append(act)
# Reshape before fully connected part.
act = self.reshape_mod(act)
all_acts.append(act)
# Fully connected residual block.
lin_block_act = act
for lin_i, bn_i in self.lin_res_block:
lin_block_act = lin_i(lin_block_act)
lin_block_act = bn_i(lin_block_act, is_training, test_local_stats)
lin_block_act = jax.nn.relu(lin_block_act)
act = act + lin_block_act
all_acts.append(act)
# Final layer.
act = self.final_linear(act)
all_acts.append(act)
return all_acts
def __call__(self, inputs, is_training, test_local_stats=False):
"""Return only the final prediction of the model.
Args:
inputs: BHWC array of images.
is_training: Boolean flag, whether this is during training.
      test_local_stats: Boolean flag, whether local stats are used
when is_training=False (for batchnorm).
Returns:
pred: Array with the predictions, corresponding to the last activations.
"""
all_acts = self.call_all_act(inputs, is_training, test_local_stats)
return all_acts[-1]
class TinyModel(hk.Module):
"""Tiny network.
  Single hidden linear layer.
"""
def __init__(self):
super().__init__()
# Definition of the modules.
self.reshape_mod = hk.Flatten()
self.lin_block = hk.Sequential([
hk.Linear(20), jax.nn.relu,
])
self.final_linear = hk.Linear(10)
def call_all_act(self, inputs, is_training, test_local_stats=False):
"""Evaluate the model, returning its intermediate activations.
Args:
inputs: BHWC array of images.
is_training: Boolean flag, whether this is during training.
      test_local_stats: Boolean flag, whether local stats are used
when is_training=False (for batchnorm).
Returns:
all_acts: List with the intermediate activations of interest.
"""
all_acts = []
all_acts.append(inputs)
act = inputs
## Forward propagation.
act = self.reshape_mod(act)
all_acts.append(act)
# First linear layer.
act = self.lin_block(act)
all_acts.append(act)
# Final layer.
act = self.final_linear(act)
all_acts.append(act)
return all_acts
def __call__(self, inputs, is_training, test_local_stats=False):
"""Return only the final prediction of the model.
Args:
inputs: BHWC array of images.
is_training: Boolean flag, whether this is during training.
      test_local_stats: Boolean flag, whether local stats are used
when is_training=False (for batchnorm).
Returns:
pred: Array with the predictions, corresponding to the last activations.
"""
all_acts = self.call_all_act(inputs, is_training, test_local_stats)
return all_acts[-1]
| jax_verify-master | jax_verify/tests/model_zoo.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for simplifying network computation graphs."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import lax
from jax import numpy as jnp
import jax_verify
from jax_verify.src import synthetic_primitives
class SyntheticPrimitiveDetectorTest(parameterized.TestCase):
def _check_correct_impl(self, graph, simplifier, var_is_bound, *inps):
simplified_graph = synthetic_primitives.simplify_graph(simplifier, graph,
var_is_bound)
graph_outs = jax.core.eval_jaxpr(graph, [], *inps)
simple_graph_outs = jax.core.eval_jaxpr(simplified_graph, [], *inps)
for graph_out, simple_graph_out in zip(graph_outs, simple_graph_outs):
self.assertAlmostEqual(jnp.abs(graph_out - simple_graph_out).max(),
0., delta=1e-6)
def _find_eqn_in_simplified_graph(self, graph, simplifier, var_is_bound,
primitive):
# Check if the primitive is present. This imitates the recursive
# parsing done in bound_propagation, because depending on the platform,
# the primitive might be wrapped in a `custom_jvp_call_jaxpr_p`
# The loop is necessarily terminating because we always remove one level of
# nesting in the graph, so we will necessarily reach a level with no
# subgraph.
simplified_graph = synthetic_primitives.simplify_graph(simplifier, graph,
var_is_bound)
for eqn in simplified_graph.eqns:
if eqn.primitive in synthetic_primitives.SUBGRAPH_PRIMITIVES:
sub_graph = synthetic_primitives.jax_primitive_subgraph(
eqn.primitive, **eqn.params)
subgraph_var_is_bound = {}
for sub_invar, eqn_invar in zip(sub_graph.invars, eqn.invars):
if isinstance(eqn_invar, jax.core.Literal):
subgraph_var_is_bound[sub_invar] = False
else:
subgraph_var_is_bound[sub_invar] = var_is_bound[eqn_invar]
match = self._find_eqn_in_simplified_graph(
sub_graph, simplifier, subgraph_var_is_bound, primitive)
if match:
return match
elif eqn.primitive == primitive:
return eqn
return None
class ActivationDetectorTest(SyntheticPrimitiveDetectorTest):
@parameterized.named_parameters(('jit', True), ('nojit', False))
def test_softplus_detected(self, use_jit):
def softplus_model(inp):
return jax.nn.softplus(inp)
inp = jnp.array([[-2., 3.]])
if use_jit:
softplus_model = jax.jit(softplus_model)
parsed = synthetic_primitives.make_jaxpr_nojit(softplus_model, inp)
var_is_bound = {parsed.jaxpr.invars[0]: True}
found_softplus = self._find_eqn_in_simplified_graph(
parsed.jaxpr,
synthetic_primitives.activation_simplifier,
var_is_bound,
synthetic_primitives.softplus_p)
self.assertIsNotNone(found_softplus)
self._check_correct_impl(
parsed.jaxpr, synthetic_primitives.activation_simplifier,
var_is_bound, inp)
@parameterized.named_parameters(('jit', True), ('nojit', False))
def test_softmax_detected(self, use_jit):
def softmax_model(x):
return jax.nn.softmax(x)
if use_jit:
softmax_model = jax.jit(softmax_model)
inp = jnp.array([[-2., 3.]])
parsed = synthetic_primitives.make_jaxpr_nojit(softmax_model, inp)
var_is_bound = {parsed.jaxpr.invars[0]: True}
match = self._find_eqn_in_simplified_graph(
parsed.jaxpr,
synthetic_primitives.activation_simplifier,
var_is_bound,
synthetic_primitives.softmax_p)
self.assertIsNotNone(match)
self.assertEqual(match.params['axis'], 1) # pytype: disable=attribute-error
self._check_correct_impl(
parsed.jaxpr, synthetic_primitives.activation_simplifier,
var_is_bound, inp)
@parameterized.named_parameters(('jit', True), ('nojit', False))
  def test_softmax_expanded(self, use_jit):
def softmax_model(x):
return jax.nn.softmax(x)
if use_jit:
softmax_model = jax.jit(softmax_model)
inp = jnp.array([[-2., 3.],
[3., 3.1],
[-2., -2.],
[3., 3.]])
parsed = synthetic_primitives.make_jaxpr_nojit(softmax_model, inp)
var_is_bound = {parsed.jaxpr.invars[0]: True}
expand_softmax_simplifier = synthetic_primitives.simplifier_composition(
synthetic_primitives.activation_simplifier,
synthetic_primitives.expand_softmax_simplifier)
# Check that all of the components of the softmax that we would expect to
# find are present.
softmax_primitives = (
lax.exp_p,
lax.reduce_sum_p,
lax.broadcast_in_dim_p,
synthetic_primitives.posreciprocal_p,
lax.mul_p)
for prim_to_match in softmax_primitives:
match = self._find_eqn_in_simplified_graph(
parsed.jaxpr,
expand_softmax_simplifier,
var_is_bound,
prim_to_match)
self.assertIsNotNone(match)
self._check_correct_impl(
parsed.jaxpr, expand_softmax_simplifier,
var_is_bound, inp)
@parameterized.named_parameters(('jit', True), ('nojit', False))
def test_relu_detected(self, use_jit):
def relu_model(x):
return jax.nn.relu(x)
if use_jit:
relu_model = jax.jit(relu_model)
inp = jnp.array([[-2., 3.]])
parsed = synthetic_primitives.make_jaxpr_nojit(relu_model, inp)
var_is_bound = {parsed.jaxpr.invars[0]: True}
match = self._find_eqn_in_simplified_graph(
parsed.jaxpr,
synthetic_primitives.activation_simplifier,
var_is_bound,
synthetic_primitives.relu_p)
self.assertIsNotNone(match)
self._check_correct_impl(
parsed.jaxpr, synthetic_primitives.activation_simplifier,
var_is_bound, inp)
@parameterized.named_parameters(('jit', True), ('nojit', False))
def test_max_with_zero_detected_as_relu(self, use_jit):
def relu_model(x):
return jnp.maximum(x, 0.)
if use_jit:
relu_model = jax.jit(relu_model)
inp = jnp.array([[-2., 3.]])
parsed = synthetic_primitives.make_jaxpr_nojit(relu_model, inp)
var_is_bound = {parsed.jaxpr.invars[0]: True}
match = self._find_eqn_in_simplified_graph(
parsed.jaxpr,
synthetic_primitives.activation_simplifier,
var_is_bound,
synthetic_primitives.relu_p)
self.assertIsNotNone(match)
self._check_correct_impl(
parsed.jaxpr, synthetic_primitives.activation_simplifier,
var_is_bound, inp)
@parameterized.named_parameters(('jit', True), ('nojit', False))
def test_max_with_one_not_mistaken_for_relu(self, use_jit):
def notrelu_model(x):
return jnp.maximum(x, 1.)
if use_jit:
notrelu_model = jax.jit(notrelu_model)
inp = jnp.array([[-2., 3.]])
parsed = synthetic_primitives.make_jaxpr_nojit(notrelu_model, inp)
var_is_bound = {parsed.jaxpr.invars[0]: True}
match = self._find_eqn_in_simplified_graph(
parsed.jaxpr,
synthetic_primitives.activation_simplifier,
var_is_bound,
synthetic_primitives.relu_p)
self.assertIsNone(match)
self._check_correct_impl(
parsed.jaxpr, synthetic_primitives.activation_simplifier,
var_is_bound, inp)
@parameterized.named_parameters(('jit', True), ('nojit', False))
def test_leaky_relu_detected(self, use_jit):
def leaky_relu_model(x):
return jax.nn.leaky_relu(x)
if use_jit:
leaky_relu_model = jax.jit(leaky_relu_model)
inp = jnp.array([[-2., 3.]])
parsed = synthetic_primitives.make_jaxpr_nojit(leaky_relu_model, inp)
var_is_bound = {parsed.jaxpr.invars[0]: True}
match = self._find_eqn_in_simplified_graph(
parsed.jaxpr,
synthetic_primitives.activation_simplifier,
var_is_bound,
synthetic_primitives.leaky_relu_p)
self.assertIsNotNone(match)
self._check_correct_impl(
parsed.jaxpr, synthetic_primitives.activation_simplifier,
var_is_bound, inp)
@parameterized.named_parameters(('jit', True), ('nojit', False))
def test_sigmoid_detected(self, use_jit):
def sigmoid_model(x):
return jax.nn.sigmoid(x)
if use_jit:
sigmoid_model = jax.jit(sigmoid_model)
inp = jnp.array([[-2., 3.]])
parsed = synthetic_primitives.make_jaxpr_nojit(sigmoid_model, inp)
var_is_bound = {parsed.jaxpr.invars[0]: True}
match = self._find_eqn_in_simplified_graph(
parsed.jaxpr,
synthetic_primitives.activation_simplifier,
var_is_bound,
synthetic_primitives.sigmoid_p)
self.assertIsNotNone(match)
self._check_correct_impl(
parsed.jaxpr, synthetic_primitives.activation_simplifier,
var_is_bound, inp)
@parameterized.named_parameters(('jit', True), ('nojit', False))
def test_clip_detected(self, use_jit):
def clip_model(x):
return jnp.clip(x, a_min=0., a_max=1.)
if use_jit:
clip_model = jax.jit(clip_model)
inp = jnp.array([[-2., 3., 0.5]])
parsed = synthetic_primitives.make_jaxpr_nojit(clip_model, inp)
var_is_bound = {parsed.jaxpr.invars[0]: True}
match = self._find_eqn_in_simplified_graph(
parsed.jaxpr,
synthetic_primitives.activation_simplifier,
var_is_bound,
synthetic_primitives.clip_p)
self.assertIsNotNone(match)
self._check_correct_impl(
parsed.jaxpr, synthetic_primitives.activation_simplifier,
var_is_bound, inp)
@parameterized.product(
tested_fun=[jnp.minimum, jnp.maximum],
use_jit=[True, False],
include_linear=[True, False],
both_inp_bounds=[True, False],
broadcasting=[True, False])
def test_elementwise_minmax_replaced(
self,
tested_fun,
use_jit,
include_linear,
both_inp_bounds,
broadcasting,
):
def model_fun(inp_0, inp_1):
if include_linear:
lin_weight = jax.random.uniform(jax.random.PRNGKey(0),
inp_0.shape)
act = inp_0 * lin_weight
else:
act = inp_0
return tested_fun(act, inp_1)
if use_jit:
model_fun = jax.jit(model_fun)
if broadcasting:
shape_0 = (1, 8)
shape_1 = (7, 1)
else:
shape_0 = (2, 4)
shape_1 = (2, 4)
inp_0 = jax.random.uniform(jax.random.PRNGKey(0), shape_0)
inp_1 = jax.random.uniform(jax.random.PRNGKey(0), shape_1)
parsed = synthetic_primitives.make_jaxpr_nojit(model_fun, inp_0, inp_1)
var_is_bound = {parsed.jaxpr.invars[0]: True,
parsed.jaxpr.invars[1]: both_inp_bounds}
# Check that this is rewritten using a ReLU.
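    # (This presumably relies on identities such as max(a, b) = a + relu(b - a)
    # and min(a, b) = a - relu(a - b).)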
relu_match = self._find_eqn_in_simplified_graph(
parsed.jaxpr,
synthetic_primitives.default_simplifier,
var_is_bound, synthetic_primitives.relu_p)
self.assertIsNotNone(relu_match)
self._check_correct_impl(
parsed.jaxpr, synthetic_primitives.default_simplifier, var_is_bound,
inp_0, inp_1)
@parameterized.named_parameters(('jit', True), ('nojit', False))
def test_linear_detected(self, use_jit):
inp = jnp.array([[-1., 1.]])
key = jax.random.PRNGKey(0)
key_w, key_b = jax.random.split(key, 2)
w1 = jax.random.uniform(key_w, shape=(2, 5))
b1 = jax.random.uniform(key_b, shape=(5,))
def linear_model(inp, w1, b1):
"""Linear function involving several different linear operators."""
y = inp @ w1 + b1
centered_y = y - y.mean()
return centered_y.sum()
if use_jit:
linear_model = jax.jit(linear_model)
parsed = synthetic_primitives.make_jaxpr_nojit(linear_model, inp, w1, b1)
var_is_bound = {invar: is_bound for invar, is_bound
in zip(parsed.jaxpr.invars, [True, False, False])}
match = self._find_eqn_in_simplified_graph(
parsed.jaxpr,
synthetic_primitives.group_linear_sequence,
var_is_bound,
synthetic_primitives.linear_p
)
# Check that all the components that we expect are there.
linear_subgraph = match.params['jax_verify_subgraph']
subgraph_primitives = [eqn.primitive for eqn in linear_subgraph.eqns]
self.assertIn(lax.dot_general_p, subgraph_primitives)
self.assertIn(lax.add_p, subgraph_primitives)
    # There are two reduce_sums, one for the final sum, one for the mean.
self.assertEqual(2, sum(prim == lax.reduce_sum_p
for prim in subgraph_primitives))
    # The mean also introduces a div.
self.assertIn(lax.div_p, subgraph_primitives)
self.assertIn(lax.sub_p, subgraph_primitives)
# Let's check that the simplification has not modified the behaviour of the
# model and can be forwarded through.
self._check_correct_impl(
parsed.jaxpr, synthetic_primitives.group_linear_sequence,
var_is_bound, inp, w1, b1)
@parameterized.named_parameters(('jit', True), ('nojit', False))
def test_fusedrelu_detected(self, use_jit):
inp = jnp.array([-1., 1.])
key = jax.random.PRNGKey(0)
key_w, key_b = jax.random.split(key, 2)
w = jax.random.uniform(key_w, shape=(2, 5))
b = jax.random.uniform(key_b, shape=(5,))
def net_model(inp, w, b):
return jax.nn.relu(inp @ w + b)
if use_jit:
net_model = jax.jit(net_model)
parsed = synthetic_primitives.make_jaxpr_nojit(net_model, inp, w, b)
var_is_bound = {invar: is_bound for invar, is_bound
in zip(parsed.jaxpr.invars, [True] + [False] * 2)}
match = self._find_eqn_in_simplified_graph(
parsed.jaxpr,
synthetic_primitives.fused_relu_simplifier,
var_is_bound,
synthetic_primitives.fused_relu_p)
self.assertIsNotNone(match)
# Let's check that the simplification has not modified the behaviour of the
# model and can be forwarded through.
self._check_correct_impl(
parsed.jaxpr, synthetic_primitives.fused_relu_simplifier,
var_is_bound, inp, w, b)
@parameterized.named_parameters(('jit', True), ('nojit', False))
def test_support_weaktype_input(self, use_jit):
inp = jnp.asarray(0.)
def net_model(inp):
return jnp.zeros(()) * inp
if use_jit:
net_model = jax.jit(net_model)
parsed = synthetic_primitives.make_jaxpr_nojit(net_model, inp)
var_is_bound = {parsed.jaxpr.invars[0]: True}
match = self._find_eqn_in_simplified_graph(
parsed.jaxpr,
synthetic_primitives.activation_simplifier,
var_is_bound,
synthetic_primitives.convert_float32_p)
self.assertIsNotNone(match)
    # Let's check that the simplification has not modified the behaviour of the
    # model and that the simplified graph can still be forwarded through.
self._check_correct_impl(
parsed.jaxpr, synthetic_primitives.fused_relu_simplifier,
var_is_bound, inp)
# Let's check that propagating bounds through this does not cause errors.
jax_verify.interval_bound_propagation(
net_model, jax_verify.IntervalBound(inp, inp))
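# A minimal sketch of how the primitives appearing in a traced function can
# be listed, which is the kind of jaxpr inspection the tests above build on.
# The helper below is purely illustrative (its name is made up for this
# sketch) and only relies on standard jax tracing.
def _list_primitive_names(fun, *example_inputs):
  """Returns the names of the primitives in the jaxpr traced from `fun`."""
  closed_jaxpr = jax.make_jaxpr(fun)(*example_inputs)
  return [eqn.primitive.name for eqn in closed_jaxpr.jaxpr.eqns]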
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/synthetic_primitives_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Interval Bound Propagation."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
import jax
import jax.numpy as jnp
import jax_verify
import numpy as np
class IBPTest(parameterized.TestCase):
def assertArrayAlmostEqual(self, lhs, rhs):
diff = jnp.abs(lhs - rhs).max()
self.assertAlmostEqual(diff, 0.)
def test_linear_ibp(self):
def linear_model(inp):
return hk.Linear(1)(inp)
z = jnp.array([[1., 2., 3.]])
params = {'linear':
{'w': jnp.ones((3, 1), dtype=jnp.float32),
'b': jnp.array([2.])}}
fun = functools.partial(
hk.without_apply_rng(hk.transform(linear_model)).apply,
params)
input_bounds = jax_verify.IntervalBound(z - 1., z + 1.)
output_bounds = jax_verify.interval_bound_propagation(fun, input_bounds)
self.assertAlmostEqual(5., output_bounds.lower)
self.assertAlmostEqual(11., output_bounds.upper)
fused_output_bounds = jax_verify.interval_bound_propagation(
fun, input_bounds, fused_linear=True)
self.assertAlmostEqual(5., fused_output_bounds.lower)
self.assertAlmostEqual(11., fused_output_bounds.upper)
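  # A minimal sketch (not jax_verify's implementation) of the interval
  # arithmetic that the linear-layer test above relies on: propagating a box
  # through an affine map y = x @ w + b with the centre/radius formulation.
  # With the weights and bounds used in `test_linear_ibp`, it reproduces the
  # expected [5., 11.] output interval.
  def _manual_affine_interval_sketch(self, lower, upper, w, b):
    """Illustrative interval propagation through an affine layer."""
    centre = (lower + upper) / 2.
    radius = (upper - lower) / 2.
    out_centre = centre @ w + b
    out_radius = radius @ jnp.abs(w)
    return out_centre - out_radius, out_centre + out_radius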
def test_conv1d_ibp(self):
def conv1d_model(inp):
return hk.Conv1D(output_channels=1, kernel_shape=2,
padding='VALID', stride=1, with_bias=True)(inp)
z = jnp.array([3., 4.])
z = jnp.reshape(z, [1, 2, 1])
params = {'conv1_d':
{'w': jnp.ones((2, 1, 1), dtype=jnp.float32),
'b': jnp.array([2.])}}
fun = functools.partial(
hk.without_apply_rng(hk.transform(conv1d_model)).apply,
params)
input_bounds = jax_verify.IntervalBound(z - 1., z + 1.)
output_bounds = jax_verify.interval_bound_propagation(fun, input_bounds)
self.assertAlmostEqual(7., output_bounds.lower, delta=1e-5)
self.assertAlmostEqual(11., output_bounds.upper, delta=1e-5)
fused_output_bounds = jax_verify.interval_bound_propagation(
fun, input_bounds, fused_linear=True)
self.assertAlmostEqual(7., fused_output_bounds.lower, delta=1e-5)
self.assertAlmostEqual(11., fused_output_bounds.upper, delta=1e-5)
def test_conv2d_ibp(self):
def conv2d_model(inp):
return hk.Conv2D(output_channels=1, kernel_shape=(2, 2),
padding='VALID', stride=1, with_bias=True)(inp)
z = jnp.array([1., 2., 3., 4.])
z = jnp.reshape(z, [1, 2, 2, 1])
params = {'conv2_d':
{'w': jnp.ones((2, 2, 1, 1), dtype=jnp.float32),
'b': jnp.array([2.])}}
fun = functools.partial(
hk.without_apply_rng(hk.transform(conv2d_model)).apply,
params)
input_bounds = jax_verify.IntervalBound(z - 1., z + 1.)
output_bounds = jax_verify.interval_bound_propagation(fun, input_bounds)
self.assertAlmostEqual(8., output_bounds.lower)
self.assertAlmostEqual(16., output_bounds.upper)
fused_output_bounds = jax_verify.interval_bound_propagation(
fun, input_bounds, fused_linear=True)
self.assertAlmostEqual(8., fused_output_bounds.lower)
self.assertAlmostEqual(16., fused_output_bounds.upper)
@parameterized.named_parameters(
('exp', jnp.exp, [[-2.0, 3.0]]),
('log', jnp.log, [[3.0, 5.0]]),
('relu', jax.nn.relu, [[-2.0, 3.0]]),
('softplus', jax.nn.softplus, [[-2.0, 3.0]]),
('sign', jnp.sign, [[-2.0, 3.0]]),
('sigmoid', jax.nn.sigmoid, [[-2.0, 3.0]]),
(
'dynamic_slice',
lambda x: jax.lax.dynamic_slice(x, (1, 2, 3), (2, 1, 1)),
np.arange(24).reshape((2, 3, 4)),
),
)
def test_passthrough_primitive(self, fn, inputs):
z = jnp.array(inputs)
input_bounds = jax_verify.IntervalBound(z - 1., z + 1.)
output_bounds = jax_verify.interval_bound_propagation(fn, input_bounds)
self.assertArrayAlmostEqual(fn(input_bounds.lower), output_bounds.lower)
self.assertArrayAlmostEqual(fn(input_bounds.upper), output_bounds.upper)
def test_ibp_neg(self):
fn = lambda x: -x
input_bounds = jax_verify.IntervalBound(jnp.zeros((2,)), jnp.ones((2,)))
output_bounds = jax_verify.interval_bound_propagation(fn, input_bounds)
self.assertArrayAlmostEqual(output_bounds.lower, -jnp.ones((2,)))
self.assertArrayAlmostEqual(output_bounds.upper, jnp.zeros((2,)))
@parameterized.named_parameters(
('positive', (1.0, 4.0), (1.0, 2.0)),
('negative', (-4.0, -1.0), (float('nan'), float('nan'))),
('zero_edge', (0.0, 1.0), (0.0, 1.0)),
('zero_cross', (-1.0, 1.0), (float('nan'), 1.0)))
def test_sqrt(self, input_bounds, expected):
input_bounds = jax_verify.IntervalBound( # pytype: disable=wrong-arg-types # jax-ndarray
np.array([input_bounds[0], 0.0]), np.array([input_bounds[1], 0.0]))
output_bounds = jax_verify.interval_bound_propagation(
jnp.sqrt, input_bounds)
np.testing.assert_array_equal(
np.array([expected[0], 0.0]), output_bounds.lower)
np.testing.assert_array_equal(
np.array([expected[1], 0.0]), output_bounds.upper)
@parameterized.named_parameters(
('square_positive', 2, (1.0, 2.0), (1.0, 4.0)),
('square_negative', 2, (-2.0, -1.0), (1.0, 4.0)),
('square_zero', 2, (-1.0, 2.0), (0.0, 4.0)),
('cube_positive', 3, (1.0, 2.0), (1.0, 8.0)),
('cube_negative', 3, (-2.0, -1.0), (-8.0, -1.0)),
('cube_zero', 3, (-1.0, 2.0), (-1.0, 8.0)))
def test_integer_pow(self, exponent, input_bounds, expected):
@jax.jit
def _compute_bounds(lower, upper):
input_bounds = jax_verify.IntervalBound(lower, upper)
output_bounds = jax_verify.interval_bound_propagation(
lambda x: x**exponent, input_bounds)
return output_bounds.lower, output_bounds.upper
output_bounds = _compute_bounds(
np.array([input_bounds[0], 0.0]), np.array([input_bounds[1], 0.0]))
np.testing.assert_array_equal(
np.array([expected[0], 0.0]), output_bounds[0])
np.testing.assert_array_equal(
np.array([expected[1], 0.0]), output_bounds[1])
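# Illustrative sketch of the case analysis exercised by `test_integer_pow`
# above: odd powers are monotone on the whole real line, while even powers
# require taking the max of the endpoint values and clamping the lower bound
# to zero whenever the input interval crosses zero. This helper is not part
# of jax_verify; it is only meant to make the expected values explicit.
def _manual_integer_pow_interval_sketch(lower, upper, exponent):
  """Illustrative interval bounds for x ** exponent on [lower, upper]."""
  lower_pow = lower ** exponent
  upper_pow = upper ** exponent
  if exponent % 2 == 1:
    return lower_pow, upper_pow
  out_upper = jnp.maximum(lower_pow, upper_pow)
  crosses_zero = jnp.logical_and(lower <= 0., upper >= 0.)
  out_lower = jnp.where(crosses_zero, 0., jnp.minimum(lower_pow, upper_pow))
  return out_lower, out_upper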
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/ibp_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the bound propagation on NonConvex bounds.
We don't really check the values, but at least verify that the different
propagation methods run and that the bounds can be evaluated.
"""
import collections
import functools
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
import jax
import jax.numpy as jnp
import jax_verify
from jax_verify.src import bound_propagation
from jax_verify.src import ibp
from jax_verify.src.mip_solver import cvxpy_relaxation_solver
from jax_verify.src.mip_solver import relaxation
from jax_verify.src.nonconvex import duals
from jax_verify.src.nonconvex import nonconvex
from jax_verify.src.nonconvex import optimizers
from jax_verify.tests import test_utils
import numpy as np
def _random_objectives_primal_variables(rng_key, nonconvex_bound,
nb_opt_targets):
# Get a set of primal variables
var_set = {}
for pos, var_shape in nonconvex_bound.variables.items(): # pytype: disable=attribute-error # jax-ndarray
rng_key, new_key = jax.random.split(rng_key)
var_set[pos] = jax.random.uniform(
new_key, shape=(nb_opt_targets, *var_shape))
objectives_dict = {}
for index, prev_bound in nonconvex_bound.previous_bounds.items(): # pytype: disable=attribute-error # jax-ndarray
rng_key, new_key = jax.random.split(rng_key)
bound_shape = prev_bound.shape
linfun_shape = (nb_opt_targets, *bound_shape)
objectives_dict[index] = jax.random.normal(new_key, shape=linfun_shape)
return objectives_dict, var_set
class NonConvexBoundTest(parameterized.TestCase):
@parameterized.named_parameters(
('base_bound', jax_verify.nonconvex_ibp_bound_propagation),
('inner_opt', jax_verify.nonconvex_constopt_bound_propagation))
def test_fc_nonconvex(self, boundprop_fun):
@hk.without_apply_rng
@hk.transform
def linear_model(inp):
return hk.Linear(1)(inp)
z = jnp.array([[1., 2., 3.]])
params = {'linear':
{'w': jnp.ones((3, 1), dtype=jnp.float32),
'b': jnp.array([2.])}}
input_bounds = jax_verify.IntervalBound(z-1., z+1.)
fun = functools.partial(linear_model.apply, params)
output_bounds = boundprop_fun(fun, input_bounds)
concretizer = optimizers.OptimizingConcretizer(
optimizers.PGDOptimizer(0, 0.), 0)
final_bounds = concretizer.get_bounds(output_bounds)
self.assertTrue(all(final_bounds.upper >= final_bounds.lower))
@parameterized.named_parameters(
('base_bound', jax_verify.nonconvex_ibp_bound_propagation),
('inner_opt', jax_verify.nonconvex_constopt_bound_propagation))
def test_conv2d_nonconvex(self, boundprop_fun):
@hk.without_apply_rng
@hk.transform
def conv2d_model(inp):
return hk.Conv2D(output_channels=1, kernel_shape=(2, 2),
padding='VALID', stride=1, with_bias=True)(inp)
z = jnp.array([1., 2., 3., 4.])
z = jnp.reshape(z, [1, 2, 2, 1])
params = {'conv2_d':
{'w': jnp.ones((2, 2, 1, 1), dtype=jnp.float32),
'b': jnp.array([2.])}}
fun = functools.partial(conv2d_model.apply, params)
input_bounds = jax_verify.IntervalBound(z - 1., z + 1.)
output_bounds = boundprop_fun(fun, input_bounds)
concretizer = optimizers.OptimizingConcretizer(
optimizers.PGDOptimizer(0, 0.), 0)
final_bounds = concretizer.get_bounds(output_bounds)
self.assertTrue(all(final_bounds.upper >= final_bounds.lower))
@parameterized.named_parameters(
('base_bound', jax_verify.nonconvex_ibp_bound_propagation),
('inner_opt', jax_verify.nonconvex_constopt_bound_propagation))
def test_relu_nonconvex(self, boundprop_fun):
def relu_model(inp):
return jax.nn.relu(inp)
z = jnp.array([[-2., 3.]])
input_bounds = jax_verify.IntervalBound(z - 1., z + 1.)
output_bounds = boundprop_fun(relu_model, input_bounds)
concretizer = optimizers.OptimizingConcretizer(
optimizers.PGDOptimizer(0, 0.), 0)
final_bounds = concretizer.get_bounds(output_bounds)
self.assertTrue((final_bounds.upper >= final_bounds.lower).all())
@parameterized.named_parameters(
('base_bound', jax_verify.nonconvex_ibp_bound_propagation),
('inner_opt', jax_verify.nonconvex_constopt_bound_propagation))
def test_softplus_nonconvex(self, boundprop_fun):
def softplus_model(inp):
return jax.nn.softplus(inp)
z = jnp.array([[-2., 3.]])
input_bounds = jax_verify.IntervalBound(z - 1., z + 1.)
output_bounds = boundprop_fun(softplus_model, input_bounds)
concretizer = optimizers.OptimizingConcretizer(
optimizers.PGDOptimizer(0, 0.), 0)
final_bounds = concretizer.get_bounds(output_bounds)
self.assertTrue((final_bounds.upper >= final_bounds.lower).all())
def test_dualopt_noncrossing_bounds(self):
randgen = np.random.RandomState(42)
params = [
(randgen.normal(size=(784, 2)), randgen.normal(size=(2,))),
(randgen.normal(size=(2, 10)), randgen.normal(size=(10,))),
]
with jax_verify.open_file('mnist/x_test_first100.npy', 'rb') as f:
mnist_x = np.load(f)
inps = jnp.reshape(mnist_x[:4], (4, 784))
def logits_fun(inp):
preact_1 = jnp.dot(inp, params[0][0]) + params[0][1]
act_1 = jax.nn.softplus(preact_1)
out = jnp.dot(act_1, params[1][0]) + params[1][1]
return out
input_bounds = jax_verify.IntervalBound(inps - 1.0, inps + 1.0)
nonconvex_ibp_bounds = jax_verify.nonconvex_ibp_bound_propagation(
logits_fun, input_bounds)
    # Optimizing the dual directly optimizes the bounds that we obtain, so it
    # increases the chance of finding places where the dual is computed
    # incorrectly.
concretizer = optimizers.OptimizingConcretizer(
optimizers.PGDOptimizer(5, 1, optimize_dual=True), 0)
final_pgddual_ibp_bounds = concretizer.get_bounds(nonconvex_ibp_bounds)
self.assertTrue((final_pgddual_ibp_bounds.upper >=
final_pgddual_ibp_bounds.lower).all())
@parameterized.named_parameters(
('Wolfe', duals.WolfeNonConvexBound),
('LinLagrangian', duals.LinLagrangianNonConvexBound),
('MinLagrangian', duals.MinLagrangianNonConvexBound)
)
def test_negative_dualgap(self, bound_cls):
batch_size = 2
nb_opt_targets = 3
key = jax.random.PRNGKey(42)
problem_key, var_key = jax.random.split(key)
fun, (lb, ub) = test_utils.set_up_toy_problem(problem_key, batch_size,
[64, 2, 2, 2])
input_bounds = jax_verify.IntervalBound(lb, ub)
algorithm = nonconvex.nonconvex_algorithm(
bound_cls, nonconvex.BaseBoundConcretizer(),
base_boundprop=ibp.bound_transform)
# NonConvex bound
nonconvex_ibp_bounds, _ = bound_propagation.bound_propagation(
algorithm, fun, input_bounds)
objectives, var_set = _random_objectives_primal_variables(
var_key, nonconvex_ibp_bounds, nb_opt_targets)
primal, dual = nonconvex_ibp_bounds.dual(var_set, objectives) # pytype: disable=attribute-error # jax-ndarray
self.assertTrue((dual <= primal).all())
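  # The dual gap check above is an instance of weak duality: for any feasible
  # primal point and any valid dual variables, the dual value can never exceed
  # the primal one. Below is a minimal self-contained illustration on a toy
  # problem (minimise x**2 subject to x >= 1); the helper is hypothetical and
  # unrelated to the library's duals module, it only illustrates the property
  # being tested.
  def _toy_weak_duality_gap(self, lam):
    """Primal minus dual value for `min x**2 s.t. x >= 1`; >= 0 for lam >= 0."""
    primal_value = 1.0  # Attained at the optimum x = 1.
    # Lagrangian: x**2 + lam * (1 - x), minimised over x at x = lam / 2.
    dual_value = lam - lam ** 2 / 4.
    return primal_value - dual_value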
def test_collect_lagrangian_layers(self):
batch_size = 2
nb_opt_targets = 3
key = jax.random.PRNGKey(42)
problem_key, primal_var_key, dual_var_key = jax.random.split(key, num=3)
fun, (lb, ub) = test_utils.set_up_toy_problem(problem_key, batch_size,
[32, 18, 2])
input_bounds = jax_verify.IntervalBound(lb, ub)
# Get the MinLagrangian and LinLagrangian propagation
linlag_bound_algorithm = nonconvex.nonconvex_algorithm(
duals.LinLagrangianNonConvexBound,
nonconvex.BaseBoundConcretizer(), base_boundprop=ibp.bound_transform)
minlag_bound_algorithm = nonconvex.nonconvex_algorithm(
duals.MinLagrangianNonConvexBound,
nonconvex.BaseBoundConcretizer(), base_boundprop=ibp.bound_transform)
# NonConvex bound
linlag_bound, _ = bound_propagation.bound_propagation(
linlag_bound_algorithm, fun, input_bounds)
minlag_bound, _ = bound_propagation.bound_propagation(
minlag_bound_algorithm, fun, input_bounds)
objectives, var_set = _random_objectives_primal_variables(
primal_var_key, linlag_bound, nb_opt_targets)
_, acts = linlag_bound.primal_fn(var_set, objectives) # pytype: disable=attribute-error # jax-ndarray
dual_vars = {}
for index, primal_var in var_set.items():
dual_var_key, new_key = jax.random.split(dual_var_key)
dual_vars[index] = jax.random.normal(new_key, shape=primal_var.shape)
    ## Test each layer separately.
for index in linlag_bound.previous_bounds: # pytype: disable=attribute-error # jax-ndarray
linlag_intermediate_bound = linlag_bound.previous_bounds[index] # pytype: disable=attribute-error # jax-ndarray
minlag_intermediate_bound = minlag_bound.previous_bounds[index] # pytype: disable=attribute-error # jax-ndarray
lagrangian_level_fn = linlag_intermediate_bound.lagrangian_level_fn # pytype: disable=attribute-error # jax-ndarray
lagrangian_varterms_fn = minlag_intermediate_bound.lagrangian_varterms_fn # pytype: disable=attribute-error # jax-ndarray
dvar = dual_vars[index]
      # Get all the per-variable terms for a level, and evaluate them.
lagrangian_dict = collections.defaultdict(list)
lagrangian_varterms_fn(dvar, lagrangian_dict)
per_var_lagrangian = 0
for var_idx, lag_terms in lagrangian_dict.items():
for term in lag_terms:
out_term = term[1](acts[var_idx])
dims_to_reduce = tuple(range(1, out_term.ndim))
per_var_lagrangian = per_var_lagrangian + out_term.sum(dims_to_reduce)
      # Simply get the lagrangian for a level and evaluate it.
per_level_lagrangian = lagrangian_level_fn(dvar, acts)
# The two should give exactly the same results.
diff = jnp.abs(per_level_lagrangian - per_var_lagrangian).max()
self.assertAlmostEqual(
diff, 0, delta=1e-3,
msg=f'Difference in the lagrangian computation for layer {index}')
def test_collect_lagrangian_network(self):
batch_size = 2
nb_opt_targets = 3
key = jax.random.PRNGKey(42)
problem_key, primal_var_key, dual_var_key = jax.random.split(key, num=3)
fun, (lb, ub) = test_utils.set_up_toy_problem(problem_key, batch_size,
[64, 2, 2, 2])
input_bounds = jax_verify.IntervalBound(lb, ub)
# Get the MinLagrangian and LinLagrangian propagation
linlag_bound_algorithm = nonconvex.nonconvex_algorithm(
duals.LinLagrangianNonConvexBound,
nonconvex.BaseBoundConcretizer(), base_boundprop=ibp.bound_transform)
minlag_bound_algorithm = nonconvex.nonconvex_algorithm(
duals.MinLagrangianNonConvexBound,
nonconvex.BaseBoundConcretizer(), base_boundprop=ibp.bound_transform)
# NonConvex bound
linlag_bound, _ = bound_propagation.bound_propagation(
linlag_bound_algorithm, fun, input_bounds)
minlag_bound, _ = bound_propagation.bound_propagation(
minlag_bound_algorithm, fun, input_bounds)
objectives, var_set = _random_objectives_primal_variables(
primal_var_key, minlag_bound, nb_opt_targets)
primal, acts = minlag_bound.primal_fn(var_set, objectives) # pytype: disable=attribute-error # jax-ndarray
dual_vars = {}
for index, primal_var in var_set.items():
dual_var_key, new_key = jax.random.split(dual_var_key)
dual_vars[index] = jax.random.normal(new_key, shape=primal_var.shape)
all_lagrangian_terms = minlag_bound.collect_lagrangian_varterms( # pytype: disable=attribute-error # jax-ndarray
objectives, dual_vars)
per_var_lagrangian = primal
for var_index, lag_terms in all_lagrangian_terms.items():
for term in lag_terms:
all_contrib = term[1](acts[var_index])
dims_to_reduce = tuple(range(1, all_contrib.ndim))
var_contrib = all_contrib.sum(axis=dims_to_reduce)
per_var_lagrangian = per_var_lagrangian + var_contrib
per_level_lagrangian, _ = linlag_bound._lagrangian_fn(acts, objectives, # type: ignore # jax-ndarray
dual_vars)
diff = jnp.abs(per_level_lagrangian - per_var_lagrangian).max()
self.assertAlmostEqual(
diff, 0, delta=1e-3,
        msg='The two lagrangian implementations are not equivalent.')
# Let's also sanity check that we can correctly optimize our lagrangian
# terms.
for var_idx, lag_terms in all_lagrangian_terms.items():
lower = minlag_bound.previous_bounds[var_idx].lower # pytype: disable=attribute-error # jax-ndarray
lower = jnp.repeat(jnp.expand_dims(lower, 0), nb_opt_targets, axis=0)
upper = minlag_bound.previous_bounds[var_idx].upper # pytype: disable=attribute-error # jax-ndarray
upper = jnp.repeat(jnp.expand_dims(upper, 0), nb_opt_targets, axis=0)
def eval_lagrangian_terms(var_act, lag_terms=lag_terms):
per_var_lagrangians = []
for term in lag_terms:
out_term = term[1](var_act)
dims_to_reduce = tuple(range(1, out_term.ndim))
per_var_lagrangians.append(out_term.sum(dims_to_reduce))
return sum(per_var_lagrangians)
minimizing_input = duals._optimize_lagrangian_terms(
lag_terms, lower, upper)
minimized_varlagrangian = eval_lagrangian_terms(minimizing_input)
for _ in range(10):
unif = np.random.uniform(size=lower.shape)
candidate_input = lower + unif * (upper - lower)
candidate_varlagrangian = eval_lagrangian_terms(candidate_input)
min_gap = (candidate_varlagrangian - minimized_varlagrangian).min()
self.assertGreater(min_gap, 0,
            msg=('Minimization of the lagrangian with regard to '
                 f'variable {var_idx} is not correct.'))
@parameterized.named_parameters(
('Wolfe', duals.WolfeNonConvexBound),
('LinLagrangian', duals.LinLagrangianNonConvexBound),
('MinLagrangian', duals.MinLagrangianNonConvexBound)
)
def test_comparefista_to_cvxpy(self, bound_cls):
batch_size = 2
key = jax.random.PRNGKey(42)
fun, (lb, ub) = test_utils.set_up_toy_problem(key, batch_size, [32, 18, 2])
input_bounds = jax_verify.IntervalBound(lb, ub)
# NonConvex Result
nonconvex_ibp_bound_algorithm = nonconvex.nonconvex_algorithm(
bound_cls, nonconvex.BaseBoundConcretizer(),
base_boundprop=ibp.bound_transform)
nonconvex_ibp_bounds, _ = bound_propagation.bound_propagation(
nonconvex_ibp_bound_algorithm, fun, input_bounds)
fista_optimizer = optimizers.LinesearchFistaOptimizer(
40, beta_l=0.8, termination_dual_gap=1e-6)
fista_concretizer = optimizers.OptimizingConcretizer(
fista_optimizer, 0)
dual_bound = fista_concretizer.get_bounds(nonconvex_ibp_bounds)
relaxation_transform = relaxation.RelaxationTransform(
jax_verify.ibp_transform)
cvxpy_final_var, env = bound_propagation.bound_propagation(
bound_propagation.ForwardPropagationAlgorithm(relaxation_transform),
fun, input_bounds)
nb_targets = np.prod(cvxpy_final_var.shape[1:]) # pytype: disable=attribute-error # jax-ndarray
for batch_index in range(batch_size):
for target_index in range(nb_targets):
objective = (jnp.arange(nb_targets) == target_index).astype(jnp.float32)
objective_bias = 0.
cvxpy_lower, _, _ = relaxation.solve_relaxation(
cvxpy_relaxation_solver.CvxpySolver, objective, objective_bias,
cvxpy_final_var, env, batch_index)
cvxpy_neg_upper, _, _ = relaxation.solve_relaxation(
cvxpy_relaxation_solver.CvxpySolver, -objective, objective_bias,
cvxpy_final_var, env, batch_index)
cvxpy_upper = - cvxpy_neg_upper
nonconvex_lower = dual_bound.lower[batch_index, target_index]
nonconvex_upper = dual_bound.upper[batch_index, target_index]
self.assertGreaterEqual(
nonconvex_upper, nonconvex_lower,
msg='Bounds are crossing.')
self.assertAlmostEqual(
cvxpy_lower, nonconvex_lower, delta=1e-2,
msg='Inaccurate lower bound.')
self.assertAlmostEqual(
cvxpy_upper, nonconvex_upper, delta=1e-2,
            msg='Inaccurate upper bound.')
def test_chunked_optimization(self):
batch_size = 3
key = jax.random.PRNGKey(42)
fun, (lb, ub) = test_utils.set_up_toy_problem(key, batch_size, [32, 16, 10])
input_bounds = jax_verify.IntervalBound(lb, ub)
nonconvex_bound = jax_verify.nonconvex_ibp_bound_propagation(
fun, input_bounds)
optimizer = optimizers.PGDOptimizer(10, 0.1)
chunked_concretizer = optimizers.OptimizingConcretizer(
optimizer, 3)
full_concretizer = optimizers.OptimizingConcretizer(optimizer, 0)
full_bounds = full_concretizer.get_bounds(nonconvex_bound)
chunked_bounds = chunked_concretizer.get_bounds(nonconvex_bound)
np.testing.assert_array_almost_equal(chunked_bounds.lower,
full_bounds.lower)
np.testing.assert_array_almost_equal(chunked_bounds.upper,
full_bounds.upper)
def test_objfun_derivation(self):
batch_size = 3
nb_opt_targets = 8
key = jax.random.PRNGKey(42)
problem_key, objective_key = jax.random.split(key)
fun, (lb, ub) = test_utils.set_up_toy_problem(problem_key, batch_size,
[32, 16, 10])
input_bounds = jax_verify.IntervalBound(lb, ub)
nonconvex_bound = jax_verify.nonconvex_ibp_bound_propagation(
fun, input_bounds)
objectives, var_set = _random_objectives_primal_variables(
objective_key, nonconvex_bound, nb_opt_targets)
    # The derivative derived on paper for the standard dot-product objective
    # is just `objectives`.
ref_dual_vars = objectives.copy()
# Compute it using autograd.
dual_vars, _ = nonconvex_bound._compute_dualvars_convexgrad(var_set,
objectives)
for node_idx in ref_dual_vars:
np.testing.assert_array_almost_equal(dual_vars[node_idx],
ref_dual_vars[node_idx])
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/nonconvex_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for branching."""
import functools
from absl.testing import absltest
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import jax_verify
from jax_verify.src import ibp
from jax_verify.src.branching import branch_algorithm
from jax_verify.src.branching import branch_selection
class BranchAlgorithmTest(chex.TestCase):
def test_identity_network_leaves_sensitivities_unchanged(self):
# Set up a small network.
@hk.transform
def forward_fn(x):
x = hk.Linear(7)(x)
x = jax.nn.relu(x)
x = hk.Linear(5)(x)
return x
input_bounds = jax_verify.IntervalBound(
lower_bound=jnp.array([-1., 0., 1.]),
upper_bound=jnp.array([2., 3., 4.]))
params = forward_fn.init(jax.random.PRNGKey(0), input_bounds.lower)
spec_fn = functools.partial(forward_fn.apply, params, None)
upper_bound = branch_algorithm.upper_bound_with_branching(
ibp.bound_transform,
branch_selection.ReluSelector(),
spec_fn,
input_bounds,
num_branches=5)
chex.assert_equal((5,), upper_bound.shape)
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/branch_algorithm_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the convex relaxation of different primitives."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import cvxpy as cp
import jax
from jax import lax
import jax.numpy as jnp
import jax_verify
from jax_verify.src import activation_relaxation
from jax_verify.src import bound_propagation
from jax_verify.src import simplex_bound
from jax_verify.src import synthetic_primitives
from jax_verify.tests import test_utils
import numpy as np
IntervalBound = bound_propagation.IntervalBound
TOL = 1e-5
class ConvexRelaxationTest(parameterized.TestCase):
def _sample_from_bound(self, rng_key, bound, nb_points):
if isinstance(bound, simplex_bound.SimplexIntervalBound):
return test_utils.sample_bounded_simplex_points(
rng_key, (bound.lower, bound.upper), bound.simplex_sum,
nb_points)
else:
return test_utils.sample_bounded_points(
rng_key, (bound.lower, bound.upper), nb_points)
def _check_bounds(self, key, fun, lb_fun, ub_fun, bounds, nb_samples=1000):
"""Check that lb_fun and ub_fun actually bound the function fun.
This is evaluated at a number of random samples.
Args:
key: PRNG key for random number generation
fun: Function to be bounded.
lb_fun: Lower bound function.
ub_fun: Upper bound function.
bounds: List of bounds on the inputs.
nb_samples: How many random samples to draw for testing.
"""
keys = jax.random.split(key, len(bounds))
# Build the uniform samples.
inps = []
for inp_idx, bound in enumerate(bounds):
inps.append(self._sample_from_bound(keys[inp_idx], bound, nb_samples))
vmap_fun = jax.vmap(fun)
vmap_lbfun = jax.vmap(lb_fun)
vmap_ubfun = jax.vmap(ub_fun)
samples_eval = vmap_fun(*inps)
lb_eval = vmap_lbfun(*inps)
ub_eval = vmap_ubfun(*inps)
self.assertGreaterEqual(
(samples_eval - lb_eval).min(), -TOL,
msg='Lower Bound is invalid')
self.assertGreaterEqual(
(ub_eval - samples_eval).min(), -TOL,
msg='Upper Bound is invalid')
def _check_convexity(self, key, fun, bounds, is_convex, nb_samples=100):
"""Check that the function is convex or concave.
We do this by sanity-checking that the function is below its chord.
Args:
key: PRNG key for random number generation
fun: Function to be checked.
bounds: List of bounds on the inputs.
      is_convex: If True, check that the function is convex; if False, check
        that the function is concave.
nb_samples: How many random samples to draw for testing.
"""
keys = jax.random.split(key, 2*len(bounds) + 1)
a_inps = []
b_inps = []
interp_inps = []
interp_coeffs = jax.random.uniform(keys[-1], (nb_samples,))
for inp_idx, bound in enumerate(bounds):
interp_coeffs_shape = (-1,) + (1,)*bound.lower.ndim
broad_interp_coeffs = jnp.reshape(interp_coeffs, interp_coeffs_shape)
a_inp = self._sample_from_bound(keys[2*inp_idx], bound, nb_samples)
b_inp = self._sample_from_bound(keys[2*inp_idx+1], bound, nb_samples)
interp_inp = (a_inp * broad_interp_coeffs +
b_inp * (1. - broad_interp_coeffs))
a_inps.append(a_inp)
b_inps.append(b_inp)
interp_inps.append(interp_inp)
vmap_fun = jax.vmap(fun)
a_eval = vmap_fun(*a_inps)
b_eval = vmap_fun(*b_inps)
interp_eval = vmap_fun(*interp_inps)
interp_coeffs_shape = (-1,) + (1,)*(interp_eval.ndim - 1)
broad_interp_coeffs = jnp.reshape(interp_coeffs, interp_coeffs_shape)
chord_eval = (a_eval * broad_interp_coeffs +
b_eval * (1. - broad_interp_coeffs))
if is_convex:
self.assertGreaterEqual(
(chord_eval - interp_eval).min(), -TOL,
msg='Function is not convex')
else:
self.assertGreaterEqual(
(interp_eval - chord_eval).min(), -TOL,
msg='Function is not concave')
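# The convexity check above relies on the chord inequality: for a convex
# function f and any interpolation coefficient t in [0, 1],
#   f(t * a + (1 - t) * b) <= t * f(a) + (1 - t) * f(b).
# Below is a minimal standalone sketch of that check for a scalar function;
# it is illustrative only (the class above implements the batched, vmapped
# version) and the helper name is made up for this sketch.
def _chord_inequality_holds(fun, a, b, t):
  """Returns True if `fun` evaluated at the interpolate lies below the chord."""
  interp_eval = fun(t * a + (1. - t) * b)
  chord_eval = t * fun(a) + (1. - t) * fun(b)
  return bool(interp_eval <= chord_eval + TOL)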
class DefaultConvexRelaxationTest(ConvexRelaxationTest):
def test_abs(self):
batch_size = 5
axis_dim = 8
abs_inp_shape = (batch_size, axis_dim)
def abs_model(inp):
return jnp.abs(inp)
bound_key = jax.random.PRNGKey(0)
inp_lb, inp_ub = test_utils.sample_bounds(bound_key, abs_inp_shape,
minval=-10., maxval=10.)
inp_bound = IntervalBound(inp_lb, inp_ub)
lb_fun, ub_fun = activation_relaxation.convex_fn_relaxation(
lax.abs_p, inp_bound)
# Check that the bounds are valid
uniform_check_key = jax.random.PRNGKey(1)
self._check_bounds(uniform_check_key, abs_model, lb_fun, ub_fun,
[inp_bound])
# Sanity check the convexity of the relaxation
cvx_check_key = jax.random.PRNGKey(2)
self._check_convexity(cvx_check_key, lb_fun, [inp_bound], True)
ccv_check_key = jax.random.PRNGKey(3)
self._check_convexity(ccv_check_key, ub_fun, [inp_bound], False)
@parameterized.named_parameters(
('pos_smaller_than_1', 0.5),
('pos_higher_than_1', 1.5),
('neg', -1.))
def test_leaky_relu(self, negative_slope):
batch_size = 5
axis_dim = 8
leaky_relu_inp_shape = (batch_size, axis_dim)
def leaky_relu_model(inp):
return jax.nn.leaky_relu(inp, negative_slope)
bound_key = jax.random.PRNGKey(0)
inp_lb, inp_ub = test_utils.sample_bounds(bound_key, leaky_relu_inp_shape,
minval=-10., maxval=10.)
inp_bound = IntervalBound(inp_lb, inp_ub)
lb_fun, ub_fun = activation_relaxation.intersection_relaxation(
activation_relaxation.leaky_relu_piecewise_linear_relaxation,
inp_bound, negative_slope=negative_slope)
# Check that the bounds are valid
uniform_check_key = jax.random.PRNGKey(1)
self._check_bounds(uniform_check_key, leaky_relu_model, lb_fun, ub_fun,
[inp_bound])
# Sanity check the convexity of the relaxation
cvx_check_key = jax.random.PRNGKey(2)
self._check_convexity(cvx_check_key, lb_fun, [inp_bound], True)
ccv_check_key = jax.random.PRNGKey(3)
self._check_convexity(ccv_check_key, ub_fun, [inp_bound], False)
@parameterized.named_parameters(
('small_scale', 0.01),
('normal_scale', 1),
('large_scale', 1e4),
('very_large_scale', 1e8))
def test_sigmoid(self, scale):
batch_size = 5
axis_dim = 8
sigmoid_inp_shape = (batch_size, axis_dim)
sigmoid = jax.nn.sigmoid
bound_key = jax.random.PRNGKey(0)
inp_lb, inp_ub = test_utils.sample_bounds(bound_key, sigmoid_inp_shape,
minval=-scale, maxval=scale)
inp_bound = IntervalBound(inp_lb, inp_ub)
lb_fun, ub_fun = activation_relaxation.sigmoid_relaxation(inp_bound)
# Check that the bounds are valid
uniform_check_key = jax.random.PRNGKey(1)
self._check_bounds(uniform_check_key, sigmoid, lb_fun, ub_fun,
[inp_bound])
# Sanity check the convexity of the relaxation
cvx_check_key = jax.random.PRNGKey(2)
self._check_convexity(cvx_check_key, lb_fun, [inp_bound], True)
ccv_check_key = jax.random.PRNGKey(3)
self._check_convexity(ccv_check_key, ub_fun, [inp_bound], False)
def test_sigmoid_parallel_different_setting(self):
    # This reproduces a previously identified bug where the tangent point was
    # computed incorrectly when the same batch contained points whose upper
    # bound was fully linear, fully sigmoid, or a mix of the two.
lower = jnp.array([-100., 1., -3.])
upper = jnp.array([-1., 100., 3.])
inp_bound = IntervalBound(lower, upper)
lb_fun, ub_fun = activation_relaxation.sigmoid_relaxation(inp_bound)
# At the edge of the feasible domain, the convex relaxation should be tight.
at_lower_lb_gap = jnp.abs(lb_fun(lower) - jax.nn.sigmoid(lower))
at_lower_ub_gap = jnp.abs(ub_fun(lower) - jax.nn.sigmoid(lower))
at_upper_lb_gap = jnp.abs(lb_fun(upper) - jax.nn.sigmoid(upper))
at_upper_ub_gap = jnp.abs(ub_fun(upper) - jax.nn.sigmoid(upper))
self.assertAlmostEqual(at_lower_lb_gap.max(), 0.)
self.assertAlmostEqual(at_lower_ub_gap.max(), 0.)
self.assertAlmostEqual(at_upper_lb_gap.max(), 0.)
self.assertAlmostEqual(at_upper_ub_gap.max(), 0.)
@parameterized.named_parameters(
('small_scale', 0.01),
('normal_scale', 1),
('large_scale', 1e4),
('very_large_scale', 1e8))
def test_tanh(self, scale):
batch_size = 5
axis_dim = 8
tanh_inp_shape = (batch_size, axis_dim)
bound_key = jax.random.PRNGKey(0)
inp_lb, inp_ub = test_utils.sample_bounds(bound_key, tanh_inp_shape,
minval=-scale, maxval=scale)
inp_bound = IntervalBound(inp_lb, inp_ub)
lb_fun, ub_fun = activation_relaxation.tanh_relaxation(inp_bound)
# Check that the bounds are valid
uniform_check_key = jax.random.PRNGKey(1)
self._check_bounds(uniform_check_key, jnp.tanh, lb_fun, ub_fun,
[inp_bound])
# Sanity check the convexity of the relaxation
cvx_check_key = jax.random.PRNGKey(2)
self._check_convexity(cvx_check_key, lb_fun, [inp_bound], True)
ccv_check_key = jax.random.PRNGKey(3)
self._check_convexity(ccv_check_key, ub_fun, [inp_bound], False)
@parameterized.named_parameters(
('all_included', 0.1),
('mixed', 2.),
('large_scale', 1e4),
('very_large_scale', 1e8))
def test_clip(self, scale):
batch_size = 5
axis_dim = 8
clip_inp_shape = (batch_size, axis_dim)
clip_fun = functools.partial(jnp.clip, a_min=-1., a_max=1.)
bound_key = jax.random.PRNGKey(0)
inp_lb, inp_ub = test_utils.sample_bounds(bound_key, clip_inp_shape,
minval=-scale, maxval=scale)
inp_bound = IntervalBound(inp_lb, inp_ub)
clip_relaxation = activation_relaxation.relaxation_fns[
synthetic_primitives.clip_p]
lb_fun, ub_fun = clip_relaxation.relaxation_fn(
inp_bound, a_min=-1., a_max=1.)
# Check that the bounds are valid
uniform_check_key = jax.random.PRNGKey(1)
self._check_bounds(uniform_check_key, clip_fun, lb_fun, ub_fun, [inp_bound])
# Sanity check the convexity of the relaxation
cvx_check_key = jax.random.PRNGKey(2)
self._check_convexity(cvx_check_key, lb_fun, [inp_bound], True)
ccv_check_key = jax.random.PRNGKey(3)
self._check_convexity(ccv_check_key, ub_fun, [inp_bound], False)
def test_fusedrelu(self):
inp_dim = 5
out_dim = 7
param_key = jax.random.PRNGKey(0)
weight_key, bias_key = jax.random.split(param_key, 2)
lin_layer_weight = jax.random.normal(weight_key, (inp_dim, out_dim))
lin_layer_bias = jax.random.normal(bias_key, (out_dim,))
bound_key = jax.random.PRNGKey(1)
inp_lb, inp_ub = test_utils.sample_bounds(bound_key, (inp_dim,),
minval=-1., maxval=1.)
def linear_layer(inp, lin_weight, lin_bias):
return inp @ lin_weight + lin_bias
def fused_relu_model(inp, lin_weight, lin_bias, *_):
return jax.nn.relu(linear_layer(inp, lin_weight, lin_bias))
# Let's get the jaxpr corresponding to the function, similarly to what would
# be extracted by the synthetic primitives simplifier.
parsed = synthetic_primitives.make_jaxpr_nojit(
fused_relu_model, inp_lb, lin_layer_weight, lin_layer_bias)
inp_is_bound = {var: is_bound for var, is_bound
in zip(parsed.jaxpr.invars, [True, False, False])}
simplified_graph = synthetic_primitives.simplify_graph(
synthetic_primitives.fused_relu_simplifier, parsed.jaxpr, inp_is_bound)
linear_eqn = simplified_graph.eqns[0]
assert linear_eqn.primitive == synthetic_primitives.linear_p
relu_eqn = simplified_graph.eqns[1]
assert relu_eqn.primitive == synthetic_primitives.fused_relu_p
net_inp = IntervalBound(inp_lb, inp_ub)
linear_bound = jax_verify.interval_bound_propagation(
linear_layer, net_inp, lin_layer_weight, lin_layer_bias)
lb_fun, ub_fun = activation_relaxation.fused_relu_relaxation(
linear_bound, net_inp, lin_layer_weight, lin_layer_bias,
**relu_eqn.params)
# Check that the bounds are valid
def tied_inp_lb_fun(lin_inp, lin_weight, lin_bias):
lin_out = linear_layer(lin_inp, lin_weight, lin_bias)
return lb_fun(lin_out, lin_inp, lin_weight, lin_bias)
def tied_inp_ub_fun(lin_inp, lin_weight, lin_bias):
lin_out = linear_layer(lin_inp, lin_weight, lin_bias)
return ub_fun(lin_out, lin_inp, lin_weight, lin_bias)
all_inp_bounds = [net_inp,
IntervalBound(lin_layer_weight, lin_layer_weight),
IntervalBound(lin_layer_bias, lin_layer_bias)]
# Check that the bounds are valid
uniform_check_key = jax.random.PRNGKey(2)
self._check_bounds(
uniform_check_key, fused_relu_model, tied_inp_lb_fun, tied_inp_ub_fun,
all_inp_bounds)
# Sanity check the convexity of the relaxation
cvx_check_key = jax.random.PRNGKey(3)
self._check_convexity(cvx_check_key, tied_inp_lb_fun, all_inp_bounds, True)
ccv_check_key = jax.random.PRNGKey(4)
self._check_convexity(ccv_check_key, tied_inp_ub_fun, all_inp_bounds, False)
def test_fusedrelu_conv(self):
height = 5
width = 5
inp_channels = 3
out_channels = 4
ker_size = 2
img_shape = (1, inp_channels, height, width)
ker_shape = (out_channels, inp_channels, ker_size, ker_size)
bias_shape = (1, out_channels, 1, 1)
param_key = jax.random.PRNGKey(0)
weight_key, bias_key = jax.random.split(param_key, 2)
lin_kernel_weight = jax.random.normal(weight_key, ker_shape)
lin_kernel_bias = jax.random.normal(bias_key, bias_shape)
bound_key = jax.random.PRNGKey(1)
inp_lb, inp_ub = test_utils.sample_bounds(bound_key, img_shape,
minval=-1., maxval=1.)
def linear_layer(inp, lin_kernel, lin_bias):
return jax.lax.conv(inp, lin_kernel, (1, 1), 'SAME') + lin_bias
def fused_relu_model(inp, lin_kernel, lin_bias):
return jax.nn.relu(linear_layer(inp, lin_kernel, lin_bias))
# Let's get the jaxpr corresponding to the function, similarly to what would
# be extracted by the synthetic primitives simplifier.
parsed = synthetic_primitives.make_jaxpr_nojit(
fused_relu_model, inp_lb, lin_kernel_weight, lin_kernel_bias)
inp_is_bound = {var: is_bound for var, is_bound
in zip(parsed.jaxpr.invars, [True, False, False])}
simplified_graph = synthetic_primitives.simplify_graph(
synthetic_primitives.fused_relu_simplifier, parsed.jaxpr, inp_is_bound)
linear_eqn = simplified_graph.eqns[0]
assert linear_eqn.primitive == synthetic_primitives.linear_p
relu_eqn = simplified_graph.eqns[1]
assert relu_eqn.primitive == synthetic_primitives.fused_relu_p
net_inp = IntervalBound(inp_lb, inp_ub)
linear_bound = jax_verify.interval_bound_propagation(
linear_layer, net_inp, lin_kernel_weight, lin_kernel_bias)
lb_fun, ub_fun = activation_relaxation.fused_relu_relaxation(
linear_bound, net_inp, lin_kernel_weight, lin_kernel_bias,
**relu_eqn.params)
# Check that the bounds are valid
def tied_inp_lb_fun(lin_inp, lin_kernel, lin_bias):
lin_out = linear_layer(lin_inp, lin_kernel, lin_bias)
return lb_fun(lin_out, lin_inp, lin_kernel, lin_bias)
def tied_inp_ub_fun(lin_inp, lin_kernel, lin_bias):
lin_out = linear_layer(lin_inp, lin_kernel, lin_bias)
return ub_fun(lin_out, lin_inp, lin_kernel, lin_bias)
all_inp_bounds = [net_inp,
IntervalBound(lin_kernel_weight, lin_kernel_weight),
IntervalBound(lin_kernel_bias, lin_kernel_bias)]
uniform_check_key = jax.random.PRNGKey(2)
self._check_bounds(
uniform_check_key, fused_relu_model, tied_inp_lb_fun, tied_inp_ub_fun,
all_inp_bounds)
# Sanity check the convexity of the relaxation
cvx_check_key = jax.random.PRNGKey(3)
self._check_convexity(cvx_check_key, tied_inp_lb_fun, all_inp_bounds, True)
ccv_check_key = jax.random.PRNGKey(4)
self._check_convexity(ccv_check_key, tied_inp_ub_fun, all_inp_bounds, False)
def test_equivalent_hypercube_fusedrelu_relaxation(self):
inp_dim = 10
out_dim = 50
param_key = jax.random.PRNGKey(0)
weight_key, bias_key = jax.random.split(param_key, 2)
lin_layer_weight = jax.random.normal(weight_key, (inp_dim, out_dim))
lin_layer_bias = jax.random.normal(bias_key, (out_dim,))
bound_key = jax.random.PRNGKey(1)
inp_lb, inp_ub = test_utils.sample_bounds(bound_key, (inp_dim,),
minval=-1., maxval=1.)
ub_fun = activation_relaxation.alt_fused_relu_hypercube_upper_bound(
inp_lb, inp_ub)
alt_ub_fun, _ = activation_relaxation.fused_relu_hypercube_upper_bound(
inp_lb, inp_ub)
all_neuron_ub_fun = functools.partial(jax.vmap(ub_fun,
in_axes=(1, 0, None)),
lin_layer_weight, lin_layer_bias)
all_neuron_alt_ub_fun = functools.partial(jax.vmap(alt_ub_fun,
in_axes=(1, 0, None)),
lin_layer_weight, lin_layer_bias)
batch_ub_fun = jax.vmap(all_neuron_ub_fun)
batch_alt_ub_fun = jax.vmap(all_neuron_alt_ub_fun)
samples_key = jax.random.PRNGKey(2)
samples = test_utils.sample_bounded_points(samples_key, (inp_lb, inp_ub),
nb_points=256, axis=0)
ub_out = batch_ub_fun(samples)
alt_ub_out = batch_alt_ub_fun(samples)
max_diff = jnp.abs(ub_out - alt_ub_out).max()
self.assertAlmostEqual(max_diff, 0., delta=1e-5)
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/activation_relaxation_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for IntervalSimplex bounds."""
from absl.testing import absltest
import chex
import jax
import jax.numpy as jnp
from jax_verify.src import simplex_bound
from jax_verify.src.linear import linear_relaxations
from jax_verify.src.types import Tensor
from jax_verify.tests import test_utils
def _check_bounds(inp_bound: simplex_bound.SimplexIntervalBound,
lin_coeffs: linear_relaxations.LinearExpression,
ref_bounds: Tensor):
nb_tests = lin_coeffs.shape[0]
# Let's compare the bound and make sure that they are all computed
# correctly.
lin_expr = linear_relaxations.LinearExpression(
lin_coeffs, jnp.zeros((nb_tests,)))
computed_bounds = (
simplex_bound.concretize_linear_function_simplexinterval_constraints(
lin_expr, inp_bound))
chex.assert_trees_all_close(ref_bounds, computed_bounds)
# Let's ensure that the linear bound offsets are correctly incorporated
# too.
offsets = jnp.arange(nb_tests).astype(jnp.float32)
lin_expr_with_offsets = linear_relaxations.LinearExpression(
lin_coeffs, offsets)
ref_bounds_with_offsets = ref_bounds + offsets
computed_bounds = (
simplex_bound.concretize_linear_function_simplexinterval_constraints(
lin_expr_with_offsets, inp_bound))
chex.assert_trees_all_close(ref_bounds_with_offsets, computed_bounds)
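# The reference bounds in the tests below can be derived by hand with a
# greedy argument: to minimise a linear function over the set
# {x : lower <= x <= upper, sum(x) = simplex_sum}, start every coordinate at
# its lower bound and spend the remaining budget on coordinates in increasing
# order of their coefficients. The helper below is an illustrative sketch of
# that computation, not the jax_verify implementation.
def _greedy_simplex_interval_minimum(coeffs, lower, upper, simplex_sum):
  """Greedy minimum of coeffs @ x over the interval-constrained simplex."""
  value = float(coeffs @ lower)
  budget = float(simplex_sum) - float(lower.sum())
  for idx in jnp.argsort(coeffs):
    step = min(budget, float(upper[idx] - lower[idx]))
    value += float(coeffs[idx]) * step
    budget -= step
  return value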
class SimplexIntervalBoundTest(absltest.TestCase):
def test_01inp_linear_bounds(self):
inp_bound = simplex_bound.SimplexIntervalBound(
jnp.array([0., 0., 0.]), jnp.array([1., 1., 1.]),
1.5)
lin_coeffs = jnp.array([
        # Computing the ground truth always corresponds to fully picking the
        # coordinate with the smallest coefficient and half of the second smallest.
[3., 2., 0.], # -> Pick 2, half of 1 -> 1.
[-3., -1., 2.], # -> Pick 0, half of 1 -> -3.5.
[1., -2., -5.], # -> Pick 2, half of 1 -> -6.
[-2., -3., -1.], # -> Pick 1, half of 0 -> -4.
# Testing with some ties.
[1., 1., 1.], # -> Pick whatever -> 1.5
[-2., 1., 1.], # -> Pick 0, half of either 1 or 2 -> -1.5
[2., 1., 1.], # -> Pick 1.5 out of (1, 2) -> 1.5.
])
ref_bounds = jnp.array([1., -3.5, -6., -4., 1.5, -1.5, 1.5])
_check_bounds(inp_bound, lin_coeffs, ref_bounds)
def test_fixedbysimplexinp_linear_bounds(self):
fixed_at_ub_inp_bound = simplex_bound.SimplexIntervalBound(
jnp.array([0., 0., 0.]), jnp.array([1., 2., 3.]),
6.)
lin_coeffs = jnp.array([
[1., 1., 1.],
[-1., 0., 1.],
[-3., -1., -2.],
[1., 3., -1.],
[0., 0., 0.]
])
# In any case, we need to pick up everything.
ref_bounds = lin_coeffs @ fixed_at_ub_inp_bound.upper
_check_bounds(fixed_at_ub_inp_bound, lin_coeffs, ref_bounds)
fixed_at_lb_inp_bound = simplex_bound.SimplexIntervalBound(
jnp.array([1., 2., 3.]), jnp.array([4., 6., 5.]),
6.)
ref_bounds = lin_coeffs @ fixed_at_lb_inp_bound.lower
_check_bounds(fixed_at_lb_inp_bound, lin_coeffs, ref_bounds)
def test_negativesimplex_linearbounds(self):
inp_bound = simplex_bound.SimplexIntervalBound(
-jnp.ones((3,)), jnp.ones((3,)), -2.)
lin_coeffs = jnp.array([
[1., 1., 1.],
[0., 1., 2.],
[-1., -2., -3.],
[-1., 0., 2.],
[1., -1., -1.]
])
    # This is equivalent to solving the problem with a simplex sum of 1.,
    # plus the constant term obtained by setting all the elements to the
    # minimum of -1.
lin_exp = linear_relaxations.LinearExpression(
lin_coeffs, jnp.zeros(lin_coeffs.shape[0]))
eq_bound = simplex_bound.SimplexIntervalBound(jnp.zeros((3,)),
2*jnp.ones((3,)), 1.)
shifted_prob_sol = (
simplex_bound.concretize_linear_function_simplexinterval_constraints(
lin_exp, eq_bound))
ref_bounds = lin_coeffs @ inp_bound.lower + shifted_prob_sol
_check_bounds(inp_bound, lin_coeffs, ref_bounds)
def test_project_onto_bound(self):
shape = (1, 10)
lower, upper = test_utils.sample_bounds(jax.random.PRNGKey(0),
shape)
simplex_sum_interp = jax.random.uniform(jax.random.PRNGKey(1), ())
simplex_sum = lower.sum() + (upper - lower).sum() * simplex_sum_interp
inp_bound = simplex_bound.SimplexIntervalBound(lower, upper, simplex_sum) # pytype: disable=wrong-arg-types # jax-ndarray
test_tensor = jax.random.uniform(jax.random.PRNGKey(2), shape)
proj_test_tensor = inp_bound.project_onto_bound(test_tensor)
self.assertGreaterEqual((proj_test_tensor-lower).min(), 0.)
self.assertGreaterEqual((upper - proj_test_tensor).min(), 0.)
self.assertAlmostEqual(proj_test_tensor.sum(), simplex_sum)
def test_project_onto_bound_identity(self):
shape = (1, 10)
lower, upper = test_utils.sample_bounds(jax.random.PRNGKey(0),
shape)
simplex_sum_interp = jax.random.uniform(jax.random.PRNGKey(1), ())
simplex_sum = lower.sum() + (upper - lower).sum() * simplex_sum_interp
inp_bound = simplex_bound.SimplexIntervalBound(lower, upper, simplex_sum) # pytype: disable=wrong-arg-types # jax-ndarray
test_tensor = lower + (upper - lower) * simplex_sum_interp
proj_test_tensor = inp_bound.project_onto_bound(test_tensor)
chex.assert_trees_all_close(test_tensor, proj_test_tensor,
atol=1e-5, rtol=1e-5)
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/simplex_bound_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for linear relaxations, both fixed and parameterised."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
import jax
from jax import lax
import jax.numpy as jnp
from jax_verify.src import activation_relaxation
from jax_verify.src import bound_propagation
from jax_verify.src import synthetic_primitives
from jax_verify.src.linear import linear_relaxations
class LinearRelaxationsTest(parameterized.TestCase):
@parameterized.named_parameters(
('exp', lax.exp_p, jnp.exp, 0.),
('softplus', synthetic_primitives.softplus_p, jax.nn.softplus, 0.),
('posreciprocal',
synthetic_primitives.posreciprocal_p, lambda x: 1./x, 2.),
)
def test_convex_fn_relaxes_with_upper_chord(self, primitive, fn, shift):
parameterized_relaxer = linear_relaxations.parameterized_relaxer
linearizer = parameterized_relaxer.parameterized_linearizer(
(), primitive, ([4], True))
chex.assert_equal(linearizer.arity, 1)
lb = jnp.array([0., -1., -1., -1.]) + shift
ub = jnp.array([1., 1.1, 0., 1.]) + shift
input_bounds = bound_propagation.IntervalBound(lb, ub)
relax_params = [jnp.array([.3, .4, 1., .5])], ()
chex.assert_trees_all_equal_shapes(
linearizer.initial_params(input_bounds), relax_params)
relax_params = linearizer.project_params(relax_params)
lower, upper = linearizer.linearize(relax_params, input_bounds)
# Both bounds should be linear.
lower_grad = _elementwise_grad(lower)
upper_grad = _elementwise_grad(upper)
mid = (lb + ub) / 2.
chex.assert_trees_all_close(lower_grad(lb), lower_grad(mid), lower_grad(ub))
chex.assert_trees_all_close(upper_grad(lb), upper_grad(mid), upper_grad(ub))
# Lower bound should be a supporting hyperplane.
x = jnp.array([.3, -.16, 0., 0.]) + shift
fn_grad = _elementwise_grad(fn)
chex.assert_trees_all_close(lower(x), fn(x))
chex.assert_trees_all_close(lower_grad(x), fn_grad(x))
# Upper bound should be a chord.
chex.assert_trees_all_close(upper(lb), fn(lb))
chex.assert_trees_all_close(upper(ub), fn(ub))
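  # For a convex elementwise function, the tightest linear upper bound on an
  # interval [lb, ub] is the chord through (lb, f(lb)) and (ub, f(ub)). Below
  # is a minimal sketch of how such a chord can be built; it is purely
  # illustrative (the parameterised relaxer constructs it internally) and
  # assumes ub > lb elementwise.
  def _chord_upper_bound(self, fn, lb, ub):
    """Returns a linear function upper-bounding the convex `fn` on [lb, ub]."""
    slope = (fn(ub) - fn(lb)) / (ub - lb)
    offset = fn(lb) - slope * lb
    return lambda x: slope * x + offset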
@parameterized.named_parameters(
('sigmoid', synthetic_primitives.sigmoid_p,
activation_relaxation.sigmoid_relaxation),
)
def test_smooth_fn_relaxes_with_convex_relaxation(
self, primitive, convex_relaxation):
parameterized_relaxer = linear_relaxations.parameterized_relaxer
linearizer = parameterized_relaxer.parameterized_linearizer(
(), primitive, ([4], True))
chex.assert_equal(linearizer.arity, 1)
lb = jnp.array([0., -1., -1., -1.])
ub = jnp.array([1., 1.1, 0., 1.])
input_bounds = bound_propagation.IntervalBound(lb, ub)
relax_params = (
[jnp.array([.3, .4, 1., .5])],
[jnp.array([.4, .5, 0., .6])])
chex.assert_trees_all_equal_shapes(
linearizer.initial_params(input_bounds), relax_params)
relax_params = linearizer.project_params(relax_params)
lower, upper = linearizer.linearize(relax_params, input_bounds)
# Obtain the convex bounds. We can trust that these are valid, as they
# have been tested in `activation_relaxation_test.py`.
mu, eta = convex_relaxation(input_bounds)
# Both bounds should be linear.
lower_grad = _elementwise_grad(lower)
upper_grad = _elementwise_grad(upper)
mid = (lb + ub) / 2.
chex.assert_trees_all_close(lower_grad(lb), lower_grad(mid), lower_grad(ub))
chex.assert_trees_all_close(upper_grad(lb), upper_grad(mid), upper_grad(ub))
# Lower bound should be a supporting hyperplane of the convex lower bound.
x = jnp.array([.3, -.16, 0., 0.])
mu_grad = _elementwise_grad(mu)
chex.assert_trees_all_close(lower(x), mu(x))
chex.assert_trees_all_close(lower_grad(x), mu_grad(x))
# Upper bound should be a supporting hyperplane of the concave upper bound.
x = jnp.array([.4, .05, -1., .2])
eta_grad = _elementwise_grad(eta)
chex.assert_trees_all_close(upper(x), eta(x))
chex.assert_trees_all_close(upper_grad(x), eta_grad(x))
@parameterized.named_parameters(
('relu', synthetic_primitives.relu_p, jax.nn.relu, {}),
('leaky_relu', synthetic_primitives.leaky_relu_p, jax.nn.leaky_relu,
{'negative_slope': .2}),
('abs', lax.abs_p, jnp.abs, {}),
)
def test_relu_relaxes_with_upper_chord_and_subgradient(
self, primitive, fn, params):
parameterized_relaxer = linear_relaxations.parameterized_relaxer
linearizer = parameterized_relaxer.parameterized_linearizer(
(), primitive, ([6], True), **params)
chex.assert_equal(linearizer.arity, 1)
lb = jnp.array([0., -1., -1., -1., -3.5, 2.1])
ub = jnp.array([1., 1.1, 0., 1., -2.5, 3.6])
input_bounds = bound_propagation.IntervalBound(lb, ub)
relax_params = jnp.array([.3, .4, 1., .5, .6, .7]), ()
chex.assert_trees_all_equal_shapes(
linearizer.initial_params(input_bounds), relax_params)
relax_params = linearizer.project_params(relax_params)
lower, upper = linearizer.linearize(relax_params, input_bounds)
# Both bounds should be linear.
lower_grad = _elementwise_grad(lower)
upper_grad = _elementwise_grad(upper)
mid = (lb + ub) / 2.
chex.assert_trees_all_close(lower_grad(lb), lower_grad(mid), lower_grad(ub))
chex.assert_trees_all_close(upper_grad(lb), upper_grad(mid), upper_grad(ub))
# Lower bound should be a subgradient at the origin.
z = jnp.zeros_like(lb)
chex.assert_trees_all_close(lower(z), fn(z, **params))
for x in (lb, ub):
chex.assert_trees_all_close(
lower(x) < fn(x, **params) + 1.e-6,
jnp.ones_like(lb, dtype=jnp.bool_))
# Upper bound should be a chord.
chex.assert_trees_all_close(upper(lb), fn(lb, **params))
chex.assert_trees_all_close(upper(ub), fn(ub, **params))
def _elementwise_grad(f):
return jax.grad(lambda x: jnp.sum(f(x)))
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/linear_relaxations_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Backward linear bounds (Crown / RVT)."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
import jax
import jax.numpy as jnp
import jax_verify
from jax_verify.src import bound_propagation
from jax_verify.src import concretization
from jax_verify.src import ibp
from jax_verify.src import optimizers
from jax_verify.src.linear import backward_crown
from jax_verify.src.linear import linear_relaxations
from jax_verify.tests import test_utils
import numpy as np
import optax
class BackwardCrownBoundTest(parameterized.TestCase):
def assertArrayAlmostEqual(self, lhs, rhs):
diff = jnp.abs(lhs - rhs).max()
self.assertAlmostEqual(diff, 0., delta=1e-5)
def test_fc_crown(self):
@hk.without_apply_rng
@hk.transform
def linear_model(inp):
return hk.Linear(1)(inp)
z = jnp.array([[1., 2., 3.]])
params = {'linear':
{'w': jnp.ones((3, 1), dtype=jnp.float32),
'b': jnp.array([2.])}}
fun = functools.partial(linear_model.apply, params)
# Test with standard interval bounds.
input_bounds = jax_verify.IntervalBound(z-1., z+1.)
output_bounds = jax_verify.backward_crown_bound_propagation(
fun, input_bounds)
self.assertArrayAlmostEqual(5., output_bounds.lower)
self.assertArrayAlmostEqual(11., output_bounds.upper)
def test_conv2d_crown(self):
@hk.without_apply_rng
@hk.transform
def conv2d_model(inp):
return hk.Conv2D(output_channels=1, kernel_shape=(2, 2),
padding='VALID', stride=1, with_bias=True)(inp)
z = jnp.array([1., 2., 3., 4.])
z = jnp.reshape(z, [1, 2, 2, 1])
params = {'conv2_d':
{'w': jnp.ones((2, 2, 1, 1), dtype=jnp.float32),
'b': jnp.array([2.])}}
fun = functools.partial(conv2d_model.apply, params)
# Test with standard interval bounds
input_bounds = jax_verify.IntervalBound(z - 1., z + 1.)
output_bounds = jax_verify.backward_crown_bound_propagation(
fun, input_bounds)
self.assertArrayAlmostEqual(8., output_bounds.lower)
self.assertArrayAlmostEqual(16., output_bounds.upper)
def test_dynamic_slice(self):
z = jnp.arange(24).reshape((2, 3, 4))
fun = lambda x: jax.lax.dynamic_slice(x, (1, 2, 3), (2, 1, 1))
input_bounds = jax_verify.IntervalBound(z - 1., z + 1.)
output_bounds = jax_verify.backward_crown_bound_propagation(
fun, input_bounds)
self.assertArrayAlmostEqual(output_bounds.lower,
fun(input_bounds.lower))
self.assertArrayAlmostEqual(output_bounds.upper,
fun(input_bounds.upper))
def test_relu_crown(self):
def relu_model(inp):
return jax.nn.relu(inp)
z = jnp.array([[-2., 3.]])
input_bounds = jax_verify.IntervalBound(z - 1., z + 1.)
output_bounds = jax_verify.backward_crown_bound_propagation(
relu_model, input_bounds)
self.assertArrayAlmostEqual(jnp.array([[0., 2.]]), output_bounds.lower)
self.assertArrayAlmostEqual(jnp.array([[0., 4.]]), output_bounds.upper)
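  # The ReLU test above exercises CROWN's elementwise relaxation. For a neuron
  # with pre-activation bounds l < 0 < u, relu(x) is upper-bounded by the
  # chord u / (u - l) * (x - l) and lower-bounded by a line a * x with
  # a in [0, 1]. The helper below is an illustrative sketch of those two
  # lines, not the transform used inside the library.
  def _relu_crown_relaxation_sketch(self, l, u, lower_slope=0.):
    """Linear lower/upper bounds of relu on [l, u], assuming l < 0 < u."""
    upper_slope = u / (u - l)
    upper_offset = -upper_slope * l
    lb_fun = lambda x: lower_slope * x
    ub_fun = lambda x: upper_slope * x + upper_offset
    return lb_fun, ub_fun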
def test_abs_crown(self):
def abs_model(inp):
return jnp.abs(inp)
abs_inp_shape = (4, 7)
lb, ub = test_utils.sample_bounds(
jax.random.PRNGKey(0), abs_inp_shape, minval=-10., maxval=10.)
input_bounds = jax_verify.IntervalBound(lb, ub)
output_bounds = jax_verify.backward_crown_bound_propagation(
abs_model, input_bounds)
uniform_inps = test_utils.sample_bounded_points(jax.random.PRNGKey(1),
(lb, ub), 100)
uniform_outs = jax.vmap(abs_model)(uniform_inps)
empirical_min = uniform_outs.min(axis=0)
empirical_max = uniform_outs.max(axis=0)
self.assertGreaterEqual((output_bounds.upper - empirical_max).min(), 0.,
'Invalid upper bound for AbsValue. The gap '
'between upper bound and empirical max is < 0')
self.assertGreaterEqual((empirical_min - output_bounds.lower).min(), 0.,
                            'Invalid lower bound for AbsValue. The gap '
'between emp. min and lower bound is negative.')
def test_leaky_relu_crown(self):
def leaky_relu_model(inp):
return jax.nn.leaky_relu(inp)
leaky_relu_inp_shape = (4, 7)
lb, ub = test_utils.sample_bounds(
jax.random.PRNGKey(0), leaky_relu_inp_shape, minval=-10., maxval=10.)
input_bounds = jax_verify.IntervalBound(lb, ub)
output_bounds = jax_verify.backward_crown_bound_propagation(
leaky_relu_model, input_bounds)
uniform_inps = test_utils.sample_bounded_points(jax.random.PRNGKey(1),
(lb, ub), 100)
uniform_outs = jax.vmap(leaky_relu_model)(uniform_inps)
empirical_min = uniform_outs.min(axis=0)
empirical_max = uniform_outs.max(axis=0)
self.assertGreaterEqual((output_bounds.upper - empirical_max).min(), 0.,
'Invalid upper bound for LeakyReLU. The gap '
'between upper bound and empirical max is < 0')
self.assertGreaterEqual((empirical_min - output_bounds.lower).min(), 0.,
                            'Invalid lower bound for LeakyReLU. The gap '
'between emp. min and lower bound is negative.')
def test_exp_crown(self):
def exp_model(inp):
return jnp.exp(inp)
exp_inp_shape = (4, 7)
lb, ub = test_utils.sample_bounds(
jax.random.PRNGKey(0), exp_inp_shape, minval=-10., maxval=10.)
input_bounds = jax_verify.IntervalBound(lb, ub)
output_bounds = jax_verify.backward_crown_bound_propagation(
exp_model, input_bounds)
uniform_inps = test_utils.sample_bounded_points(jax.random.PRNGKey(1),
(lb, ub), 100)
uniform_outs = jax.vmap(exp_model)(uniform_inps)
empirical_min = uniform_outs.min(axis=0)
empirical_max = uniform_outs.max(axis=0)
self.assertGreaterEqual((output_bounds.upper - empirical_max).min(), 0.,
'Invalid upper bound for Exponential. The gap '
'between upper bound and empirical max is < 0')
self.assertGreaterEqual((empirical_min - output_bounds.lower).min(), 0.,
                            'Invalid lower bound for Exponential. The gap '
'between emp. min and lower bound is negative.')
def test_multiply_crown(self):
def multiply_model(lhs, rhs):
return lhs * rhs
mul_inp_shape = (4, 7)
lhs_lb, lhs_ub = test_utils.sample_bounds(
jax.random.PRNGKey(0), mul_inp_shape, minval=-10., maxval=10.)
rhs_lb, rhs_ub = test_utils.sample_bounds(
jax.random.PRNGKey(1), mul_inp_shape, minval=-10., maxval=10.)
lhs_bounds = jax_verify.IntervalBound(lhs_lb, lhs_ub)
rhs_bounds = jax_verify.IntervalBound(rhs_lb, rhs_ub)
output_bounds = jax_verify.backward_crown_bound_propagation(
multiply_model, lhs_bounds, rhs_bounds)
uniform_lhs_inps = test_utils.sample_bounded_points(jax.random.PRNGKey(2),
(lhs_lb, lhs_ub), 100)
uniform_rhs_inps = test_utils.sample_bounded_points(jax.random.PRNGKey(3),
(rhs_lb, rhs_ub), 100)
uniform_outs = jax.vmap(multiply_model)(uniform_lhs_inps, uniform_rhs_inps)
empirical_min = uniform_outs.min(axis=0)
empirical_max = uniform_outs.max(axis=0)
self.assertGreaterEqual((output_bounds.upper - empirical_max).min(), 0.,
'Invalid upper bound for Multiply. The gap '
'between upper bound and empirical max is negative')
self.assertGreaterEqual((empirical_min - output_bounds.lower).min(), 0.,
                            'Invalid lower bound for Multiply. The gap '
'between emp. min and lower bound is negative.')
def test_nobatch_batch_inputs(self):
batch_shape = (3, 2)
unbatch_shape = (2, 4)
def bilinear_model(inp_1, inp_2):
return jnp.einsum('bh,hH->bH', inp_1, inp_2)
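    # inp_1 carries a leading batch dimension while inp_2 acts as an
    # unbatched weight matrix, exercising a mix of batched and unbatched
    # input bounds.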
lb_1, ub_1 = test_utils.sample_bounds(jax.random.PRNGKey(0), batch_shape,
minval=-10, maxval=10.)
lb_2, ub_2 = test_utils.sample_bounds(jax.random.PRNGKey(1), unbatch_shape,
minval=-10, maxval=10.)
bound_1 = jax_verify.IntervalBound(lb_1, ub_1)
bound_2 = jax_verify.IntervalBound(lb_2, ub_2)
output_bounds = backward_crown.backward_crown_bound_propagation(
bilinear_model, bound_1, bound_2)
uniform_1 = test_utils.sample_bounded_points(jax.random.PRNGKey(2),
(lb_1, ub_1), 100)
uniform_2 = test_utils.sample_bounded_points(jax.random.PRNGKey(3),
(lb_2, ub_2), 100)
uniform_outs = jax.vmap(bilinear_model)(uniform_1, uniform_2)
empirical_min = uniform_outs.min(axis=0)
empirical_max = uniform_outs.max(axis=0)
self.assertGreaterEqual((output_bounds.upper - empirical_max).min(), 0.,
                            'Invalid upper bound for mix of batched/unbatched '
'input bounds.')
self.assertGreaterEqual((empirical_min - output_bounds.lower).min(), 0.,
                            'Invalid lower bound for mix of batched/unbatched '
'input bounds.')
def test_equal_bounds_parameterized(self):
model = jax.nn.relu
sample_value = jnp.array([-1., 1.])
inp_bound = jax_verify.IntervalBound(sample_value, sample_value)
optimizer = optimizers.OptaxOptimizer(optax.adam(1e-3), num_steps=10)
concretizer = concretization.ChunkedBackwardConcretizer(
backward_crown.OptimizingLinearBoundBackwardTransform(
linear_relaxations.parameterized_relaxer,
backward_crown.CONCRETIZE_ARGS_PRIMITIVE,
optimizer))
algorithm = concretization.BackwardConcretizingAlgorithm(concretizer)
bound, _ = bound_propagation.bound_propagation(
algorithm, model, inp_bound)
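    # With a degenerate input interval (lower == upper), the propagated lower
    # and upper bounds should coincide.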
np.testing.assert_array_almost_equal(bound.lower, bound.upper) # pytype: disable=attribute-error # jax-ndarray
def test_forwardconcretization_withbackwardalg_reference_out(self):
architecture = [2, 4, 4, 2]
problem_key = jax.random.PRNGKey(42)
fun, (lb, ub) = test_utils.set_up_toy_problem(problem_key, 2,
architecture)
def model_fun(inp):
out = fun(inp)
select = jnp.array([[True, False]])
# We are causing the last operation to be a select, to force it to be a
# reference.
return jnp.where(select, out, -1.* out)
inp_bound = jax_verify.IntervalBound(lb, ub)
optimizer = optimizers.OptaxOptimizer(optax.adam(1e-3), num_steps=10)
concretizer = concretization.ChunkedBackwardConcretizer(
backward_crown.OptimizingLinearBoundBackwardTransform(
linear_relaxations.parameterized_relaxer,
backward_crown.CONCRETIZE_ARGS_PRIMITIVE,
optimizer))
algorithm = concretization.BackwardAlgorithmForwardConcretization(
ibp.bound_transform, concretizer)
# This used to cause an exception as the backward discovery of nodes
# needing relaxation was not working. The Scanner was not being
# propagated backwards.
bound_propagation.bound_propagation(algorithm, model_fun, inp_bound)
def test_keyword_and_flat_params(self):
def model_fun(p_dict, a):
elt_1 = p_dict['elt_1']
elt_2 = p_dict['elt_2']
interm = jax.nn.relu(elt_1 @ elt_2)
return jnp.sum(interm + a)
p_dict = {'elt_1': jnp.ones((2, 10)),
'elt_2': jax_verify.IntervalBound(jnp.zeros((10, 2)),
jnp.ones((10, 2)))}
a = jax_verify.IntervalBound(jnp.zeros((2, 2)),
jnp.ones((2, 2)))
    # This used to cause an exception due to a bug in the implementation
    # of concretization.BackwardConcretizationAlgorithm, when the input bounds
    # get filled in the backward_env.
backward_crown.backward_crown_bound_propagation(model_fun, p_dict, a)
@parameterized.named_parameters(
('crown', linear_relaxations.crown_rvt_relaxer),
('fastlin', linear_relaxations.fastlin_rvt_relaxer),
('parameterized', linear_relaxations.parameterized_relaxer),
)
def test_chunking(self, relaxer):
batch_size = 3
input_size = 2
hidden_size = 5
final_size = 4
input_shape = (batch_size, input_size)
hidden_lay_weight_shape = (input_size, hidden_size)
final_lay_weight_shape = (hidden_size, final_size)
inp_lb, inp_ub = test_utils.sample_bounds(
jax.random.PRNGKey(0), input_shape,
minval=-1., maxval=1.)
inp_bound = jax_verify.IntervalBound(inp_lb, inp_ub)
hidden_lay_weight = jax.random.uniform(jax.random.PRNGKey(1),
hidden_lay_weight_shape)
final_lay_weight = jax.random.uniform(jax.random.PRNGKey(2),
final_lay_weight_shape)
def model_fun(inp):
hidden = inp @ hidden_lay_weight
act = jax.nn.relu(hidden)
final = act @ final_lay_weight
return final
if isinstance(relaxer, linear_relaxations.ParameterizedLinearBoundsRelaxer):
optimizer = optimizers.OptaxOptimizer(optax.adam(1e-3), num_steps=10)
concretizing_transform = (
backward_crown.OptimizingLinearBoundBackwardTransform(
relaxer, backward_crown.CONCRETIZE_ARGS_PRIMITIVE,
optimizer))
else:
concretizing_transform = backward_crown.LinearBoundBackwardTransform(
relaxer, backward_crown.CONCRETIZE_ARGS_PRIMITIVE)
chunked_concretizer = concretization.ChunkedBackwardConcretizer(
concretizing_transform, max_chunk_size=16)
unchunked_concretizer = concretization.ChunkedBackwardConcretizer(
concretizing_transform, max_chunk_size=0)
chunked_algorithm = concretization.BackwardConcretizingAlgorithm(
chunked_concretizer)
unchunked_algorithm = concretization.BackwardConcretizingAlgorithm(
unchunked_concretizer)
chunked_bound, _ = bound_propagation.bound_propagation(
chunked_algorithm, model_fun, inp_bound)
unchunked_bound, _ = bound_propagation.bound_propagation(
unchunked_algorithm, model_fun, inp_bound)
np.testing.assert_array_almost_equal(chunked_bound.lower, # pytype: disable=attribute-error # jax-ndarray
unchunked_bound.lower) # pytype: disable=attribute-error # jax-ndarray
np.testing.assert_array_almost_equal(chunked_bound.upper, # pytype: disable=attribute-error # jax-ndarray
unchunked_bound.upper) # pytype: disable=attribute-error # jax-ndarray
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/backward_crown_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for branching decisions."""
from absl.testing import absltest
import chex
import jax.numpy as jnp
from jax_verify.src import ibp
from jax_verify.src.branching import branch_selection
class BranchSelectionTest(chex.TestCase):
def test_jittable_branching_decisions_enforced(self):
free_bounds = ibp.IntervalBound(-jnp.ones(4,),
jnp.ones(4,))
layer_index = (1, 2)
branching_decision_list = [
# Neuron 0 is greater than 0.
branch_selection.BranchDecision(layer_index, 0, 0., 1),
# Neuron 1 is smaller than 0.5
branch_selection.BranchDecision(layer_index, 1, 0.5, -1),
# Neuron 2 is between -0.3 and 0.3
branch_selection.BranchDecision(layer_index, 2, -0.3, 1),
branch_selection.BranchDecision(layer_index, 2, 0.3, -1),
# Neuron 3 is below 2., which is a spurious constraint
branch_selection.BranchDecision(layer_index, 3, 2., -1)
]
branching_decisions_tensors = branch_selection.branching_decisions_tensors(
branching_decision_list, 3, 8)
enforced_bounds = branch_selection.enforce_jittable_branching_decisions(
branching_decisions_tensors, layer_index, free_bounds)
chex.assert_trees_all_close((enforced_bounds.lower, enforced_bounds.upper),
(jnp.array([0., -1., -0.3, -1.]),
jnp.array([1., 0.5, 0.3, 1.])))
# check that the bounds are not modified when enforced on another layer.
other_lay_bound = branch_selection.enforce_jittable_branching_decisions(
branching_decisions_tensors, (1, 3), free_bounds)
chex.assert_trees_all_close((free_bounds.lower, free_bounds.upper),
(other_lay_bound.lower, other_lay_bound.upper))
def test_infeasible_bounds_detection(self):
non_crossing_bounds = ibp.IntervalBound(jnp.zeros(3,), jnp.ones(3,))
crossing_bounds = ibp.IntervalBound(jnp.array([0., 0., 1.]),
jnp.array([1., 1., 0.5]))
non_crossing_infeasible = branch_selection.infeasible_bounds(
non_crossing_bounds.to_jittable())
self.assertFalse(non_crossing_infeasible)
crossing_infeasible = branch_selection.infeasible_bounds(
crossing_bounds.to_jittable())
self.assertTrue(crossing_infeasible)
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/branch_selection_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for jax_verify opt_utils."""
import functools
from absl.testing import absltest
import chex
import jax
from jax import numpy as jnp
from jax_verify.src import opt_utils
import numpy as np
class OptUtilsTest(absltest.TestCase):
def test_greedy_assign(self):
    # Build a list of upper bounds, sum, and the expected greedy assignment.
problems = [
(0.5 * jnp.ones(5,), 2.5, 0.5 * jnp.ones(5,)),
(0.5 * jnp.ones(5,), 1.0, jnp.array([0.5, 0.5, 0., 0., 0.])),
(0.5 * jnp.ones(5,), 0.75, jnp.array([0.5, 0.25, 0., 0., 0.])),
(0.5 * jnp.ones(5,), 0.3, jnp.array([0.3, 0., 0., 0., 0.])),
(jnp.array([0., 1., 0., 0.5]), 1.2, jnp.array([0., 1., 0., 0.2])),
(jnp.array([1., 2., 3.]), 2.5, jnp.array([1., 1.5, 0.]))
]
for upper, total_sum, ref_answer in problems:
# Try the forward assignment.
pred = opt_utils.greedy_assign(upper, total_sum)
chex.assert_trees_all_close(pred, ref_answer)
def test_1d_binary_search(self):
for seed in range(10):
argmax = jax.random.uniform(jax.random.PRNGKey(seed), ())
# Try out two possible types of concave function for which we know the
# maximum.
ccv_fun = lambda x, argmax=argmax: -(x - argmax)**2
pred_argmax, max_val = opt_utils.concave_1d_max(
ccv_fun, jnp.zeros(()), jnp.ones(()), num_steps=64)
self.assertAlmostEqual(max_val, 0., delta=1e-6) # pytype: disable=wrong-arg-types # jax-ndarray
self.assertAlmostEqual(pred_argmax, argmax, delta=1e-6)
alt_ccv_fun = lambda x, argmax=argmax: -jnp.abs(x - argmax)
pred_argmax, max_val = opt_utils.concave_1d_max(
alt_ccv_fun, jnp.zeros(()), jnp.ones(()), num_steps=64)
self.assertAlmostEqual(max_val, 0., delta=1e-6) # pytype: disable=wrong-arg-types # jax-ndarray
self.assertAlmostEqual(pred_argmax, argmax, delta=1e-6)
x, y = opt_utils.concave_1d_max(
lambda x: -x**2 + 4.*x - 3., # max at x=2, y=1
jnp.array([0., -11., 10.]),
jnp.array([3., -10., 11.]),
)
np.testing.assert_array_almost_equal(x, np.array([2., -10., 10.]),
decimal=3)
np.testing.assert_array_almost_equal(y, np.array([1., -143., -63.]),
decimal=4)
def test_simplex_projection_fully_constrained(self):
    # Test the edge case of a simplex sum with one element.
# This should always give the simplex_sum if it's in the valid bounds.
all_initial_values = jnp.expand_dims(jnp.linspace(-10., 10., 100), 1)
project_onto_01 = functools.partial(opt_utils.project_onto_interval_simplex,
jnp.zeros((1,)), jnp.ones((1,)),
1.0)
batch_project_onto_01 = jax.vmap(project_onto_01)
all_res = batch_project_onto_01(all_initial_values)
self.assertAlmostEqual(all_res.min(), 1.0, delta=1e-6)
self.assertAlmostEqual(all_res.max(), 1.0, delta=1e-6)
project_onto_03 = functools.partial(opt_utils.project_onto_interval_simplex,
jnp.zeros((1,)), 3*jnp.ones((1,)),
1.0)
batch_project_onto_03 = jax.vmap(project_onto_03)
all_res = batch_project_onto_03(all_initial_values)
self.assertAlmostEqual(all_res.min(), 1.0, delta=1e-6)
self.assertAlmostEqual(all_res.max(), 1.0, delta=1e-6)
key = jax.random.PRNGKey(0)
initial_values = jax.random.uniform(key, (100, 5), minval=-10, maxval=10)
# There is only one valid solution to this problem: everything is 1.
project = functools.partial(opt_utils.project_onto_interval_simplex,
jnp.zeros((5,)), jnp.ones((5,)), 5.0)
batch_project = jax.vmap(project)
all_res = batch_project(initial_values)
self.assertAlmostEqual(all_res.min(), 1.0, delta=1e-6)
self.assertAlmostEqual(all_res.max(), 1.0, delta=1e-6)
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/opt_utils_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the extraction of concretizing values."""
from absl.testing import absltest
import chex
import jax.numpy as jnp
from jax_verify.src import ibp
from jax_verify.src.branching import branch_utils
from jax_verify.src.linear import linear_relaxations
class ConcretizingInputTest(chex.TestCase):
def test_linf_bound_concretizing_inputs(self):
# Create a stack of two linear expression to test things.
linexp = linear_relaxations.LinearExpression(
jnp.stack([jnp.ones((2, 3)),
-jnp.ones((2, 3))]),
jnp.zeros((2,)))
input_bound = ibp.IntervalBound(-2 * jnp.ones((2, 3)),
2 * jnp.ones((2, 3)))
concretizing_inp = branch_utils.minimizing_concretizing_input(
linexp, input_bound)
    # Check that the concretizing input for each linexp has the same shape
    # as the network input.
chex.assert_shape(concretizing_inp, (2, 2, 3))
# Evaluating the bound given by the concretizing inp
bound_by_concinp = ((linexp.lin_coeffs * concretizing_inp).sum(axis=(1, 2))
+ linexp.offset)
# Result by concretizing directly:
concretized_bound = linear_relaxations.concretize_linear_expression(
linexp, input_bound)
chex.assert_trees_all_close(bound_by_concinp, concretized_bound)
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/branch_utils_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for solving the convex relaxation using CVXPY."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
import jax
import jax.numpy as jnp
import jax_verify
from jax_verify.src import bound_propagation
from jax_verify.src.mip_solver import cvxpy_relaxation_solver
from jax_verify.src.mip_solver import relaxation
class CVXPYRelaxationTest(parameterized.TestCase):
def assertArrayAlmostEqual(self, lhs, rhs):
diff = jnp.abs(lhs - rhs).max()
self.assertAlmostEqual(diff, 0.)
def get_bounds(self, fun, input_bounds):
output = fun(input_bounds.lower)
boundprop_transform = jax_verify.ibp_transform
relaxation_transform = relaxation.RelaxationTransform(boundprop_transform)
var, env = bound_propagation.bound_propagation(
bound_propagation.ForwardPropagationAlgorithm(relaxation_transform),
fun, input_bounds)
objective_bias = 0.
index = 0
lower_bounds = []
upper_bounds = []
for output_idx in range(output.size):
objective = (jnp.arange(output.size) == output_idx).astype(jnp.float32)
lower_bound, _, _ = relaxation.solve_relaxation(
cvxpy_relaxation_solver.CvxpySolver, objective, objective_bias,
var, env, index)
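      # The relaxation solver minimizes, so the upper bound is recovered by
      # minimizing the negated objective and flipping the sign below.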
neg_upper_bound, _, _ = relaxation.solve_relaxation(
cvxpy_relaxation_solver.CvxpySolver, -objective, objective_bias,
var, env, index)
lower_bounds.append(lower_bound)
upper_bounds.append(-neg_upper_bound)
return jnp.array(lower_bounds), jnp.array(upper_bounds)
def test_linear_cvxpy_relaxation(self):
def linear_model(inp):
return hk.Linear(1)(inp)
z = jnp.array([[1., 2., 3.]])
params = {'linear':
{'w': jnp.ones((3, 1), dtype=jnp.float32),
'b': jnp.array([2.])}}
fun = functools.partial(
hk.without_apply_rng(hk.transform(linear_model)).apply,
params)
input_bounds = jax_verify.IntervalBound(z - 1., z + 1.)
lower_bounds, upper_bounds = self.get_bounds(fun, input_bounds)
self.assertAlmostEqual(5., lower_bounds)
self.assertAlmostEqual(11., upper_bounds)
def test_conv1d_cvxpy_relaxation(self):
def conv1d_model(inp):
return hk.Conv1D(output_channels=1, kernel_shape=2,
padding='VALID', stride=1, with_bias=True)(inp)
z = jnp.array([3., 4.])
z = jnp.reshape(z, [1, 2, 1])
params = {'conv1_d':
{'w': jnp.ones((2, 1, 1), dtype=jnp.float32),
'b': jnp.array([2.])}}
fun = functools.partial(
hk.without_apply_rng(hk.transform(conv1d_model)).apply,
params)
input_bounds = jax_verify.IntervalBound(z - 1., z + 1.)
lower_bounds, upper_bounds = self.get_bounds(fun, input_bounds)
self.assertAlmostEqual(7., lower_bounds, delta=1e-5)
self.assertAlmostEqual(11., upper_bounds, delta=1e-5)
def test_conv2d_cvxpy_relaxation(self):
def conv2d_model(inp):
return hk.Conv2D(output_channels=1, kernel_shape=(2, 2),
padding='VALID', stride=1, with_bias=True)(inp)
z = jnp.array([1., 2., 3., 4.])
z = jnp.reshape(z, [1, 2, 2, 1])
params = {'conv2_d':
{'w': jnp.ones((2, 2, 1, 1), dtype=jnp.float32),
'b': jnp.array([2.])}}
fun = functools.partial(
hk.without_apply_rng(hk.transform(conv2d_model)).apply,
params)
input_bounds = jax_verify.IntervalBound(z - 1., z + 1.)
lower_bounds, upper_bounds = self.get_bounds(fun, input_bounds)
self.assertAlmostEqual(8., lower_bounds)
self.assertAlmostEqual(16., upper_bounds)
def test_relu_cvxpy_relaxation(self):
def relu_model(inp):
return jax.nn.relu(inp)
z = jnp.array([[-2., 3.]])
input_bounds = jax_verify.IntervalBound(z - 1., z + 1.)
lower_bounds, upper_bounds = self.get_bounds(relu_model, input_bounds)
self.assertArrayAlmostEqual(jnp.array([[0., 2.]]), lower_bounds)
self.assertArrayAlmostEqual(jnp.array([[0., 4.]]), upper_bounds)
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/cvxpy_relaxation_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name
"""Small helper functions for testing."""
import jax
import jax.numpy as jnp
import jax.random as random
from jax_verify.extensions.sdp_verify import utils
import numpy as np
################## Toy Networks ######################
def _init_fc_layer(key, n_in, n_out):
k1, k2 = random.split(key)
W = random.normal(k1, (n_in, n_out))/ n_in
b = random.normal(k2, (n_out,))/ n_in
return W, b
def _init_conv_layer(key, n_h, n_w, n_cout, n_cin):
"""Random weights for a conv layer.
Args:
key: PRNG key
n_h: Kernel Height
n_w: Kernel Width
n_cout: Out Channels
n_cin: In Channels
Returns:
W: weights of the conv filters
b: biases for the conv layer
"""
k1, k2 = random.split(key)
W = random.normal(k1, (n_h, n_w, n_cin, n_cout))
b = random.normal(k2, (n_cout,))
return W, b
def make_mlp_params(layer_sizes, key):
sizes = layer_sizes
keys = random.split(key, len(sizes))
params = list(map(_init_fc_layer, keys[1:], sizes[:-1], sizes[1:]))
return params
def make_cnn_params(layer_sizes, key):
"""Initialize a random network with conv layers, followed by linear layers.
Args:
    layer_sizes: Layer 0 is the shape of the input in NHWC format,
      e.g. (1, 32, 32, 3). The remaining entries are layer-wise
      hyperparameters: a conv layer is described by a dict of the form
      {'n_h': 3, 'n_w': 3, 'n_cout': 32, 'padding': 'VALID', 'stride': 1,
      'n_cin': 3} (its 'input_shape' and 'output_shape' entries are filled
      in by this function); an FC layer is a single int giving the number
      of output neurons.
key: PRNG key
  Returns:
    params: list of per-layer parameters; each conv layer keeps its dict
      (with 'W' and 'b' filled in) and each FC layer is a (W, b) tuple.
"""
sizes = layer_sizes
keys = random.split(key, len(sizes))
params = []
# NHWC, assert square
assert len(layer_sizes[0]) == 4
assert layer_sizes[0][1] == layer_sizes[0][2]
conv_check = 0
input_shape = layer_sizes[0][1]
for counter, size in enumerate(layer_sizes[1:]):
if isinstance(size, dict):
size['input_shape'] = input_shape
size['W'], size['b'] = _init_conv_layer(keys[counter], size['n_h'],
size['n_w'], size['n_cout'],
size['n_cin'])
if size['padding'] == 'VALID':
input_shape = int(np.ceil(input_shape - size['n_h'] +1)/size['stride'])
else:
input_shape = int(np.ceil(input_shape/size['stride']))
size['output_shape'] = input_shape
params.append(size)
elif isinstance(size, int):
# Check layer is FC
if conv_check == 0:
input_shape = input_shape * input_shape * layer_sizes[counter]['n_cout']
conv_check = 1
params.append(_init_fc_layer(keys[counter], input_shape, size))
input_shape = size
else:
raise NotImplementedError('Unknown layer')
return params
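# Illustrative usage (hypothetical shapes, mirroring make_toy_verif_instance
# below): a 2x2 single-channel input, one 2x2 conv layer with 2 output
# channels, then an FC layer with 3 output neurons:
#   layer_sizes = [(1, 2, 2, 1),
#                  {'n_h': 2, 'n_w': 2, 'n_cout': 2, 'padding': 'VALID',
#                   'stride': 1, 'n_cin': 1},
#                  3]
#   params = make_cnn_params(layer_sizes, jax.random.PRNGKey(0))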
################## Toy Verification Instances ####################
def make_toy_verif_instance(seed=None, label=None, target_label=None, nn='mlp'):
"""Mainly used for unit testing."""
key = jax.random.PRNGKey(0) if seed is None else jax.random.PRNGKey(seed)
if nn == 'mlp':
layer_sizes = '5, 5, 5'
layer_sizes = np.fromstring(layer_sizes, dtype=int, sep=',')
params = make_mlp_params(layer_sizes, key)
inp_shape = (1, layer_sizes[0])
else:
if nn == 'cnn_simple':
pad = 'VALID'
# Input and filter size match -> filter is applied at just one location.
else:
pad = 'SAME'
# Input is padded on right/bottom to form 3x3 input
layer_sizes = [(1, 2, 2, 1), {
'n_h': 2,
'n_w': 2,
'n_cout': 2,
'padding': pad,
'stride': 1,
'n_cin': 1
}, 3]
inp_shape = layer_sizes[0]
params = make_cnn_params(layer_sizes, key)
bounds = utils.boundprop(
params,
utils.IntBound(lb=np.zeros(inp_shape),
ub=1*np.ones(inp_shape),
lb_pre=None,
ub_pre=None)
)
target_label = 1 if target_label is None else target_label
label = 2 if label is None else label
verif_instance = utils.make_nn_verif_instance(
params,
bounds,
target_label=target_label,
label=label,
input_bounds=(0., 1.))
return verif_instance
def make_mlp_layer_from_conv_layer(layer_params, input_bounds):
"""Convert Conv Layer into equivalent MLP layer."""
assert isinstance(layer_params, dict)
assert layer_params['padding'] == 'SAME'
assert layer_params['stride'] == 1
assert layer_params['n_cin'] == 1
# only 'SAME' padding supported for now with stride (1,1)
# to be used for unit-test support only
# TODO: Add support for 'VALID'
inp_shape = (layer_params['input_shape'], layer_params['input_shape'])
w, b = layer_params['W'], layer_params['b']
op_shape = int(np.ceil(inp_shape[0] / layer_params['stride']))
pad_h = max((op_shape - 1) * layer_params['stride'] + layer_params['n_h'] -
inp_shape[0], 0)
pad_t = pad_h // 2
pad_b = pad_h - pad_t
pad_inp_shape = [inp_shape[0] + pad_h, inp_shape[1] + pad_h]
padded_bounds = jnp.zeros(pad_inp_shape)
lb = padded_bounds.at[pad_t:-pad_b, pad_t:-pad_b].add(input_bounds.lb[0, :, :,
0])
ub = padded_bounds.at[pad_t:-pad_b, pad_t:-pad_b].add(input_bounds.ub[0, :, :,
0])
pad_filter_shape = pad_inp_shape + [inp_shape[0], inp_shape[1], w.shape[-1]]
pad_filter = jnp.zeros(pad_filter_shape)
pad_bias = jnp.zeros(inp_shape + (w.shape[-1],))
n_h, n_w = w.shape[0], w.shape[1]
# unrolling the conv into an FC layer, stride=(1,1)
for i in range(inp_shape[0]):
for j in range(inp_shape[1]):
pad_filter = pad_filter.at[i:i + n_h, j:j + n_w, i, j, 0].add(w[:, :, 0,
0])
pad_bias = pad_bias.at[i, j, 0].add(b[0])
pad_filter = pad_filter.at[i:i + n_h, j:j + n_w, i, j, 1].add(w[:, :, 0,
1])
pad_bias = pad_bias.at[i, j, 1].add(b[1])
pad_filter_lin = jnp.reshape(
pad_filter,
(pad_inp_shape[0] * pad_inp_shape[1], inp_shape[0] * inp_shape[1] * 2))
pad_bias_lin = jnp.reshape(pad_bias, inp_shape[0] * inp_shape[1] * 2)
return lb, ub, pad_filter_lin, pad_bias_lin
def make_mlp_verif_instance_from_cnn(verif_instance):
"""Convert CNN verif-instance into equivalent MLP verif-instance."""
params_cnn = verif_instance.params_full
assert not any([isinstance(x, dict) for x in params_cnn[1:]])
# Only supports networks with structure conv-{fc}*
weights = []
for layer_params in params_cnn:
if isinstance(layer_params, dict):
lb, ub, w_lin, b_lin = make_mlp_layer_from_conv_layer(
layer_params, verif_instance.bounds[0])
lb = jnp.reshape(lb, (1, -1)) * 0.
ub = jnp.reshape(ub, (1, -1)) * 1.
weights.append((w_lin, b_lin))
else:
weights.append(layer_params)
return lb, ub, weights
| jax_verify-master | jax_verify/tests/sdp_verify/test_utils.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for problem_from_graph.py."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import numpy as jnp
import jax_verify
from jax_verify.extensions.sdp_verify import problem
from jax_verify.extensions.sdp_verify import problem_from_graph
from jax_verify.extensions.sdp_verify import sdp_verify
from jax_verify.extensions.sdp_verify import utils
from jax_verify.src import ibp
from jax_verify.tests.sdp_verify import test_utils
class SdpProblemTest(parameterized.TestCase):
def assertArrayAlmostEqual(self, lhs, rhs):
self.assertEqual(lhs is None, rhs is None)
if lhs is not None:
diff = jnp.abs(lhs - rhs).max()
self.assertAlmostEqual(diff, 0., places=5)
def test_sdp_problem_equivalent_to_sdp_verify(self):
# Set up a verification problem for test purposes.
verif_instance = test_utils.make_toy_verif_instance(label=2, target_label=1)
# Set up a spec function that replicates the test problem.
inputs = jnp.zeros((1, 5))
input_bounds = jax_verify.IntervalBound(
jnp.zeros_like(inputs), jnp.ones_like(inputs))
boundprop_transform = ibp.bound_transform
def spec_fn(x):
x = utils.predict_mlp(verif_instance.params, x)
x = jax.nn.relu(x)
return jnp.sum(
jnp.reshape(x, (-1,)) * verif_instance.obj) + verif_instance.const
# Build an SDP verification instance using the code under test.
sdp_relu_problem = problem_from_graph.SdpReluProblem(
boundprop_transform, spec_fn, input_bounds)
sdp_problem_vi = sdp_relu_problem.build_sdp_verification_instance()
# Build an SDP verification instance using existing `sdp_verify` code.
sdp_verify_vi = problem.make_sdp_verif_instance(verif_instance)
self._assert_verif_instances_equal(sdp_problem_vi, sdp_verify_vi)
def _assert_verif_instances_equal(self, sdp_problem_vi, sdp_verify_vi):
# Assert that bounds are the same.
self.assertEqual(len(sdp_problem_vi.bounds), len(sdp_verify_vi.bounds))
for sdp_problem_bound, sdp_verify_bound in zip(
sdp_problem_vi.bounds, sdp_verify_vi.bounds):
self.assertArrayAlmostEqual(sdp_problem_bound.lb, sdp_verify_bound.lb)
self.assertArrayAlmostEqual(sdp_problem_bound.ub, sdp_verify_bound.ub)
# Don't compare dual shapes/types in detail, because the different
# implementations can and do represent them in different
# (but equivalent) ways.
# They should have the same length, though.
self.assertEqual(len(sdp_problem_vi.dual_shapes),
len(sdp_verify_vi.dual_shapes))
self.assertEqual(len(sdp_problem_vi.dual_types),
len(sdp_verify_vi.dual_types))
# Evaluate each problem's dual objective on the same random dual variables.
def random_dual_fun(verif_instance):
key = jax.random.PRNGKey(103)
random_like = lambda x: jax.random.uniform(key, x.shape, x.dtype)
duals = sdp_verify.init_duals(verif_instance, None)
duals = jax.tree_map(random_like, duals)
return sdp_verify.dual_fun(verif_instance, duals)
self.assertAlmostEqual(
random_dual_fun(sdp_problem_vi), random_dual_fun(sdp_verify_vi),
places=5)
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/sdp_verify/problem_from_graph_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for crown_boundprop.py."""
import functools
import pickle
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
import jax_verify
from jax_verify.extensions.sdp_verify import boundprop_utils
from jax_verify.extensions.sdp_verify import utils
import numpy as np
class BoundpropTest(parameterized.TestCase):
def test_crown_boundprop(self):
"""Test CROWN bounds vs FGSM on Wong-Small MNIST CNN."""
crown_boundprop = functools.partial(boundprop_utils.boundprop,
boundprop_type='crown_ibp')
self._test_boundprop(crown_boundprop)
def test_nonconvex_boundprop(self):
"""Test Nonconvex bounds vs FGSM on Wong-Small MNIST CNN."""
# Minimal test, since this already takes 70s.
nonconvex_boundprop = functools.partial(
boundprop_utils.boundprop, boundprop_type='nonconvex',
nonconvex_boundprop_steps=2)
self._test_boundprop(nonconvex_boundprop, num_idxs_to_test=1)
def test_ibp_boundprop(self):
def boundprop(params, x, epsilon, input_bounds):
assert len(x.shape) == 4 and x.shape[0] == 1, f'shape check {x.shape}'
init_bound = utils.init_bound(x[0], epsilon, input_bounds=input_bounds)
return utils.boundprop(params, init_bound)
self._test_boundprop(boundprop)
def _test_boundprop(self, boundprop_method, num_idxs_to_test=5):
"""Test `boundprop_method` on Wong-Small MNIST CNN."""
with jax_verify.open_file('mnist/x_test_first100.npy', 'rb') as f:
xs = np.load(f)
model_name = 'models/mnist_wongsmall_eps_10_adv.pkl'
with jax_verify.open_file(model_name, 'rb') as f:
params = pickle.load(f)
x = xs[0]
eps = 0.1
bounds = boundprop_method(params, np.expand_dims(x, axis=0), eps,
input_bounds=(0., 1.))
crown_lbs = utils.flatten([b.lb_pre for b in bounds[1:]])
crown_ubs = utils.flatten([b.ub_pre for b in bounds[1:]])
max_idx = crown_lbs.shape[0]
np.random.seed(0)
test_idxs = np.random.randint(max_idx, size=num_idxs_to_test)
@jax.jit
def fwd(x):
_, acts = utils.predict_cnn(params, jnp.expand_dims(x, 0),
include_preactivations=True)
return acts
get_act = lambda x, idx: utils.flatten(fwd(x), backend=jnp)[idx]
print('Number of activations:', crown_lbs.shape[0])
print('Bound shape', [b.lb.shape for b in bounds])
print('Activation shape', [a.shape for a in fwd(x)])
assert utils.flatten(fwd(x)).shape == crown_lbs.shape, (
f'bad shape {crown_lbs.shape}, {utils.flatten(fwd(x)).shape}')
for idx in test_idxs:
nom = get_act(x, idx)
crown_lb = crown_lbs[idx]
crown_ub = crown_ubs[idx]
adv_loss = lambda x: get_act(x, idx) # pylint: disable=cell-var-from-loop
x_lb = utils.pgd(adv_loss, x, eps, 5, 0.01)
fgsm_lb = get_act(x_lb, idx)
adv_loss = lambda x: -get_act(x, idx) # pylint: disable=cell-var-from-loop
x_ub = utils.pgd(adv_loss, x, eps, 5, 0.01)
fgsm_ub = get_act(x_ub, idx)
print(f'Idx {idx}: Boundprop LB {crown_lb}, FGSM LB {fgsm_lb}, '
f'Nominal {nom}, FGSM UB {fgsm_ub}, Boundprop UB {crown_ub}')
margin = 1e-5
assert crown_lb <= fgsm_lb + margin, f'Bad lower bound. Idx {idx}.'
assert crown_ub >= fgsm_ub - margin, f'Bad upper bound. Idx {idx}.'
crown_lb_post, fgsm_lb_post = max(crown_lb, 0), max(fgsm_lb, 0)
crown_ub_post, fgsm_ub_post = max(crown_ub, 0), max(fgsm_ub, 0)
assert crown_lb_post <= fgsm_lb_post + margin, f'Idx {idx}.'
assert crown_ub_post >= fgsm_ub_post - margin, f'Idx {idx}.'
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/sdp_verify/boundprop_utils_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for cvxpy_verify.py."""
import unittest
from absl.testing import absltest
from absl.testing import parameterized
from cvxpy.reductions.solvers.defines import INSTALLED_MI_SOLVERS as MIP_SOLVERS
import jax.numpy as jnp
from jax_verify.extensions.sdp_verify import cvxpy_verify
from jax_verify.extensions.sdp_verify import utils
from jax_verify.tests.sdp_verify import test_utils
NO_MIP_SOLVERS_MESSAGE = 'No mixed-integer solver is installed.'
class CvxpyTest(parameterized.TestCase):
@unittest.skipUnless(MIP_SOLVERS, NO_MIP_SOLVERS_MESSAGE)
def test_mip_status(self):
"""Test toy MIP is solved optimally by cvxpy."""
for seed in range(10):
verif_instance = test_utils.make_toy_verif_instance(seed)
val, info = cvxpy_verify.solve_mip_mlp_elided(verif_instance)
status = info['problem'].status
assert val is not None
assert status in ('optimal', 'optimal_inaccurate'), f'Status is {status}.'
def test_sdp_status(self):
"""Test toy SDP is solved optimally by cvxpy."""
for seed in range(10):
verif_instance = test_utils.make_toy_verif_instance(seed)
val, info = cvxpy_verify.solve_sdp_mlp_elided(verif_instance)
status = info['problem'].status
assert val is not None
assert status in ('optimal', 'optimal_inaccurate'), f'Status is {status}.'
def _fgsm_example_and_bound(params, target_label, label):
model_fn = lambda x: utils.predict_mlp(params, x)
x = 0.5 * jnp.ones(utils.nn_layer_sizes(params)[0])
epsilon = 0.5
x_adv = utils.fgsm_single(model_fn, x, label, target_label, epsilon,
num_steps=30, step_size=0.03)
return x_adv, utils.adv_objective(model_fn, x_adv, label, target_label)
MARGIN = 1e-6
class CrossingBoundsTest(parameterized.TestCase):
"""Check IBP,SDP relaxations <= MIP <= FGSM upper bound."""
@unittest.skipUnless(MIP_SOLVERS, NO_MIP_SOLVERS_MESSAGE)
def test_fgsm_vs_mip(self):
num_repeats = 5
target_label, label = 1, 2
for seed in range(num_repeats):
verif_instance = test_utils.make_toy_verif_instance(
seed, target_label=target_label, label=label)
mip_val, _ = cvxpy_verify.solve_mip_mlp_elided(verif_instance)
_, fgsm_val = _fgsm_example_and_bound(
verif_instance.params_full, target_label=target_label, label=label)
assert mip_val > fgsm_val - MARGIN, (
'MIP exact solution should be greater than FGSM lower bound.')
@unittest.skipUnless(MIP_SOLVERS, NO_MIP_SOLVERS_MESSAGE)
def test_sdp_vs_mip(self):
num_repeats = 5
loss_margin = 1e-3 # fixed via runs_per_test=300 with random seeds
for seed in range(num_repeats):
verif_instance = test_utils.make_toy_verif_instance(seed)
mip_val, _ = cvxpy_verify.solve_mip_mlp_elided(verif_instance)
sdp_val, _ = cvxpy_verify.solve_sdp_mlp_elided(verif_instance)
assert sdp_val > mip_val - loss_margin, (
'SDP relaxation should be greater than MIP exact solution. '
f'Vals are MIP: {mip_val} SDP: {sdp_val}')
class MatchingBoundsTest(parameterized.TestCase):
@unittest.skipUnless(MIP_SOLVERS, NO_MIP_SOLVERS_MESSAGE)
def test_fgsm_vs_mip(self):
"""Check FGSM and MIP reach same solution/value most of the time."""
# Note this test only works with fixed seeds
num_repeats = 5
expected_successes = 4
num_successes = 0
loss_margin = 0.01
target_label, label = 1, 2
for seed in range(num_repeats):
verif_instance = test_utils.make_toy_verif_instance(
seed, target_label=target_label, label=label)
mip_val, _ = cvxpy_verify.solve_mip_mlp_elided(verif_instance)
_, fgsm_val = _fgsm_example_and_bound(
verif_instance.params_full, target_label=target_label, label=label)
if abs(mip_val - fgsm_val) < loss_margin:
num_successes += 1
assert num_successes >= expected_successes, f'Successes: {num_successes}'
class SdpTest(parameterized.TestCase):
def test_constraints_numpy(self):
num_repeats = 5
margin = 3e-4
for seed in range(num_repeats):
verif_instance = test_utils.make_toy_verif_instance(
seed=seed, label=1, target_label=2)
obj_value, info = cvxpy_verify.solve_sdp_mlp_elided(verif_instance)
obj_np, violations = cvxpy_verify.check_sdp_bounds_numpy(
info['P'].value, verif_instance)
assert abs(obj_np - obj_value) < margin, 'objective does not match'
for k, v in violations.items():
assert v < margin, f'violation of {k} by {v}'
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/sdp_verify/cvxpy_verify_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for sdp_verify.py."""
import os
import random
import unittest
from absl.testing import absltest
from absl.testing import parameterized
from cvxpy.reductions.solvers.defines import INSTALLED_MI_SOLVERS as MIP_SOLVERS
import jax
import jax.numpy as jnp
import jax.scipy
from jax_verify.extensions.sdp_verify import cvxpy_verify
from jax_verify.extensions.sdp_verify import eigenvector_utils
from jax_verify.extensions.sdp_verify import problem
from jax_verify.extensions.sdp_verify import sdp_verify
from jax_verify.extensions.sdp_verify import utils
from jax_verify.tests.sdp_verify import test_utils
import numpy as np
import optax
NO_MIP_SOLVERS_MESSAGE = 'No mixed-integer solver is installed.'
class LanczosTest(parameterized.TestCase):
def _test_max_eigenvector_lanczos_once(self, seed=0, dynamic_unroll=False):
"""Test max_eigenvector_lanczos against Scipy eigenvector method."""
dim = 5
key = jax.random.PRNGKey(seed)
h_tmp = jax.random.normal(key, shape=(dim, dim))
h = h_tmp + jnp.transpose(h_tmp)
hv = lambda v: jnp.matmul(h, v)
# Do `dim` iterations of Lanczos
max_eigenvec_lanczos = eigenvector_utils.max_eigenvector_lanczos(
hv, dim, dim, key, dynamic_unroll=dynamic_unroll)
_, eigen_vecs_scipy = jax.scipy.linalg.eigh(h)
max_eigenvec_scipy = eigen_vecs_scipy[:, -1]
# Eigenvector can be v or -v
err = min(jnp.linalg.norm(max_eigenvec_lanczos - max_eigenvec_scipy),
jnp.linalg.norm(max_eigenvec_lanczos + max_eigenvec_scipy))
# Eigenvectors have unit norm - check for relative error below 1e-5
assert err < 1e-5, (f'err: {err}, lanczos: {max_eigenvec_lanczos} '
f'scipy: {max_eigenvec_scipy}')
def test_max_eigenvector_lanczos(self):
for i in range(10):
self._test_max_eigenvector_lanczos_once(seed=i)
for i in range(10):
self._test_max_eigenvector_lanczos_once(seed=i, dynamic_unroll=True)
def _test_lanczos_dynamic_vs_static_once(self, seed=0):
def _safe_div(x1, x2):
return jnp.where(jnp.logical_and(x1 == 0, x2 == 0), x1, x1 / x2)
dim = 5
key = jax.random.PRNGKey(seed)
h_tmp = jax.random.normal(key, shape=(dim, dim))
h = h_tmp + jnp.transpose(h_tmp)
hv = lambda v: jnp.matmul(h, v)
tr1, vecs1 = eigenvector_utils.lanczos_alg(
hv, dim, dim, key, dynamic_unroll=True)
tr2, vecs2 = eigenvector_utils.lanczos_alg(
hv, dim, dim, key, dynamic_unroll=False)
assert jnp.max(jnp.abs(_safe_div(tr1 - tr2, tr2))) < 1e-4, (
f'Seed {seed}: large relative error in Lanczos tridiag')
assert jnp.max(jnp.abs(_safe_div(vecs1 - vecs2, vecs2))) < 1e-4, (
f'Seed {seed}: large relative error in Lanczos vecs')
def test_lanczos_dynamic_vs_static(self):
for i in range(10):
self._test_lanczos_dynamic_vs_static_once(seed=i)
class SdpDualPrimalTest(parameterized.TestCase):
"""Tests comparing SDP dual bounds to CVXPY exact solution of primal."""
@unittest.skipUnless(MIP_SOLVERS, NO_MIP_SOLVERS_MESSAGE)
def test_crossing_bounds(self):
loss_margin = 1e-3
seed = random.randint(1, 10000)
verif_instance = test_utils.make_toy_verif_instance(seed)
key = jax.random.PRNGKey(0)
primal_opt, _ = cvxpy_verify.solve_mip_mlp_elided(verif_instance)
dual_ub, _ = sdp_verify.solve_sdp_dual(
problem.make_sdp_verif_instance(verif_instance), key, num_steps=1000)
assert dual_ub > primal_opt - loss_margin, (
        'Dual upper bound should be greater than optimal primal objective. '
f'Seed is {seed}. Vals are Dual: {dual_ub} Primal: {primal_opt}')
def _test_tight_duality_gap(self, seed, loss_margin=0.003, num_steps=3000):
verif_instance = test_utils.make_toy_verif_instance(
seed, label=1, target_label=2)
key = jax.random.PRNGKey(0)
primal_opt, _ = cvxpy_verify.solve_sdp_mlp_elided(verif_instance)
dual_ub, _ = sdp_verify.solve_sdp_dual(
problem.make_sdp_verif_instance(verif_instance), key,
num_steps=num_steps, verbose=False)
assert dual_ub - primal_opt < loss_margin, (
'Primal and dual vals should be close. '
f'Seed: {seed}. Primal: {primal_opt}, Dual: {dual_ub}')
assert dual_ub > primal_opt - 1e-3, 'crossing bounds'
def test_tight_duality_gap(self):
self._test_tight_duality_gap(0)
def local_test_tight_duality_gap(self): # pylint: disable=g-unreachable-test-method
"""Local test, meant to be run in parallel with --tests_per_run."""
seed = random.randint(1, 10000)
# 5/300 failures at loss_margin=0.01, 0/300 failures at loss_margin=0.3
self._test_tight_duality_gap(seed, loss_margin=0.03, num_steps=3000)
class SdpVerifyTest(parameterized.TestCase):
@parameterized.named_parameters(
('MLP', 'mlp'),
('CNN', 'cnn')
)
def test_sdp_dual_simple_no_crash(self, model_type):
verif_instance = test_utils.make_toy_verif_instance(
seed=0, target_label=1, label=2, nn=model_type)
kwargs = {
'key': jax.random.PRNGKey(0),
'opt': optax.adam(1e-3),
'num_steps': 10,
'eval_every': 5,
'verbose': False,
'use_exact_eig_eval': False,
'use_exact_eig_train': False,
'n_iter_lanczos': 5,
'kappa_reg_weight': 1e-5,
'kappa_zero_after': 8,
'device_type': None,
}
verif_instance = problem.make_sdp_verif_instance(verif_instance)
# Check all kwargs work.
dual_val, _ = sdp_verify.solve_sdp_dual_simple(verif_instance, **kwargs)
assert isinstance(dual_val, float)
# Check code runs without kwargs.
dual_val, _ = sdp_verify.solve_sdp_dual_simple(verif_instance, num_steps=5)
assert isinstance(dual_val, float)
def test_dual_sdp_no_crash(self):
for nn in ['cnn', 'mlp']:
verif_instance = test_utils.make_toy_verif_instance(
seed=0, target_label=1, label=2, nn=nn)
key = jax.random.PRNGKey(0)
dual_val, _ = sdp_verify.solve_sdp_dual(
problem.make_sdp_verif_instance(verif_instance), key, num_steps=10,
n_iter_lanczos=5)
assert isinstance(dual_val, float)
def test_correct_dual_var_types(self):
for nn in ['cnn', 'mlp']:
verif_instance = test_utils.make_toy_verif_instance(
seed=0, target_label=1, label=2, nn=nn)
key = jax.random.PRNGKey(0)
dual_vars = sdp_verify.init_duals(
problem.make_sdp_verif_instance(verif_instance), key)
assert len(dual_vars) == 3, 'Input, one hidden layer, kappa'
assert isinstance(dual_vars[0], problem.DualVar)
assert isinstance(dual_vars[1], problem.DualVarFin)
assert isinstance(dual_vars[2], jax.Array)
def test_ibp_init_matches_ibp_bound(self):
for nn in ['cnn', 'mlp']:
for seed in range(20):
orig_verif_instance = test_utils.make_toy_verif_instance(seed, nn=nn)
key = jax.random.PRNGKey(0)
verif_instance = problem.make_sdp_verif_instance(orig_verif_instance)
dual_vars = jax.tree_map(lambda s: None if s is None else jnp.zeros(s),
verif_instance.dual_shapes)
dual_vars = sdp_verify.init_duals_ibp(verif_instance, dual_vars)
dual_loss = sdp_verify.dual_fun(
verif_instance, dual_vars, key, exact=True)
ibp_bound = utils.ibp_bound_elided(orig_verif_instance)
assert abs(dual_loss - ibp_bound) < 1e-4, (
f'Loss at initialization should match IBP: {dual_loss} {ibp_bound}')
class SdpVerifyTestCNNvsMLP(parameterized.TestCase):
@unittest.skipIf('TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true',
'Test produces nans on Travis CI but passes locally.')
def test_cnn_mlp_match_fixed_window(self):
num_steps = 1000
for seed in range(1):
verif_instance = test_utils.make_toy_verif_instance(seed, nn='cnn_simple')
key = jax.random.PRNGKey(0)
params_cnn = verif_instance.params_full
in_shape = int(np.prod(np.array(params_cnn[0]['W'].shape[:-1])))
# Input and filter size match -> filter is applied at just one location.
# Number of layer 1 neurons = no. channels out of conv filter (last dim).
out_shape = params_cnn[0]['W'].shape[-1]
params_mlp = [(jnp.reshape(params_cnn[0]['W'],
(in_shape, out_shape)), params_cnn[0]['b']),
(params_cnn[1][0], params_cnn[1][1])]
bounds_mlp = sdp_verify.boundprop(
params_mlp,
sdp_verify.IntBound(
lb=np.zeros((1, in_shape)),
ub=1 * np.ones((1, in_shape)),
lb_pre=None,
ub_pre=None))
verif_instance_mlp = utils.make_nn_verif_instance(params_mlp, bounds_mlp)
dual_ub_cnn, _ = sdp_verify.solve_sdp_dual(
problem.make_sdp_verif_instance(verif_instance), key,
num_steps=num_steps, verbose=False, use_exact_eig_train=True)
dual_ub_mlp, _ = sdp_verify.solve_sdp_dual(
problem.make_sdp_verif_instance(verif_instance_mlp), key,
num_steps=num_steps, verbose=False, use_exact_eig_train=True)
assert abs(dual_ub_cnn - dual_ub_mlp) < 1e-2, (
          'Dual upper bound for MLP and CNN (simple CNN) should match. '
f'Seed is {seed}. Vals are CNN: {dual_ub_cnn} MLP: {dual_ub_mlp}')
    # Error is below 1e-4 when run locally with > 3000 steps.
    # The tolerance is relaxed to 1e-2 with fewer steps for faster unit tests.
def test_cnn_mlp_match_sliding_window(self):
num_steps = 1000
for seed in range(1):
verif_instance = test_utils.make_toy_verif_instance(seed, nn='cnn_slide')
key = jax.random.PRNGKey(0)
lb, ub, params_mlp = test_utils.make_mlp_verif_instance_from_cnn(
verif_instance)
bounds_mlp = sdp_verify.boundprop(
params_mlp,
sdp_verify.IntBound(lb=lb, ub=ub, lb_pre=None, ub_pre=None))
verif_instance_mlp = utils.make_nn_verif_instance(params_mlp, bounds_mlp)
dual_ub_cnn, _ = sdp_verify.solve_sdp_dual(
problem.make_sdp_verif_instance(verif_instance),
key,
num_steps=num_steps,
verbose=False,
use_exact_eig_train=True)
dual_ub_mlp, _ = sdp_verify.solve_sdp_dual(
problem.make_sdp_verif_instance(verif_instance_mlp),
key,
num_steps=num_steps,
verbose=False,
use_exact_eig_train=True)
assert abs(dual_ub_cnn - dual_ub_mlp) < 5e-3, (
          'Dual upper bound for MLP and CNN (sliding filter) should match. '
f'Seed is {seed}. Vals are CNN: {dual_ub_cnn} MLP: {dual_ub_mlp}')
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/sdp_verify/sdp_verify_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for cvxpy_verify.py."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import jax.numpy as jnp
import jax.random as random
from jax_verify.extensions.sdp_verify import utils
from jax_verify.tests.sdp_verify import test_utils
class ParamExtractionTest(parameterized.TestCase):
"""Test the functions extracting network parameters from functions."""
def check_fun_extract(self, fun_to_extract, example_inputs):
extracted_params = utils.get_layer_params(fun_to_extract, example_inputs)
eval_original = fun_to_extract(example_inputs)
eval_extracted = utils.predict_cnn(extracted_params, example_inputs)
self.assertAlmostEqual(jnp.abs(eval_original - eval_extracted).max(), 0.0,
places=6)
def test_cnn_extract(self):
"""Test that weights from a CNN can be extracted."""
key = random.PRNGKey(0)
k1, k2 = random.split(key)
input_sizes = (1, 2, 2, 1)
layer_sizes = [input_sizes, {
'n_h': 2,
'n_w': 2,
'n_cout': 2,
'padding': 'VALID',
'stride': 1,
'n_cin': 1
}, 3]
cnn_params = test_utils.make_cnn_params(layer_sizes, k1)
fun_to_extract = functools.partial(utils.predict_cnn, cnn_params)
example_inputs = random.normal(k2, input_sizes)
self.check_fun_extract(fun_to_extract, example_inputs)
def test_cnn_withpreproc(self):
"""Test extraction of weights from a CNN with input preprocessing."""
key = random.PRNGKey(0)
k1, k2, k3, k4 = random.split(key, num=4)
input_sizes = (1, 2, 2, 3)
layer_sizes = [input_sizes, {
'n_h': 2,
'n_w': 2,
'n_cout': 2,
'padding': 'VALID',
'stride': 1,
'n_cin': 3
}, 3]
cnn_params = test_utils.make_cnn_params(layer_sizes, k1)
example_inputs = random.normal(k2, input_sizes)
input_mean = random.normal(k3, (3,))
input_std = random.normal(k4, (3,))
def fun_to_extract(inputs):
inp = (inputs - input_mean) / input_std
return utils.predict_cnn(cnn_params, inp)
self.check_fun_extract(fun_to_extract, example_inputs)
def test_mlp_extract(self):
"""Test that weights from a MLP can be extracted."""
key = random.PRNGKey(0)
k1, k2 = random.split(key)
input_sizes = (5,)
layer_sizes = (5, 8, 5)
mlp_params = test_utils.make_mlp_params(layer_sizes, k1)
fun_to_extract = functools.partial(utils.predict_mlp, mlp_params)
example_inputs = random.normal(k2, input_sizes)
self.check_fun_extract(fun_to_extract, example_inputs)
def test_mlp_withpreproc(self):
"""Test extraction of weights from a MLP with input preprocessing."""
key = random.PRNGKey(0)
k1, k2, k3, k4 = random.split(key, num=4)
input_sizes = (5,)
layer_sizes = (5, 8, 5)
mlp_params = test_utils.make_mlp_params(layer_sizes, k1)
example_inputs = random.normal(k2, input_sizes)
input_mean = random.normal(k3, input_sizes)
input_std = random.normal(k4, input_sizes)
def fun_to_extract(inputs):
inp = (inputs - input_mean) / input_std
return utils.predict_mlp(mlp_params, inp)
self.check_fun_extract(fun_to_extract, example_inputs)
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/sdp_verify/test_utilfuns.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Elementary tests for the Lagrangian forms."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
import haiku as hk
import jax
from jax_verify.extensions.functional_lagrangian import lagrangian_form
INPUT_SHAPES = (('batched_0d', [1]), ('batched_1d', [1, 2]),
('batched_2d', [1, 2, 3]), ('batched_3d', [1, 2, 3, 4]))
class ShapeTest(chex.TestCase):
def setUp(self):
super(ShapeTest, self).setUp()
self._prng_seq = hk.PRNGSequence(13579)
def _assert_output_shape(self, form, shape):
x = jax.random.normal(next(self._prng_seq), shape)
params = form.init_params(next(self._prng_seq), x.shape[1:])
out = form.apply(x, params, step=0)
assert out.ndim == 1
@parameterized.named_parameters(*INPUT_SHAPES)
def test_linear(self, shape):
form = lagrangian_form.Linear()
self._assert_output_shape(form, shape)
@parameterized.named_parameters(*INPUT_SHAPES)
def test_linear_exp(self, shape):
form = lagrangian_form.LinearExp()
self._assert_output_shape(form, shape)
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/functional_lagrangian/lagrangian_form_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit-test for linear Lagrangian."""
from absl.testing import absltest
import chex
import jax
import jax.numpy as jnp
import jax_verify
from jax_verify.extensions.functional_lagrangian import dual_build
from jax_verify.extensions.functional_lagrangian import dual_solve
from jax_verify.extensions.functional_lagrangian import lagrangian_form
from jax_verify.extensions.functional_lagrangian import verify_utils
from jax_verify.extensions.functional_lagrangian.inner_solvers import lp
from jax_verify.extensions.sdp_verify import utils as sdp_utils
from jax_verify.src import bound_propagation
from jax_verify.src.mip_solver import cvxpy_relaxation_solver
from jax_verify.src.mip_solver import relaxation
from jax_verify.tests.sdp_verify import test_utils as sdp_test_utils
import ml_collections
import numpy as np
NUM_SAMPLES = 1
LAYER_SIZES = [3, 4, 5, 6]
def create_inputs(prng_key):
return jax.random.uniform(
prng_key, [NUM_SAMPLES, LAYER_SIZES[0]], minval=0.0, maxval=1.0)
def make_model_fn(params):
def model_fn(inputs):
inputs = np.reshape(inputs, (inputs.shape[0], -1))
return sdp_utils.predict_mlp(params, inputs)
return model_fn
def get_config():
config = ml_collections.ConfigDict()
config.outer_opt = ml_collections.ConfigDict()
config.outer_opt.lr_init = 0.001
config.outer_opt.steps_per_anneal = 500
config.outer_opt.anneal_lengths = ''
config.outer_opt.anneal_factor = 0.1
config.outer_opt.num_anneals = 1
config.outer_opt.opt_name = 'adam'
config.outer_opt.opt_kwargs = {}
return config
class LinearTest(chex.TestCase):
def setUp(self):
super(LinearTest, self).setUp()
self.target_label = 1
self.label = 0
self.input_bounds = (0.0, 1.0)
self.layer_sizes = LAYER_SIZES
self.eps = 0.1
prng_key = jax.random.PRNGKey(13579)
self.keys = jax.random.split(prng_key, 5)
self.network_params = sdp_test_utils.make_mlp_params(
self.layer_sizes, self.keys[0])
self.inputs = create_inputs(self.keys[1])
objective = jnp.zeros(self.layer_sizes[-1])
objective = objective.at[self.target_label].add(1)
objective = objective.at[self.label].add(-1)
self.objective = objective
self.objective_bias = jax.random.normal(self.keys[2], [])
def solve_with_jax_verify(self):
lower_bound = jnp.minimum(jnp.maximum(self.inputs - self.eps, 0.0), 1.0)
upper_bound = jnp.minimum(jnp.maximum(self.inputs + self.eps, 0.0), 1.0)
init_bound = jax_verify.IntervalBound(lower_bound, upper_bound)
logits_fn = make_model_fn(self.network_params)
solver = cvxpy_relaxation_solver.CvxpySolver
relaxation_transform = relaxation.RelaxationTransform(
jax_verify.ibp_transform)
var, env = bound_propagation.bound_propagation(
bound_propagation.ForwardPropagationAlgorithm(relaxation_transform),
logits_fn, init_bound)
# This solver minimizes the objective -> get max with -min(-objective)
neg_value_opt, _, _ = relaxation.solve_relaxation(
solver,
-self.objective,
-self.objective_bias,
var,
env,
index=0,
time_limit_millis=None)
value_opt = -neg_value_opt
return value_opt
def solve_with_functional_lagrangian(self):
config = get_config()
init_bound = sdp_utils.init_bound(
self.inputs[0], self.eps, input_bounds=self.input_bounds)
bounds = sdp_utils.boundprop(
self.network_params + [(self.objective, self.objective_bias)],
init_bound)
logits_fn = make_model_fn(self.network_params)
def spec_fn(inputs):
return jnp.matmul(logits_fn(inputs), self.objective) + self.objective_bias
input_bounds = jax_verify.IntervalBound(bounds[0].lb, bounds[0].ub)
lagrangian_form_per_layer = lagrangian_form.Linear()
lagrangian_form_per_layer = [lagrangian_form_per_layer for bd in bounds]
inner_opt = lp.LpStrategy()
env, dual_params, dual_params_types = inner_opt.init_duals(
jax_verify.ibp_transform, verify_utils.SpecType.ADVERSARIAL, False,
spec_fn, self.keys[3], lagrangian_form_per_layer, input_bounds)
opt, num_steps = dual_build.make_opt_and_num_steps(config.outer_opt)
dual_state = ml_collections.ConfigDict(type_safe=False)
dual_solve.solve_dual_train(
env,
key=self.keys[4],
num_steps=num_steps,
opt=opt,
dual_params=dual_params,
dual_params_types=dual_params_types,
dual_state=dual_state,
affine_before_relu=False,
spec_type=verify_utils.SpecType.ADVERSARIAL,
inner_opt=inner_opt,
logger=(lambda *args: None),
)
return dual_state.loss
def test_lp_against_jax_verify_relaxation(self):
value_jax_verify = self.solve_with_jax_verify()
value_functional_lagrangian = self.solve_with_functional_lagrangian()
np.testing.assert_allclose(
value_jax_verify, value_functional_lagrangian, rtol=1e-3)
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/functional_lagrangian/lp_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit-test for uncertainty spec inner max."""
from absl.testing import absltest
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from jax_verify.extensions.functional_lagrangian import lagrangian_form as lag_form
from jax_verify.extensions.functional_lagrangian import verify_utils
from jax_verify.extensions.functional_lagrangian.inner_solvers import uncertainty_spec
from jax_verify.extensions.sdp_verify import utils as sdp_utils
import numpy as np
X_SHAPE = [1, 7]
class UncertaintySpecTest(chex.TestCase):
def setUp(self):
super(UncertaintySpecTest, self).setUp()
self._prng_seq = hk.PRNGSequence(13579)
self._n_classes = X_SHAPE[1]
self.bounds = [
sdp_utils.IntBound(
lb_pre=-0.1 * jnp.ones(X_SHAPE),
ub_pre=0.1 * jnp.ones(X_SHAPE),
lb=None,
ub=None)
]
def test_softmax_upper(self):
rand_class = jax.random.randint(
next(self._prng_seq), shape=(), minval=0, maxval=self._n_classes)
objective = jnp.arange(self._n_classes) == rand_class
constant = jax.random.uniform(next(self._prng_seq), ())
affine_fn = lambda x: jnp.sum(x * objective) + constant
lagrangian_form = lag_form.Linear()
lp_pre = lagrangian_form.init_params(
next(self._prng_seq), l_shape=X_SHAPE, init_zeros=False)
opt_instance = verify_utils.InnerVerifInstance(
affine_fns=[affine_fn],
bounds=self.bounds,
lagrangian_form_pre=lagrangian_form,
lagrangian_form_post=lagrangian_form,
is_first=False,
is_last=True,
lagrange_params_pre=lp_pre,
lagrange_params_post=None,
idx=0,
spec_type=verify_utils.SpecType.UNCERTAINTY,
affine_before_relu=True)
# run PGA to find approximate max
pga_opt = uncertainty_spec.UncertaintySpecStrategy(
n_iter=10_000,
n_pieces=0,
solve_max=uncertainty_spec.MaxType.EXP,
)
value_pga = pga_opt.solve_max(
inner_dual_vars=None,
opt_instance=opt_instance,
key=next(self._prng_seq),
step=0)
# use cvxpy to find upper bound
cvx_opt = uncertainty_spec.UncertaintySpecStrategy(
n_iter=0,
n_pieces=10,
solve_max=uncertainty_spec.MaxType.EXP_BOUND,
)
value_cvx = cvx_opt.solve_max(
inner_dual_vars=None,
opt_instance=opt_instance,
key=next(self._prng_seq),
step=0)
# evaluate objective function on an arbitrarily chosen feasible point
def objective_fn(x):
return (jnp.squeeze(affine_fn(jax.nn.softmax(x)), ()) -
jnp.squeeze(lagrangian_form.apply(x, lp_pre, step=0), ()))
middle_x = 0.5 * self.bounds[0].lb_pre + 0.5 * self.bounds[0].ub_pre
value_middle = objective_fn(middle_x)
np.testing.assert_array_less(value_middle, value_pga)
np.testing.assert_array_less(value_pga, value_cvx + 1e-5)
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/functional_lagrangian/uncertainty_spec_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Attacks test."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import jax_verify
from jax_verify.extensions.functional_lagrangian import attacks
from jax_verify.extensions.functional_lagrangian import verify_utils
EPS = 0.1
def make_data_spec(prng_key):
"""Create data specification from config."""
x = jax.random.normal(prng_key, [8])
input_bounds = (x - EPS, x + EPS)
return verify_utils.DataSpec(
input=x,
true_label=0,
target_label=1,
epsilon=EPS,
input_bounds=input_bounds)
def make_params(prng_key, dropout_rate=0.0, std=None):
prng_key_seq = hk.PRNGSequence(prng_key)
w1 = jax.random.normal(next(prng_key_seq), [8, 4])
b1 = jax.random.normal(next(prng_key_seq), [4])
w2 = jax.random.normal(next(prng_key_seq), [4, 2])
b2 = jax.random.normal(next(prng_key_seq), [2])
if std is not None:
w1_std = std * jnp.ones([8, 4])
b1_std = std * jnp.ones([4])
w1_bound = jax_verify.IntervalBound(w1 - 3 * w1_std, w1 + 3 * w1_std)
b1_bound = jax_verify.IntervalBound(b1 - 3 * b1_std, b1 + 3 * b1_std)
else:
w1_std, b1_std, w1_bound, b1_bound = None, None, None, None
params = [
verify_utils.FCParams(
w=w1,
b=b1,
w_std=w1_std,
b_std=b1_std,
w_bound=w1_bound,
b_bound=b1_bound,
),
verify_utils.FCParams(
w=w2,
b=b2,
dropout_rate=dropout_rate,
)
]
return params
class AttacksTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.prng_seq = hk.PRNGSequence(1234)
self.data_spec = make_data_spec(next(self.prng_seq))
def test_forward_deterministic(self):
params = make_params(next(self.prng_seq))
self._check_deterministic_behavior(params)
def test_forward_almost_no_randomness(self):
params = make_params(next(self.prng_seq), std=1e-8, dropout_rate=1e-8)
self._check_deterministic_behavior(params)
def test_forward_gaussian(self):
params = make_params(next(self.prng_seq), std=1.0)
self._check_stochastic_behavior(params)
def test_forward_dropout(self):
params = make_params(next(self.prng_seq), dropout_rate=0.8)
self._check_stochastic_behavior(params)
def test_adversarial_integration(self):
spec_type = verify_utils.SpecType.ADVERSARIAL
params = make_params(next(self.prng_seq), std=0.1, dropout_rate=0.2)
attacks.adversarial_attack(
params,
self.data_spec,
spec_type,
next(self.prng_seq),
num_steps=5,
learning_rate=0.1,
num_samples=3)
def test_adversarial_uncertainty_integration(self):
spec_type = verify_utils.SpecType.ADVERSARIAL
params = make_params(next(self.prng_seq), std=0.1, dropout_rate=0.2)
attacks.adversarial_attack(
params,
self.data_spec,
spec_type,
next(self.prng_seq),
num_steps=5,
learning_rate=0.1,
num_samples=3)
def _make_value_and_grad(self, params, num_samples):
forward_fn = attacks.make_forward(params, num_samples)
def objective_fn(x, prng_key):
out = jnp.reshape(forward_fn(x, prng_key), [2])
return out[1] - out[0]
return jax.value_and_grad(objective_fn)
def _check_deterministic_behavior(self, params):
# build function with 1 sample
value_and_grad_fn = self._make_value_and_grad(params, num_samples=1)
# forward first time
out_1 = value_and_grad_fn(self.data_spec.input, next(self.prng_seq))
# forward again gives the same result
out_1_again = value_and_grad_fn(self.data_spec.input, next(self.prng_seq))
chex.assert_trees_all_close(out_1, out_1_again, rtol=1e-5)
# forward with 3 samples should still give the same result
value_and_grad_fn = self._make_value_and_grad(params, num_samples=3)
out_3 = value_and_grad_fn(self.data_spec.input, next(self.prng_seq))
chex.assert_trees_all_close(out_3, out_1, rtol=1e-5)
def _check_stochastic_behavior(self, params):
value_and_grad_fn = self._make_value_and_grad(params, num_samples=2)
prng = next(self.prng_seq)
# forward a first time
out_2 = value_and_grad_fn(self.data_spec.input, prng)
# forward with a different seed does not give the same result
out_2_diff = value_and_grad_fn(self.data_spec.input, next(self.prng_seq))
with self.assertRaises(AssertionError):
chex.assert_trees_all_close(out_2, out_2_diff)
# forward with 3 samples and the same prng is not the same
value_and_grad_fn = self._make_value_and_grad(params, num_samples=3)
out_3_same_prng = value_and_grad_fn(self.data_spec.input, prng)
with self.assertRaises(AssertionError):
chex.assert_trees_all_close(out_2, out_3_same_prng)
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/functional_lagrangian/attacks_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit-test for projected gradient ascent."""
from absl.testing import absltest
import chex
import jax
import jax.numpy as jnp
from jax_verify.extensions.functional_lagrangian import dual_build
from jax_verify.extensions.functional_lagrangian import lagrangian_form as lag_form
from jax_verify.extensions.functional_lagrangian import verify_utils
from jax_verify.extensions.functional_lagrangian.inner_solvers import pga
from jax_verify.extensions.sdp_verify import utils as sdp_utils
import numpy as np
LagrangianForm = lag_form.LagrangianForm
X_SHAPE = [1, 7]
class QuadLinDiagForm(LagrangianForm):
def _init_params_per_sample(self):
return None
def _apply(self, x, lp, step=None):
return jnp.sum(0.5 * lp[0] * x**2 + lp[1] * x)
def __init__(self):
self._name = 'QuadLinDiag'
class QuadDiagForm(LagrangianForm):
def _init_params_per_sample(self):
return None
def _apply(self, x, lp, step=None):
return jnp.sum(0.5 * lp * x**2)
def __init__(self):
self._name = 'QuadDiag'
class PGATest(chex.TestCase):
def setUp(self):
super().setUp()
self.prng_key = jax.random.PRNGKey(1234)
self.bounds = [
sdp_utils.IntBound(
lb=-jnp.ones(X_SHAPE),
ub=jnp.ones(X_SHAPE),
lb_pre=None,
ub_pre=None),
sdp_utils.IntBound(lb=None, ub=None, lb_pre=None, ub_pre=None),
]
def test_intermediate_problem(self):
prng_keys = jax.random.split(self.prng_key, 7)
    # create a decomposable quadratic problem (see value_per_component below)
# we use absolute values to ensure that the resulting problem is concave,
# so that PGA finds the global solution.
weight_diag = jnp.abs(jax.random.normal(prng_keys[0], X_SHAPE[1:]))
bias = jax.random.normal(prng_keys[1], X_SHAPE[1:])
lp_pre_quad = jnp.abs(jax.random.normal(prng_keys[2], X_SHAPE[1:]))
lp_pre_lin = jax.random.normal(prng_keys[3], X_SHAPE[1:])
lp_post_quad = -jnp.abs(jax.random.normal(prng_keys[4], X_SHAPE[1:]))
lp_post_lin = jax.random.normal(prng_keys[5], X_SHAPE[1:])
lp_pre = (lp_pre_quad, lp_pre_lin)
lp_post = (lp_post_quad, lp_post_lin)
affine_fn = lambda x: x * weight_diag + bias
lagrangian_form = QuadLinDiagForm()
opt_instance = verify_utils.InnerVerifInstance(
affine_fns=[affine_fn],
bounds=self.bounds,
is_first=False,
is_last=False,
lagrangian_form_pre=lagrangian_form,
lagrangian_form_post=lagrangian_form,
lagrange_params_pre=lp_pre,
lagrange_params_post=lp_post,
idx=0,
spec_type=verify_utils.SpecType.ADVERSARIAL,
affine_before_relu=True)
# run PGA on problem
pga_opt = pga.PgaStrategy(n_iter=200, lr=0.01)
value_pga = pga_opt.solve_max(
inner_dual_vars=None,
opt_instance=opt_instance,
key=prng_keys[6],
step=0)
def value_per_component(x):
"""Objective function per component."""
y = jax.nn.relu(weight_diag * x + bias)
return (0.5 * lp_post_quad * y**2 + lp_post_lin * y -
(0.5 * lp_pre_quad * x**2 + lp_pre_lin * x))
# closed-form unconstrained solution if relu is passing
x_opt_passing = (lp_pre_lin - lp_post_lin * weight_diag -
lp_post_quad * weight_diag * bias) / (
weight_diag**2 * lp_post_quad - lp_pre_quad)
# project on feasible set where relu is passing
x_opt_passing = jnp.clip(
x_opt_passing,
a_min=jnp.maximum(-bias / weight_diag, self.bounds[0].lb),
a_max=self.bounds[0].ub)
value_opt_passing = value_per_component(x_opt_passing)
# closed-form unconstrained solution if relu is non-passing
x_opt_nonpassing = -lp_pre_lin / lp_pre_quad
# project on feasible set where relu is not passing
x_opt_nonpassing = jnp.clip(
x_opt_nonpassing,
a_min=self.bounds[0].lb,
a_max=jnp.minimum(-bias / weight_diag, self.bounds[0].ub))
value_opt_nonpassing = value_per_component(x_opt_nonpassing)
# best of candidate solutions (each optimal on their subdomain) gives the
# global solution
x_opt = jnp.where(value_opt_passing > value_opt_nonpassing, x_opt_passing,
x_opt_nonpassing)
# corresponding optimal objective value
value_opt = jnp.sum(value_per_component(x_opt))
np.testing.assert_almost_equal(value_pga, value_opt, decimal=2)
def test_final_problem(self):
prng_keys = jax.random.split(self.prng_key, 4)
    # create a decomposable quadratic problem (see value_per_component below)
# we use absolute values to ensure that the resulting problem is concave,
# so that PGA finds the global solution.
objective = jax.random.normal(prng_keys[0], X_SHAPE[1:])
constant = jnp.zeros([])
lp_pre = jnp.abs(jax.random.normal(prng_keys[2], X_SHAPE[1:]))
affine_fn = lambda x: jnp.sum(x * objective) + constant
lagrangian_form = QuadDiagForm()
opt_instance = verify_utils.InnerVerifInstance(
affine_fns=[affine_fn],
bounds=self.bounds,
lagrangian_form_pre=lagrangian_form,
lagrangian_form_post=lagrangian_form,
is_first=False,
is_last=True,
lagrange_params_pre=lp_pre,
lagrange_params_post=None,
idx=0,
spec_type=verify_utils.SpecType.ADVERSARIAL,
affine_before_relu=True)
# run PGA on problem
pga_opt = pga.PgaStrategy(n_iter=500, lr=0.01)
value_pga = pga_opt.solve_max(
inner_dual_vars=None,
opt_instance=opt_instance,
key=prng_keys[3],
step=0)
def value_per_component(x):
"""Objective function per component."""
return -0.5 * lp_pre * x**2 + objective * x
# closed-form solution for the decomposable problem
x_opt = jnp.clip(objective / lp_pre, a_min=-1, a_max=1)
# corresponding optimal objective value
value_opt = jnp.sum(value_per_component(x_opt))
np.testing.assert_almost_equal(value_pga, value_opt, decimal=3)
def test_integration_combined_layer(self):
prng_keys = jax.random.split(self.prng_key, 4)
dim_1 = X_SHAPE[1]
dim_2 = dim_1 + 1
weights_1 = jax.random.normal(prng_keys[0], [dim_1, dim_2])
bias_1 = jnp.zeros([dim_2])
lp_pre = jnp.abs(jax.random.normal(prng_keys[1], [1, dim_1]))
lagrangian_form = QuadDiagForm()
weights_2 = jax.random.normal(prng_keys[2], [dim_2, 1])
bias_2 = jnp.zeros([])
bounds_2 = [
sdp_utils.IntBound(
lb=-jnp.ones([1, dim_2]),
ub=jnp.ones([1, dim_2]),
lb_pre=None,
ub_pre=None),
sdp_utils.IntBound(lb=None, ub=None, lb_pre=None, ub_pre=None),
]
opt_instance_1 = verify_utils.InnerVerifInstance(
affine_fns=[lambda x: x @ weights_1 + bias_1],
bounds=self.bounds,
lagrangian_form_pre=lagrangian_form,
lagrangian_form_post=lagrangian_form,
is_first=False,
is_last=False,
lagrange_params_pre=lp_pre,
lagrange_params_post=None,
idx=0,
spec_type=verify_utils.SpecType.ADVERSARIAL,
affine_before_relu=True)
opt_instance_2 = verify_utils.InnerVerifInstance(
affine_fns=[lambda x: x @ weights_2 + bias_2],
bounds=bounds_2,
lagrangian_form_pre=lagrangian_form,
lagrangian_form_post=lagrangian_form,
is_first=False,
is_last=True,
lagrange_params_pre=None,
lagrange_params_post=None,
idx=1,
spec_type=verify_utils.SpecType.ADVERSARIAL,
affine_before_relu=True)
opt_instance = dual_build._merge_instances(
opt_instance_1,
opt_instance_2,
)
# run PGA on problem
pga_opt = pga.PgaStrategy(n_iter=5, lr=0.01)
pga_opt.solve_max(
inner_dual_vars=None,
opt_instance=opt_instance,
key=prng_keys[3],
step=None)
if __name__ == '__main__':
absltest.main()
| jax_verify-master | jax_verify/tests/functional_lagrangian/pga_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name
"""Crown bound propagation used in SDP verification."""
import functools
import jax.numpy as jnp
import jax_verify
from jax_verify.extensions.sdp_verify import utils
from jax_verify.src import bound_propagation
from jax_verify.src.nonconvex import duals
from jax_verify.src.nonconvex import nonconvex
from jax_verify.src.nonconvex import optimizers
from jax_verify.src.nonconvex.optimizers import LinesearchFistaOptimizer as FistaOptimizer
IntBound = utils.IntBound
def boundprop(params, x, epsilon, input_bounds, boundprop_type,
**extra_boundprop_kwargs):
"""Computes interval bounds for NN intermediate activations.
Args:
params: Parameters for the NN.
x: Batch of inputs to NN (dimension 2 for MLP or 4 for CNN)
epsilon: l-inf perturbation to the input.
    input_bounds: Valid lower and upper bounds for the NN input, as a tuple --
      e.g. (0., 1.)
boundprop_type: string, indicating method used for bound propagation, e.g.
'crown_ibp' or 'nonconvex'
**extra_boundprop_kwargs: any additional kwargs, passed directly to
underlying boundprop method
Returns:
layer_bounds: upper and lower bounds across the layers of the NN as a list
of IntBound-s.
"""
boundprop_type_to_method = {
'crown_ibp': _crown_ibp_boundprop,
'nonconvex': _nonconvex_boundprop,
}
assert boundprop_type in boundprop_type_to_method, 'invalid boundprop_type'
boundprop_method = boundprop_type_to_method[boundprop_type]
return boundprop_method(params, x, epsilon, input_bounds,
**extra_boundprop_kwargs)
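# Illustrative usage sketch (hypothetical shapes and values): given `params` in
# the format accepted by utils.predict_cnn and a batch of inputs `x`,
#   bounds = boundprop(params, x, epsilon=0.1, input_bounds=(0., 1.),
#                      boundprop_type='crown_ibp')
# returns a list of IntBound-s whose `lb`/`ub` fields bound the input and the
# post-ReLU activations at each layer.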
def _crown_ibp_boundprop(params, x, epsilon, input_bounds):
"""Runs CROWN-IBP for each layer separately."""
def get_layer_act(layer_idx, inputs):
act = utils.predict_cnn(params[:layer_idx], inputs)
return act
initial_bound = jax_verify.IntervalBound(
jnp.maximum(x - epsilon, input_bounds[0]),
jnp.minimum(x + epsilon, input_bounds[1]))
out_bounds = [IntBound(
lb_pre=None, ub_pre=None, lb=initial_bound.lower, ub=initial_bound.upper)]
for i in range(1, len(params) + 1):
fwd = functools.partial(get_layer_act, i)
bound = jax_verify.crownibp_bound_propagation(fwd, initial_bound)
out_bounds.append(
IntBound(lb_pre=bound.lower, ub_pre=bound.upper,
lb=jnp.maximum(0, bound.lower),
ub=jnp.maximum(0, bound.upper)))
return out_bounds
def _nonconvex_boundprop(params, x, epsilon, input_bounds,
nonconvex_boundprop_steps=100,
nonconvex_boundprop_nodes=128):
"""Wrapper for nonconvex bound propagation."""
# Get initial bounds for boundprop
init_bounds = utils.init_bound(x, epsilon, input_bounds=input_bounds,
add_batch_dim=False)
# Build fn to boundprop through
all_act_fun = functools.partial(utils.predict_cnn, params,
include_preactivations=True)
# Collect the intermediate bounds.
input_bound = jax_verify.IntervalBound(init_bounds.lb, init_bounds.ub)
optimizer = optimizers.OptimizingConcretizer(
FistaOptimizer(num_steps=nonconvex_boundprop_steps),
max_parallel_nodes=nonconvex_boundprop_nodes)
nonconvex_algorithm = nonconvex.nonconvex_algorithm(
duals.WolfeNonConvexBound, optimizer)
all_outputs, _ = bound_propagation.bound_propagation(
nonconvex_algorithm, all_act_fun, input_bound)
_, intermediate_nonconvex_bounds = all_outputs
bounds = [init_bounds]
for nncvx_bound in intermediate_nonconvex_bounds:
bounds.append(utils.IntBound(lb_pre=nncvx_bound.lower,
ub_pre=nncvx_bound.upper,
lb=jnp.maximum(nncvx_bound.lower, 0),
ub=jnp.maximum(nncvx_bound.upper, 0)))
return bounds
| jax_verify-master | jax_verify/extensions/sdp_verify/boundprop_utils.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name
"""Neural network verification with cvxpy for correctness checks."""
import cvxpy as cp
from cvxpy.reductions.solvers.defines import INSTALLED_MI_SOLVERS as MIP_SOLVERS
import jax.numpy as jnp
from jax_verify.extensions.sdp_verify import utils
import numpy as np
import scipy
def solve_mip_mlp_elided(verif_instance):
"""Compute optimal attack loss for MLPs, via exactly solving MIP."""
assert MIP_SOLVERS, 'No MIP solvers installed with cvxpy.'
assert verif_instance.type == utils.VerifInstanceTypes.MLP_ELIDED
params, bounds, obj, obj_const = (
verif_instance.params, verif_instance.bounds, verif_instance.obj,
verif_instance.const)
layer_sizes = utils.mlp_layer_sizes(params)
on_state = []
post_activations = [cp.Variable((1, layer_sizes[0]))]
pre_activations = []
constraints = []
for (i, param) in enumerate(params):
W, b = param
b = jnp.reshape(b, (1, b.size))
on_state.append(cp.Variable((1, b.size), boolean=True))
pre_activations.append(cp.Variable((1, b.size)))
post_activations.append(cp.Variable((1, b.size)))
# Linear relaxation of ReLU constraints
constraints += [pre_activations[-1] == post_activations[-2]@W + b]
constraints += [post_activations[-1] >= pre_activations[-1]]
constraints += [post_activations[-1] >= 0]
# If ReLU is off, post activation is non-positive. Otherwise <= ub
constraints += [post_activations[-1] <= cp.multiply(on_state[-1],
bounds[i+1].ub)]
# If ReLU is off, pre-activation is non-positive. Otherwise <= ub_pre
constraints += [pre_activations[-1] <= cp.multiply(on_state[-1],
bounds[i+1].ub_pre)]
# If ReLU is on, post-activation == pre-activation
# Define <= here, >= constraint added above.
constraints += [post_activations[-1]-pre_activations[-1] <=
cp.multiply(1-on_state[-1],
bounds[i+1].ub-bounds[i+1].lb_pre)]
# Optionally, include IBP bounds to speed up MIP solving
# Post activations are within bounds
# i=0 case encodes input constraint
for (i, post) in enumerate(post_activations):
constraints += [post <= bounds[i].ub]
constraints += [post >= bounds[i].lb]
  # Pre-activations are within bounds
for (i, pre) in enumerate(pre_activations):
constraints += [pre <= bounds[i+1].ub_pre]
constraints += [pre >= bounds[i+1].lb_pre]
# Set objective over final post-activations
obj_cp = cp.sum(cp.multiply(obj, post_activations[-1]))
# Define and solve problem
problem = cp.Problem(cp.Maximize(obj_cp), constraints)
# NB: Originally, we used cp.ECOS_BB here, but cvxpy 1.1 drops support,
# so we just use the first available MIP solver (which is dependent on user
# installation).
problem.solve(solver=MIP_SOLVERS[0])
# Report results
info = {
'problem': problem,
'post': post_activations,
'pre': pre_activations,
}
return obj_cp.value + obj_const, info
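# Usage sketch (illustrative; requires a MIP solver installed with cvxpy, e.g.
# GLPK_MI or CBC):
#   obj_value, info = solve_mip_mlp_elided(verif_instance)
# `obj_value` is the exact worst-case objective and `info['problem']` exposes
# the underlying cvxpy Problem, e.g. to inspect `info['problem'].status`.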
def solve_lp_primal_elided(verif_instance):
"""Compute optimal attack loss for MLPs against LP relaxation."""
assert verif_instance.type == utils.VerifInstanceTypes.MLP_ELIDED
params, bounds, obj, obj_const = (
verif_instance.params, verif_instance.bounds, verif_instance.obj,
verif_instance.const)
layer_sizes = utils.mlp_layer_sizes(params)
post_activations = [cp.Variable((1, layer_sizes[0]))]
constraints = []
for (i, param) in enumerate(params):
W, b = param
b = jnp.reshape(b, (1, b.size))
post_activations.append(cp.Variable((1, b.size)))
pre_act = post_activations[-2]@W + b
post_act = post_activations[-1]
# Linear relaxation of ReLU constraints
constraints += [post_act >= pre_act]
constraints += [post_act >= 0]
# Triangle relaxation
l = np.minimum(0., bounds[i+1].lb_pre)
u = np.maximum(0., bounds[i+1].ub_pre)
constraints += [cp.multiply(u, pre_act) - cp.multiply(u, l) -
cp.multiply(u - l, post_act) >= 0]
# Optionally, include IBP bounds to speed up MIP solving
# Post activations are within bounds
# i=0 case encodes input constraint
for (i, post) in enumerate(post_activations[:1]):
constraints += [post <= bounds[i].ub]
constraints += [post >= bounds[i].lb]
# Set objective over final post-activations
obj_cp = cp.sum(cp.multiply(obj, post_activations[-1]))
# Define and solve problem
problem = cp.Problem(cp.Maximize(obj_cp), constraints)
problem.solve(solver=cp.ECOS)
# Report results
info = {
'problem': problem,
'post': post_activations,
}
return obj_cp.value + obj_const, info
def solve_sdp_mlp_elided(verif_instance, solver_name='SCS', verbose=False,
check_feasibility=False, feasibility_margin=0.0):
"""Compute exact SDP relaxation verified bound, following Raghunathan 18.
Args:
verif_instance: VerifInstance namedtuple
solver_name: string, SDP solver, either 'SCS' or 'CVXOPT'
verbose: bool, controls verbose output from SDP solver
check_feasibility: bool, if True, try to find any verified certificate,
rather than tightest possible lower bound
feasibility_margin: float, when `check_feasibility=True`, verify that
adversary cannot decrease objective below `feasibility_margin`.
Returns:
obj_value: either a float, the bound on objective (check_feasibility=False),
or a bool, whether verification succeeded (check_feasibility=True)
info: dict of other info, e.g. solver status, values found by solver
"""
assert verif_instance.type == utils.VerifInstanceTypes.MLP_ELIDED
params, input_bounds, bounds, obj, obj_const = (
verif_instance.params, verif_instance.input_bounds, verif_instance.bounds,
verif_instance.obj, verif_instance.const)
layer_sizes = utils.mlp_layer_sizes(params)
assert len(bounds) == len(layer_sizes) + 1
# Matrix P, where P = vv' before SDP relaxation, and v = [1, x_0, ..., x_L]
P = cp.Variable((1 + sum(layer_sizes), 1 + sum(layer_sizes)))
# Matrix constraints
constraints = [
P == P.T,
P >> 0,
P[0][0] == 1.,
]
cumsum_sizes = [0] + list(np.cumsum(layer_sizes))
def _slice(i):
"""Helper method for `p_slice`."""
if i == -1:
return 0
else:
return slice(1 + cumsum_sizes[i], 1 + cumsum_sizes[i+1])
def p_slice(i, j):
"""Symbolic indexing into matrix P.
Args:
i: an integer, either -1, or in [0, num_layers).
j: an integer, either -1, or in [0, num_layers).
Returns:
slice object, used to index into P. In the QCQP, if P = vv', where
v = [1, x_0, ..., x_L], then p_slice(i, j) gives submatrix corresponding
to P[x_i x_j']. When j = -1, this returns P[x_i].
"""
return P[_slice(i), _slice(j)]
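  # For illustration, with layer_sizes = [3, 2], P is 6x6 and
  #   p_slice(0, -1) -> P[1:4, 0]     (the entries standing in for x_0),
  #   p_slice(0, 0)  -> P[1:4, 1:4]   (the block standing in for x_0 x_0'),
  #   p_slice(1, 0)  -> P[4:6, 1:4]   (the block standing in for x_1 x_0').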
diag = cp.atoms.affine.diag.diag
# Input/IBP constraints
# TODO: Check if these are actually necessary
if input_bounds is not None:
constraints += [p_slice(0, -1) >= input_bounds[0]]
constraints += [p_slice(0, -1) <= input_bounds[1]]
for i in range(len(layer_sizes)):
lb = bounds[i].lb[0]
ub = bounds[i].ub[0]
assert lb.shape == ub.shape == (layer_sizes[i],)
assert diag(p_slice(i, i)).shape == (layer_sizes[i],)
assert p_slice(i, -1).shape == (layer_sizes[i],)
constraints += [diag(p_slice(i, i)) <=
cp.multiply(lb + ub, p_slice(i, -1)) - cp.multiply(lb, ub)]
# Relu / weight constraints
for i, param in enumerate(params):
W, b = param
constraints += [p_slice(i+1, -1) >= 0]
constraints += [p_slice(i+1, -1) >= p_slice(i, -1)@W + b]
# Encode constraint P[zz'] = WP[xz']. Since our networks use z=xW+b rather
# than z=Wx+b, we use W'x = xW (which holds when x is a vector)
constraints += [
diag(p_slice(i+1, i+1)) ==
diag(W.T@p_slice(i, i+1)) + cp.multiply(b, p_slice(i+1, -1))]
# Set objective over final post-activations
final_idx = len(layer_sizes)-1
x_final = P[_slice(final_idx), _slice(-1)]
obj_cp = cp.sum(cp.multiply(obj[0], x_final))
# Define and solve problem
if check_feasibility:
constraints += [obj_cp + obj_const >= feasibility_margin]
problem = cp.Problem(cp.Maximize(cp.Constant(0.)), constraints)
else:
problem = cp.Problem(cp.Maximize(obj_cp), constraints)
solver = getattr(cp, solver_name)
problem.solve(solver=solver, verbose=verbose)
# Report results
info = {
'problem': problem,
'P': P,
'constraints': constraints,
}
print('status', problem.status)
if check_feasibility:
# If solver shows problem is infeasible for adversary, this is a certificate
obj_value = problem.status == 'infeasible'
else:
obj_value = obj_cp.value + obj_const if obj_cp.value is not None else -99999
return obj_value, info
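# Usage sketch (illustrative): for a 1-hidden-layer elided MLP instance,
#   bound, info = solve_sdp_mlp_elided(verif_instance, solver_name='SCS')
# returns an upper bound on the adversarial objective, while
#   is_verified, _ = solve_sdp_mlp_elided(verif_instance, check_feasibility=True)
# returns True only if the solver proves the adversary's problem infeasible.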
def _violation(arr):
return np.maximum(0, (np.max(arr)))
def _violation_leq(arr1, arr2):
"""Get violation for constraint `arr1 <= arr2`."""
return _violation(arr1 - arr2)
def check_sdp_bounds_numpy(P, verif_instance, input_bounds=(0, 1)):
"""Check SDP solution for 1-hidden MLP satisfies constraints in numpy."""
params, bounds, obj, const = (
verif_instance.params, verif_instance.bounds, verif_instance.obj,
verif_instance.const)
layer_sizes = utils.mlp_layer_sizes(params)
assert len(layer_sizes) == 2, 'Relu MLP with 1 hidden layer'
assert len(params) == 1, 'Relu MLP with 1 hidden layer'
assert P.shape == (1+sum(layer_sizes), 1+sum(layer_sizes))
violations = {}
# Matrix constraints
violations['P = P.T'] = _violation(np.abs(P - P.T))
violations['P[0][0] = 1'] = abs(P[0][0] - 1.0)
eig_vals, _ = scipy.linalg.eigh(P)
violations['P >= 0 (SDP)'] = _violation(-eig_vals)
x = P[0, 1:1+layer_sizes[0]]
z = P[0, 1+layer_sizes[0]:]
xx = P[1:1+layer_sizes[0], 1:1+layer_sizes[0]]
xz = P[1:1+layer_sizes[0], 1+layer_sizes[0]:]
zz = P[1+layer_sizes[0]:, 1+layer_sizes[0]:]
# Relu constraints
w, b = params[0]
violations['relu_0'] = _violation_leq(0, z)
violations['relu_wx_b'] = _violation_leq(np.matmul(x, w) + b, z)
violations['relu_eq'] = _violation(np.abs(
np.diag(np.matmul(w.T, xz)) + b*z - np.diag(zz)))
# Input bound constraints
violations['input_lb'] = _violation_leq(input_bounds[0], x)
violations['input_ub'] = _violation_leq(x, input_bounds[1])
# Interval bound constraints
for i in range(len(layer_sizes)):
lb = bounds[i].lb[0]
ub = bounds[i].ub[0]
x_slice = slice(1+sum(layer_sizes[:i]), 1+sum(layer_sizes[:i+1]))
x = P[0, x_slice]
xx = P[x_slice, x_slice]
violations[f'lay{i}_bound'] = _violation_leq(np.diag(xx), (lb+ub)*x - lb*ub)
# Objective
obj = const + np.sum(obj * z)
return obj, violations
| jax_verify-master | jax_verify/extensions/sdp_verify/cvxpy_verify.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Facilities to construct SDP Verification problem instances."""
from typing import Callable, Dict, Mapping, Optional, Sequence, Tuple, Union
import jax.numpy as jnp
from jax_verify.extensions.sdp_verify import utils
from jax_verify.src import bound_propagation
from jax_verify.src import graph_traversal
from jax_verify.src import synthetic_primitives
from jax_verify.src.types import Index, Primitive, Tensor # pylint: disable=g-multiple-import
import numpy as np
Bound = bound_propagation.Bound
InputBound = graph_traversal.InputBound
TransformContext = graph_traversal.TransformContext
SdpDualVerifInstance = utils.SdpDualVerifInstance
class SdpReluProblem:
"""SDP problem to optimise over a ReLU-based network."""
def __init__(
self,
boundprop_transform: bound_propagation.BoundTransform,
spec_fn: Callable[..., Tensor],
*input_bounds: Bound,
):
"""Initialises a ReLU-based network SDP problem.
Args:
boundprop_transform: Transform to supply concrete bounds.
spec_fn: Network to verify.
*input_bounds: Concrete bounds on the network inputs.
"""
self._output_node, self._env = bound_propagation.bound_propagation(
bound_propagation.ForwardPropagationAlgorithm(
_SdpTransform(boundprop_transform)),
spec_fn, *input_bounds)
def build_sdp_verification_instance(self) -> SdpDualVerifInstance:
dual_shapes, dual_types = self._dual_shapes_and_types()
return SdpDualVerifInstance(
make_inner_lagrangian=self._build_lagrangian_fn,
bounds=self._bounds(), dual_shapes=dual_shapes, dual_types=dual_types)
def _dual_shapes_and_types(self) -> Tuple[
Sequence[Union[Mapping[str, np.ndarray], np.ndarray]],
Sequence[Union[Mapping[str, utils.DualVarTypes], utils.DualVarTypes]]]:
"""Returns shapes and types of dual vars."""
dual_shapes = []
dual_types = []
num_kappa = 1
for node in self._env.values():
if isinstance(node, Bound) and not node.is_affine: # pytype: disable=attribute-error # jax-ndarray
node_dual_shapes, node_dual_types = node.dual_shapes_and_types() # pytype: disable=attribute-error # jax-ndarray
dual_shapes.append(node_dual_shapes)
dual_types.append(node_dual_types)
num_kappa += np.prod(node.shape[1:], dtype=np.int32)
dual_shapes.append(np.array([1, num_kappa]))
dual_types.append(utils.DualVarTypes.INEQUALITY)
return dual_shapes, dual_types
def _bounds(self) -> Sequence[utils.IntBound]:
return [
utils.IntBound(lb=node.lower, ub=node.upper, lb_pre=None, ub_pre=None)
for node in self._env.values()
if isinstance(node, Bound) and not node.is_affine] # pytype: disable=attribute-error # jax-ndarray
def _build_lagrangian_fn(
self,
dual_vars: Sequence[Union[Mapping[str, Tensor], Tensor]],
) -> Callable[[Tensor], Tensor]:
"""Returns a function that computes the Lagrangian for a ReLU network.
This function assumes `spec_fn` represents a feedforward ReLU network i.e.
x_{i+1} = relu(W_i x_i + b_i), with a final linear objective. The network
may be branched (e.g. ResNets or DenseNets), including the objective part
which may, for example, depend linearly on any of the intermediate
ReLU activations.
It defines the Lagrangian by applying the linear/affine functions to the
inputs and all intermediate activations, and encoding the Lagrangian
terms for each of the constraints defining the ReLU network. It then returns
this function.
Args:
dual_vars: Dual variables for each ReLU node.
Returns:
Function that computes Lagrangian L(x) with fixed `dual_vars`.
"""
nodes = [node for node in self._env.values()
if isinstance(node, Bound) and not node.is_affine] # pytype: disable=attribute-error # jax-ndarray
assert len(dual_vars) == len(nodes) + 1
def lagrangian(xs):
"""Computes Lagrangian L(x) with fixed `dual_vars`."""
assert all([x.shape[0] == 1 for x in xs]), 'no batch mode support'
assert len(xs) == len(nodes)
ys = {node.index: x for node, x in zip(nodes, xs)}
for node in self._env.values():
if isinstance(node, SdpNode):
node.forward_propagate(ys)
lag = jnp.reshape(ys[self._output_node.index], ()) # pytype: disable=attribute-error # jax-ndarray
for node, node_dual_vars, x in zip(nodes, dual_vars[:-1], xs):
lag += node.lagrangian_contrib(node_dual_vars, x, ys)
return lag
return lagrangian
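# In the notation of the docstring above, the returned `lagrangian` evaluates
# a sum of the form (sketch):
#   L(x) = spec_output(x)
#          + sum_i [ lam_i * x_i * (y_i - x_i) + muplus_i * (x_i - y_i)
#                    + muminus_i * x_i - nu_i * (x_i - l_i) * (x_i - u_i) ]
# where x_i are the ReLU outputs (or inputs), y_i the matching pre-activations
# and [l_i, u_i] the concrete bounds; input nodes only contribute the nu term.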
class SdpNode(Bound):
"""Node in the ReLU network to be optimised using SDP."""
def __init__(
self,
index: Index,
base_bound: Bound,
is_input: bool,
preact_node: Optional['SdpNode'],
forward_propagate_fn: Callable[[Dict[Index, Tensor]], None]):
self._index = index
self._base_bound = base_bound
self._is_input = is_input
self._preact_node = preact_node
self._forward_propagate_fn = forward_propagate_fn
@property
def index(self) -> Index:
return self._index
@property
def base_bound(self) -> Bound:
return self._base_bound
@property
def lower(self) -> Tensor:
"""Concrete lower bound."""
return self._base_bound.lower
@property
def upper(self) -> Tensor:
"""Concrete upper bound."""
return self._base_bound.upper
@property
def is_affine(self) -> bool:
return not self._is_input and self._preact_node is None
def dual_shapes_and_types(self) -> Tuple[
Mapping[str, np.ndarray], Mapping[str, utils.DualVarTypes]]:
"""Returns dual shapes and types for this ReLU (or input) layer."""
if self.is_affine:
raise ValueError('No duals for affine layer')
shape = np.array([1] + list(self.shape[1:]))
dual_shapes = {'nu': shape}
dual_types = {'nu': utils.DualVarTypes.INEQUALITY}
if not self._is_input:
dual_shapes.update({
'lam': shape,
'muminus': shape,
'muplus': shape,
})
dual_types.update({
'lam': utils.DualVarTypes.EQUALITY,
'muminus': utils.DualVarTypes.INEQUALITY,
'muplus': utils.DualVarTypes.INEQUALITY,
})
return dual_shapes, dual_types
def lagrangian_contrib(
self,
dual_vars: Mapping[str, Tensor],
x: Tensor,
ys: Dict[Index, Tensor],
) -> Tensor:
"""Returns contribution of this ReLU (or input) layer to the Lagrangian.
Args:
dual_vars: Dual variables for this node.
x: Primal value for this activation (or input).
ys: Primal values for all pre-activations.
Dual variables correspond to:
lam: ReLU quadratic constraint: z^2 = z*(Wx)
nu: IBP quadratic constraint: x^2 <= (l+u)*x - l*u
muminus: x'>=0
muplus: x'>=Wx+b
"""
if self.is_affine:
raise ValueError('No Lagrangian contribution for affine layer')
lag = 0.
if not self._is_input:
y = ys[self._preact_node.index]
# Lagrangian for constraint x' * x' = x' * (Wx+b) where x'=ReLU(Wx+b)
lag += jnp.sum(dual_vars['lam'] * x * (y - x))
# Lagrangian for the constraint x'>=Wx+b
lag += jnp.sum(dual_vars['muplus'] * (x - y))
# Lagrangian for the constraint x'>=0
lag += jnp.sum(dual_vars['muminus'] * x)
# Lagrangian for IBP constraint (x-l)(x-u) <= 0
if 'nu' in dual_vars:
lag += -jnp.sum(dual_vars['nu'] *
(x - self.lower) * (x - self.upper))
return lag # pytype: disable=bad-return-type # jax-ndarray
def forward_propagate(self, xs: Dict[Index, Tensor]):
self._forward_propagate_fn(xs)
class _SdpTransform(graph_traversal.GraphTransform[SdpNode]):
"""Converts a specification function into an SDP problem."""
def __init__(self, boundprop_transform: bound_propagation.BoundTransform):
super().__init__()
self._boundprop_transform = boundprop_transform
def input_transform(
self,
context: TransformContext,
input_bound: InputBound,
) -> SdpNode:
bound = self._boundprop_transform.input_transform(context, input_bound)
return SdpNode(context.index, bound, True, None, lambda ys: None)
def primitive_transform(
self,
context: TransformContext,
primitive: Primitive,
*args: Union[SdpNode, Tensor],
**params,
) -> SdpNode:
arg_bounds = [arg.base_bound if isinstance(arg, SdpNode) else arg
for arg in args]
bound, = self._boundprop_transform.equation_transform(
context, primitive, *arg_bounds, **params)
if primitive == synthetic_primitives.relu_p:
preact, = args
return SdpNode(context.index, bound, False, preact, lambda ys: None)
else:
def forward_propagate(ys: Dict[Index, Tensor]):
xs = [ys[arg.index] if isinstance(arg, Bound) else arg for arg in args]
ys[context.index] = primitive.bind(*xs, **params)
return SdpNode(context.index, bound, False, None, forward_propagate)
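# End-to-end usage sketch (illustrative names, assuming jax_verify.ibp_transform
# as the concrete bound-propagation transform):
#   problem = SdpReluProblem(jax_verify.ibp_transform, spec_fn, input_bound)
#   instance = problem.build_sdp_verification_instance()
# `instance` is an SdpDualVerifInstance that can then be handed to the SDP
# dual solver.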
| jax_verify-master | jax_verify/extensions/sdp_verify/problem_from_graph.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name
"""Facilities to construct SDP Verification problem instances."""
import collections
import jax.numpy as jnp
from jax_verify.extensions.sdp_verify import utils
import numpy as np
################## SDP Verification Instances ####################
# Dual variables correspond to:
# lam: ReLU quadratic constraint: z^2 = z*(Wx)
# nu: IBP quadratic constraint: x^2 <= (l+u)*x - l*u
# nu_quad: IBP quadratic matrix constraint: (x_i - l_i)(x_j - u_j) <= 0
# muminus: x'>=0
# muminus2: Triangle linear Relu relaxation - u(Wx+b) - ul - (u-l)x' >= 0
# where l = min(l, 0), u = max(u, 0)
# muplus: x'>=Wx+b
DualVar = collections.namedtuple(
'DualVar', ['lam', 'nu', 'nu_quad', 'muminus', 'muplus', 'muminus2'])
DualVarFin = collections.namedtuple('DualVarFin', ['nu', 'nu_quad'])
DEFAULT_DISABLED_DUAL_VARS = ('nu_quad', 'muminus2')
NECESSARY_DUAL_VARS = ('lam', 'muplus', 'muminus')
def make_relu_network_lagrangian(dual_vars, params, bounds, obj):
"""Returns a function that computes the Lagrangian for a ReLU network.
This function assumes `params` represent a feedforward ReLU network i.e.
x_{i+1} = relu(W_i x_i + b_i). It defines the Lagrangian by applying the
objective `obj` to the final layer activations, and encoding the Lagrangian
terms for each of the constraints defining the ReLU network. It then returns
this function.
Args:
dual_vars: A length L+1 list of dual variables at each layer
params: A length L list of (W, b) pairs, elided network weights
bounds: A length L+1 list of `IntBound`s, elided bounds at each layer
obj: function, taking final layer activations as input
Returns:
Function that computes Lagrangian L(x) with fixed `dual_vars`.
"""
layer_sizes = utils.layer_sizes_from_bounds(bounds)
def lagrangian(xs_list):
"""Computes Lagrangian L(x) with fixed `dual_vars`."""
assert all([x.shape[0] == 1 for x in xs_list]), 'no batch mode support'
lag = obj(xs_list[-1])
for i in range(len(layer_sizes)):
if i < len(params):
y = utils.fwd(xs_list[i], params[i])
# Lagrangian for constraint x' * x' = x' * (Wx+b) where x'=ReLU(Wx+b)
lag += (jnp.sum(dual_vars[i].lam * xs_list[i + 1] *
(y - xs_list[i + 1])))
# Lagrangian for the constraint x'>=Wx+b
lag += jnp.sum(dual_vars[i].muplus * (xs_list[i + 1] - y))
if dual_vars[i].muminus2.shape:
# Lagrangian for u(Wx+b) - ul - (u-l)x' >= 0, where
# l = min(l, 0) and u = max(u, 0)
raise NotImplementedError('dropped support for muminus2')
# Lagrangian for the constraint x'>=0
lag += jnp.sum(dual_vars[i].muminus * xs_list[i + 1])
# Lagrangian for IBP constraint (x-l)(x-u) <= 0
if dual_vars[i].nu.shape:
lag += -jnp.sum(dual_vars[i].nu *
(xs_list[i] - bounds[i].lb) *(xs_list[i] - bounds[i].ub)
)
if dual_vars[i].nu_quad.shape:
# IBP quadratic matrix constraint: (x_i - l_i)(x_j - u_j) <= 0
lag += -jnp.sum(dual_vars[i].nu_quad *
jnp.matmul((xs_list[i]-bounds[i].lb).T,
xs_list[i]-bounds[i].ub))
return lag
return lagrangian
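# Usage sketch (illustrative): with `dual_vars` shaped as described by
# get_dual_shapes_and_types, elided `params`/`bounds`, and `obj` a function of
# the final-layer activations,
#   lagrangian = make_relu_network_lagrangian(dual_vars, params, bounds, obj)
#   value = lagrangian([x0, x1, ..., xL])  # each x_i has a leading batch dim of 1
# evaluates the penalised objective at the given activations.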
def relu_robustness_verif_instance_to_sdp(verif_instance):
"""Convert solver-agnostic VerifInstance to SdpDualVerifInstance."""
assert verif_instance.type in [
utils.VerifInstanceTypes.MLP_ELIDED, utils.VerifInstanceTypes.CNN_ELIDED]
elided_bounds = verif_instance.bounds[:-1]
dual_shapes, dual_types = get_dual_shapes_and_types(elided_bounds)
def obj(x_final):
out = jnp.sum(x_final * jnp.reshape(verif_instance.obj, x_final.shape))
return out + verif_instance.const
def make_inner_lagrangian(dual_vars):
return make_relu_network_lagrangian(
dual_vars, verif_instance.params, elided_bounds, obj)
return utils.SdpDualVerifInstance(
make_inner_lagrangian=make_inner_lagrangian,
bounds=elided_bounds,
dual_shapes=dual_shapes,
dual_types=dual_types)
def make_sdp_verif_instance(verif_instance):
if isinstance(verif_instance, utils._AdvRobustnessVerifInstance): # pylint: disable=protected-access
return relu_robustness_verif_instance_to_sdp(verif_instance)
else:
raise NotImplementedError('unrecognized verif_instance type')
def make_vae_sdp_verif_instance(params, data_x, bounds):
"""Make SdpDualVerifInstance for VAE reconstruction error spec."""
elided_params = params[:-1]
elided_bounds = bounds[:-1]
dual_shapes, dual_types = get_dual_shapes_and_types(elided_bounds)
def recon_loss(x_final):
x_hat = utils.predict_cnn(params[-1:], x_final).reshape(1, -1)
return jnp.sum(jnp.square(data_x.reshape(x_hat.shape) - x_hat))
def make_inner_lagrangian(dual_vars):
return make_relu_network_lagrangian(
dual_vars, elided_params, elided_bounds, recon_loss)
return utils.SdpDualVerifInstance(
make_inner_lagrangian=make_inner_lagrangian,
bounds=elided_bounds,
dual_shapes=dual_shapes,
dual_types=dual_types)
def make_vae_semantic_spec_params(x, vae_params, classifier_params):
"""Defines network f(z_noise) = classifier(reconstruct(x, z_noise))."""
# Setup - encoder fwd pass
encoder_params, decoder_params = vae_params
encoder_mu_params = encoder_params[:-1]
encoder_sigmasq_params = encoder_params[:-2] + [encoder_params[-1]]
mu_z = utils.predict_cnn(encoder_mu_params, x)
log_sigmasq_z = utils.predict_cnn(encoder_sigmasq_params, x)
sigmasq_z = jnp.exp(log_sigmasq_z)
# Combine the reparameterization with the first decoder layer
# z0 = mu + sigma * z
# z1 = jnp.dot(z0, W) + b
# = jnp.dot(mu + sigma * z, W) + b
# = jnp.dot(z, sigma * W) + [b + jnp.dot(mu, W)]
assert isinstance(decoder_params[0], tuple)
W0_orig, b0_orig = decoder_params[0]
W0 = W0_orig * jnp.reshape(jnp.sqrt(sigmasq_z), (-1, 1))
b0 = b0_orig + jnp.dot(mu_z, W0_orig)
# Now the network is just concatenation of modified decoder + classifier
# This is also applying a Relu to decoder output, but that's fine
combined_params = [(W0, b0)] + decoder_params[1:] + classifier_params
return combined_params
def get_dual_shapes_and_types(bounds_elided):
"""Get shapes and types of dual vars."""
dual_shapes = []
dual_types = []
layer_sizes = utils.layer_sizes_from_bounds(bounds_elided)
for it in range(len(layer_sizes)):
m = layer_sizes[it]
m = [m] if isinstance(m, int) else list(m)
if it < len(layer_sizes)-1:
n = layer_sizes[it + 1]
n = [n] if isinstance(n, int) else list(n)
shapes = {
'lam': [1] + n,
'nu': [1] + m,
'muminus': [1] + n,
'muplus': [1] + n,
'nu_quad': [], 'muminus2': [],
}
types = {
'lam': utils.DualVarTypes.EQUALITY,
'nu': utils.DualVarTypes.INEQUALITY,
'muminus': utils.DualVarTypes.INEQUALITY,
'muplus': utils.DualVarTypes.INEQUALITY,
'nu_quad': utils.DualVarTypes.INEQUALITY,
'muminus2': utils.DualVarTypes.INEQUALITY,
}
dual_shapes.append(DualVar(**{
k: np.array(s) for k, s in shapes.items()}))
dual_types.append(DualVar(**types))
else:
shapes = {'nu': [1] + m, 'nu_quad': []}
types = {'nu': utils.DualVarTypes.INEQUALITY,
'nu_quad': utils.DualVarTypes.INEQUALITY}
dual_shapes.append(DualVarFin(**{
k: np.array(s) for k, s in shapes.items()}))
dual_types.append(DualVarFin(**types))
# Add kappa
N = sum([np.prod(np.array(i)) for i in layer_sizes])
dual_shapes.append(np.array([1, N+1]))
dual_types.append(utils.DualVarTypes.INEQUALITY)
return dual_shapes, dual_types
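# For example (illustrative): for an elided MLP with layer sizes [4, 3], this
# returns (roughly) dual_shapes =
#   [DualVar(lam=[1, 3], nu=[1, 4], nu_quad=[], muminus=[1, 3], muplus=[1, 3],
#            muminus2=[]),
#    DualVarFin(nu=[1, 3], nu_quad=[]),
#    [1, 8]]   # kappa: one entry per activation plus one (N = 4 + 3).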
| jax_verify-master | jax_verify/extensions/sdp_verify/problem.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name
"""Small helper functions."""
import collections
import copy
import enum
import functools
import jax
import jax.lax as lax
import jax.numpy as jnp
import numpy as np
import optax
import tree
IntervalBound = collections.namedtuple(
'IntervalBound', ['lb', 'ub', 'lb_pre', 'ub_pre'])
IntBound = IntervalBound
_AdvRobustnessVerifInstance = collections.namedtuple(
'VerifInstance',
['params', 'params_full', 'input_bounds', 'bounds', 'obj', 'obj_orig',
'const', 'type'])
_VerifInstance = _AdvRobustnessVerifInstance # alias
_SdpDualVerifInstance = collections.namedtuple(
'SdpDualVerifInstance',
['bounds', 'make_inner_lagrangian', 'dual_shapes', 'dual_types'])
class SdpDualVerifInstance(_SdpDualVerifInstance):
"""A namedtuple specifying a verification instance for the dual SDP solver.
Fields:
* bounds: A list of bounds on post-activations at each layer
* make_inner_lagrangian: A function which takes ``dual_vars`` as input, and
returns another function, the inner lagrangian, which evaluates
Lagrangian(x, dual_vars) for any value ``x`` (the set of activations).
* dual_types: A pytree matching dual_vars specifying which dual_vars
should be non-negative.
* dual_shapes: A pytree matching dual_vars specifying shape of each var.
"""
default_conv_transpose = functools.partial(
lax.conv_transpose, dimension_numbers=('NHWC', 'HWIO', 'NHWC'))
################## Networks ######################
def mlp_layer_sizes(params):
# Dimension of input to each layer, and output of final layer
layer_sizes = [int(w.shape[0]) for (w, b) in params]
fin_w, _ = params[-1]
layer_sizes.append(int(fin_w.shape[1]))
return layer_sizes
def nn_layer_sizes(params):
"""Compute MLP sizes of the inputs/outputs for individual layers."""
assert not any([isinstance(x, dict) for x in params]), 'MLP only'
return mlp_layer_sizes(params)
def layer_sizes_from_bounds(bounds):
assert all([b.lb.shape[0] == 1 for b in bounds])
layer_sizes = [b.lb.shape[1:] for b in bounds]
layer_sizes = [s[0] if len(s) == 1 else s for s in layer_sizes]
return layer_sizes
def predict_mlp(params, inputs):
for W, b in params[:-1]:
outputs = jnp.dot(inputs, W) + b
inputs = jnp.maximum(outputs, 0)
W, b = params[-1]
return jnp.dot(inputs, W) + b
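# Minimal usage sketch (hypothetical shapes):
#   params = [(W0, b0), (W1, b1)]      # W0: [d_in, d_h], W1: [d_h, d_out]
#   logits = predict_mlp(params, x)    # x: [batch, d_in] -> [batch, d_out]
# ReLUs are applied after every layer except the last.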
def fwd(inputs, layer_params):
"""JAX forward pass of Linear, Conv, or ConvTranspose."""
if isinstance(layer_params, dict):
# Conv/ConvTranspose: Reshape input if necessary:
if len(inputs.shape) < 4:
w = h = int(np.sqrt(inputs.shape[-1]/layer_params['n_cin']))
inputs = inputs.reshape(inputs.shape[0], h, w, layer_params['n_cin'])
W, b = layer_params['W'], np.reshape(layer_params['b'], [1, 1, 1, -1])
if 'transpose' in layer_params and layer_params['transpose']:
# ConvTranspose
return default_conv_transpose(
inputs, W, (layer_params['stride'], layer_params['stride']),
layer_params['padding']) + b
else:
# Normal Conv2D
dn = lax.conv_dimension_numbers(inputs.shape, W.shape,
('NHWC', 'HWIO', 'NHWC'))
return lax.conv_general_dilated(
inputs, W, (layer_params['stride'], layer_params['stride']),
layer_params['padding'], (1, 1), (1, 1), dn) + b
elif isinstance(layer_params, tuple):
# Linear fully-connected layer
# TODO: Figure out why we were dropping batch dim before here
inputs = (inputs.reshape(inputs.shape[0], -1)
if len(inputs.shape) == 4 else inputs)
(W, b) = layer_params
return jnp.dot(inputs, W) + b
elif callable(layer_params):
# Most general way of specifying an affine layer is to provide its function.
return layer_params(inputs)
else:
raise NotImplementedError('Unknown layer')
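# Accepted layer parameter formats (illustrative values):
#   conv:            {'W': w, 'b': b, 'stride': 1, 'padding': 'SAME', 'n_cin': 3}
#   conv transpose:  the same dict with 'transpose': True
#   fully connected: a (W, b) tuple
#   generic affine:  any callable mapping inputs to outputs
# 'n_cin' is only needed to reshape flattened inputs back into NHWC images.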
def predict_cnn(params, inputs, include_preactivations=False):
"""Forward pass for a CNN given parameters.
Args:
params: Parameters for the CNN. See make_cnn_params for syntax.
inputs: Inputs to CNN.
include_preactivations: bool. If True, also return pre-activations after
each matmul layer.
Returns:
act: Output from forward pass through CNN.
(Optional) layer_acts: Post-relu activation at each layer
"""
act = inputs
layer_preacts = []
for counter, layer_params in enumerate(params):
act = fwd(act, layer_params)
layer_preacts.append(act)
if counter < len(params) - 1:
# no relu on final layer
act = jnp.maximum(act, 0)
return act if not include_preactivations else (act, layer_preacts)
def get_network_activs(params, x):
assert len(x.shape) == 1, 'x should not have batch dim'
activs = [x]
for W, b in params:
x = jnp.matmul(x, W) + b
x = jnp.maximum(x, 0.)
activs.append(x)
return activs
def get_layer_params(fun_to_extract, example_input):
"""Extract the parameters from a network specified as a function.
Args:
fun_to_extract: Function implementing a simple MLP/CNN network composed
of alternating linear layers and ReLU activation functions.
example_input: Example of input to `fun_to_extract`.
Returns:
params: Parameters for the CNN/MLP, as taken by `predict_cnn` or
`predict_mlp`
"""
jaxpr_maker = jax.make_jaxpr(fun_to_extract)
parsed = jaxpr_maker(example_input)
layers = []
next_is_relu = False
scaling = None
centering = None
jax_parameters = {var: param_vals
for var, param_vals in zip(parsed.jaxpr.constvars,
parsed.literals)}
def _get_const_input_arg(eqn):
if eqn.invars[0] in jax_parameters:
return eqn.invars[0]
elif eqn.invars[1] in jax_parameters:
return eqn.invars[1]
else:
raise ValueError('None of the primitive\'s input is a weight tensor.')
for eqn in parsed.jaxpr.eqns:
if eqn.primitive == lax.reshape_p:
if eqn.invars[0] in jax_parameters:
        # If this is a reshaping of a constant, the input of the reshape will
        # be in the jax_parameters dict. We can treat the reshaped constant as
        # another constant.
out_var = eqn.outvars[0]
inps = [jax_parameters[eqn.invars[0]]]
jax_parameters[out_var] = eqn.primitive.bind(*inps, **eqn.params)
else:
        # If it is a reshape in the forward-propagation pass of the network, we
        # can ignore it, as the forward evaluation code in sdp_verify handles
        # the reshaping itself.
continue
elif eqn.primitive in (lax.dot_general_p,
lax.conv_general_dilated_p):
if next_is_relu:
raise ValueError('Unsupported architecture. Only supported networks are'
                         ' alternations of linear (convolutional/fully connected)'
' and ReLU layers.')
# Find the input which is a parameter.
param_input = _get_const_input_arg(eqn)
weight_params = jax_parameters[param_input]
if eqn.primitive == lax.dot_general_p:
bias_shape = weight_params.shape[1]
scaling_shape = (-1, 1)
else:
bias_shape = (1, 1, 1, weight_params.shape[-1])
# Based on the code in `fwd`, the dimension for the input channel is the
# third one.
scaling_shape = (1, 1, -1, 1)
# Define the bias of the network, potentially incorporating existing
# preprocessing steps
bias = jnp.zeros(bias_shape)
if centering is not None:
inp_bias = jnp.zeros_like(example_input) + centering
equivalent_bias = eqn.primitive.bind(inp_bias, weight_params,
**eqn.params)
bias += equivalent_bias
centering = None
if scaling is not None:
scaling = jnp.reshape(scaling, scaling_shape)
weight_params = weight_params * scaling
scaling = None
if eqn.primitive == lax.dot_general_p:
layers.append((weight_params, bias))
else:
        # The eval function of sdp_verify only handles strides that are the
        # same in all directions.
strides = eqn.params['window_strides']
if not all(elt == strides[0] for elt in strides):
raise ValueError('Different spatial strides unsupported.')
        # The forward code expects the bias to correspond to only one column.
bias = jnp.reshape(bias[0, 0, 0, :], (1, 1, 1, -1))
layers.append({
'W': weight_params,
'b': bias,
'stride': strides[0],
'padding': eqn.params['padding'],
})
next_is_relu = True
elif eqn.primitive == lax.add_p:
param_input = _get_const_input_arg(eqn)
bias_params = jax_parameters[param_input]
if not next_is_relu:
raise ValueError('Unsupported architecture. Only supported networks are'
                         ' alternations of linear (convolutional/fully connected)'
' and ReLU layers.')
# This is an addition after a linear layer. Just fold it into the bias
# of the previous layer.
if isinstance(layers[-1], tuple):
# Remove the last linear layer and include a version of it that includes
# the bias term.
weight, bias = layers.pop()
layers.append((weight, bias + bias_params))
else:
layers[-1]['b'] = layers[-1]['b'] + bias_params
# No need to update `next_is_relu` because it remains True
elif eqn.primitive == lax.sub_p:
if layers:
# We handle this only in the case of preprocessing at the beginning of
# the network.
        raise ValueError('Unsupported operation. sub is only supported as a'
                         ' centering of the network inputs.')
# This appears potentially at the beginning of the network, during the
# preprocessing of the inputs.
centering = centering or 0.
centering -= jax_parameters[_get_const_input_arg(eqn)]
elif eqn.primitive == lax.div_p:
if layers:
# We handle this only in the case of preprocessing at the beginning of
# the network.
        raise ValueError('Unsupported operation. div is only supported as a'
                         ' rescaling of the network inputs.')
divide_scaling = jax_parameters[_get_const_input_arg(eqn)]
# This appears during the preprocessing of the inputs of the networks.
scaling = scaling or 1.0
scaling /= divide_scaling
# Rescale the centering if there is one.
if centering is not None:
centering /= divide_scaling
elif eqn.primitive == lax.max_p:
if ((not next_is_relu)
or (not isinstance(eqn.invars[1], jax.core.Literal))
or (eqn.invars[1].val != 0.0)):
raise ValueError('Unsupported architecture. Only supported networks are'
                         ' alternations of linear (convolutional/fully connected)'
' and ReLU layers.')
      # The ReLUs are not represented in the parameters dictionaries, so there
      # is no need to add anything to the layers list.
next_is_relu = False
elif eqn.primitive == lax.broadcast_in_dim_p:
      # There might be a broadcast of the bias; we just store the original bias
      # where the broadcast one should be.
jax_parameters[eqn.outvars[0]] = jax_parameters[eqn.invars[0]]
else:
raise ValueError(f'Unsupported primitive {eqn.primitive}. The only '
                       'supported networks are alternations of linear '
'(convolutional / fully connected) and ReLU layers.')
return layers
################## Bound prop ####################
def boundprop(params, bounds_in):
"""Compute IntervalBound for each layer."""
layer_bounds = [bounds_in]
for layer_params in params:
lb_old = layer_bounds[-1].lb
ub_old = layer_bounds[-1].ub
if isinstance(layer_params, dict):
rad_layer_params = copy.deepcopy(layer_params)
rad_layer_params['W'] = jnp.abs(layer_params['W'])
rad_layer_params['b'] = jnp.zeros_like(layer_params['b'])
elif isinstance(layer_params, tuple):
W, b = layer_params
rad_layer_params = jnp.abs(W), jnp.zeros_like(b)
elif callable(layer_params):
zero_inputs = jnp.zeros_like(lb_old)
wt = jax.jacfwd(layer_params)(zero_inputs) # Output axes appear first.
in_axes = list(range(wt.ndim - lb_old.ndim, wt.ndim))
rad_layer_params = lambda x: jnp.sum(x * jnp.abs(wt), axis=in_axes) # pylint:disable=cell-var-from-loop ; only used within the iteration
else:
raise NotImplementedError('Unknown layer')
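    # Standard interval arithmetic through one affine layer: with center
    # c = (lb + ub) / 2 and radius r = (ub - lb) / 2, the output interval is
    # [f(c) - |W| r, f(c) + |W| r], which is what the two `fwd` calls below
    # compute via `layer_params` and `rad_layer_params`.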
center = (lb_old + ub_old) / 2.
radius = (ub_old - lb_old) / 2.
act_c = fwd(center, layer_params)
act_r = fwd(radius, rad_layer_params)
lb = act_c - act_r
ub = act_c + act_r
layer_bounds.append(IntBound(lb_pre=lb,
ub_pre=ub,
lb=jnp.maximum(lb, 0.),
ub=jnp.maximum(ub, 0.)))
return layer_bounds
def init_bound(x, epsilon, input_bounds=(0., 1.), add_batch_dim=True):
x = np.expand_dims(x, axis=0) if add_batch_dim else x
lb_init = np.maximum(input_bounds[0], x - epsilon)
ub_init = np.minimum(input_bounds[1], x + epsilon)
return IntBound(lb=lb_init, ub=ub_init, lb_pre=None, ub_pre=None)
def ibp_bound_elided(verif_instance):
assert len(verif_instance.bounds) == len(verif_instance.params) + 2
obj, obj_const, final_bound = (
verif_instance.obj, verif_instance.const, verif_instance.bounds[-2])
ub = final_bound.ub.reshape(final_bound.ub.shape[0], -1)
lb = final_bound.lb.reshape(final_bound.lb.shape[0], -1)
obj_val = np.sum(np.maximum(obj, 0.) * ub +
np.minimum(obj, 0.) * lb)
return float(obj_val + obj_const)
def ibp_bound_nonelided(verif_instance):
assert len(verif_instance.bounds) == len(verif_instance.params) + 2
obj_orig = verif_instance.obj_orig
final_bound = verif_instance.bounds[-1]
batch_size = final_bound.ub_pre.shape[0]
ub = final_bound.ub_pre.reshape(batch_size, -1)
lb = final_bound.lb_pre.reshape(batch_size, -1)
obj_val = np.sum(np.maximum(obj_orig, 0.) * ub +
np.minimum(obj_orig, 0.) * lb)
return float(obj_val)
################## Solver-agnostic Verification Instances ####################
class VerifInstanceTypes(enum.Enum):
# `params` represent a network of repeated relu(Wx+b)
# The final output also includes a relu activation, and `obj` composes
# the final layer weights with the original objective
MLP_ELIDED = 'mlp_elided'
CNN_ELIDED = 'cnn_elided'
def make_relu_robust_verif_instance(
params, bounds=None, target_label=1, label=2, input_bounds=None):
"""Make VerifInstance from network weights and input.
Args:
params: list of pairs of array-like objects [(W, b)], the weights and biases
of a multi-layer perceptron.
bounds: None, or a list of IntBound objects, of length len(params) + 1.
The interval bounds for each layer.
target_label: int, the adversary target
label: int, the true label
input_bounds: None, pair of floats, or pair of vectors with length matching
input dimension. The image bounds e.g. (0, 1) or (0, 255).
Returns:
verif_instance: a VerifInstance object
"""
op_size, verif_type = output_size_and_verif_type(params, bounds)
elided_params, obj_orig = elide_params(params, label, target_label, op_size)
obj = elided_params[-1][0].transpose()
const = jnp.squeeze(elided_params[-1][1])
return _VerifInstance(
params=params[:-1],
params_full=params,
input_bounds=input_bounds,
bounds=bounds,
obj=obj,
obj_orig=obj_orig,
const=const,
type=verif_type)
def make_relu_robust_verif_instance_elided(
params, bounds=None, input_bounds=None):
"""Make VerifInstance from network weights and input.
Args:
params: list of pairs of array-like objects [(W, b)], the weights and biases
of a multi-layer perceptron with the final layer elided with the
objective.
bounds: None, or a list of IntBound objects, of length len(params) + 1.
The interval bounds for each layer.
input_bounds: None, pair of floats, or pair of vectors with length matching
input dimension. The image bounds e.g. (0, 1) or (0, 255).
Returns:
verif_instance: a VerifInstance object
"""
op_size, verif_type = output_size_and_verif_type(params, bounds)
# Already elided
assert op_size == 1
return _VerifInstance(
params=params[:-1],
params_full=params,
input_bounds=input_bounds,
bounds=bounds,
obj=params[-1][0],
obj_orig=jnp.ones(1),
const=params[-1][1],
type=verif_type)
def output_size_and_verif_type(params, bounds):
"""Returns size of output, and verify_type from params and bounds."""
assert bounds is None or len(bounds) == len(params) + 1
if bounds is None:
assert not any([isinstance(x, dict) for x in params])
layer_sizes = mlp_layer_sizes(params)
else:
layer_sizes = layer_sizes_from_bounds(bounds)
# Adversary maximizes objective - large when logit(target) > logit(label)
if any([isinstance(x, dict) for x in params]):
verif_type = VerifInstanceTypes.CNN_ELIDED
else:
verif_type = VerifInstanceTypes.MLP_ELIDED
return layer_sizes[-1], verif_type
make_nn_verif_instance = make_relu_robust_verif_instance # alias
################## SDP Verification Instances ####################
class DualVarTypes(enum.Enum):
EQUALITY = 'equality'
INEQUALITY = 'inequality'
def elide_params(params, label, target_label, op_size):
label_onehot = jnp.eye(op_size)[label]
target_onehot = jnp.eye(op_size)[target_label]
obj_orig = target_onehot - label_onehot
w_fin, b_fin = params[-1]
obj_bp = jnp.matmul(w_fin, obj_orig)
const = jnp.expand_dims(jnp.vdot(obj_orig, b_fin), axis=-1)
obj = jnp.reshape(obj_bp, (obj_bp.size, 1))
params_elided = params[:-1] + [(obj, const)]
return params_elided, obj_orig
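# Worked reading of the elision above: with final-layer weights w_fin of shape
# [d, k], biases b_fin of shape [k], and c = onehot(target) - onehot(label),
# the elided layer has obj = w_fin @ c (a [d, 1] column) and const = c . b_fin,
# so obj' x + const = logit(target) - logit(label) for penultimate-layer
# activations x.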
################### Image Preprocessing #######################
def preprocess_cifar(image, inception_preprocess=False, perturbation=False):
"""Proprocess images and perturbations."""
if inception_preprocess:
# Use 2x - 1 to get [-1, 1]-scaled images
rescaled_devs = 0.5
rescaled_means = 0.5
else:
rescaled_means = np.array([125.3, 123.0, 113.9]) / 255
rescaled_devs = np.array([63.0, 62.1, 66.7]) / 255
if perturbation:
return image / rescaled_devs
else:
return (image - rescaled_means) / rescaled_devs
def preprocessed_cifar_eps_and_input_bounds(
shape=(32, 32, 3), epsilon=2/255, inception_preprocess=False):
"""Get `epsilon` and `input_bounds`."""
preprocess = functools.partial(
preprocess_cifar, inception_preprocess=inception_preprocess)
  epsilon = preprocess(np.ones(shape) * epsilon, perturbation=True)
input_bounds = (preprocess(np.zeros(shape)), preprocess(np.ones(shape)))
return epsilon, input_bounds
################### Attacks #######################
def adv_objective(model_fn, x, label, target_label):
logits = model_fn(x)
if len(logits.shape) == 2:
logits = logits[0]
loss = logits[target_label] - logits[label]
return jnp.sum(loss)
def fgsm_single(model_fn, x, label, target_label, epsilon, num_steps,
step_size, input_bounds=(0., 1.)):
"""Same interface as l.d.r.adversarial.attacks, but no batch dim on x."""
adv_loss = lambda *args, **kwargs: -adv_objective(*args, **kwargs)
adv_loss_x = lambda x: adv_loss(model_fn, x, label, target_label)
return pgd(adv_loss_x, x, epsilon, num_steps, step_size,
input_bounds=input_bounds)
def untargeted_margin_loss(logits, labels):
"""Minimized by decreasing true score, and increasing second highest."""
batch_size = logits.shape[0]
num_classes = logits.shape[-1]
label_logits = logits[jnp.arange(batch_size), labels]
logit_mask = jax.nn.one_hot(labels, num_classes)
inf = 1e5
highest_logits = jnp.max(logits - inf * logit_mask, axis=-1)
return label_logits - highest_logits
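# Worked example: logits = [2., 5., 1.] with label = 1 gives 5 - 2 = 3; a
# positive margin means the true class still wins, so an attack minimises it.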
def pgd_default(model_fn, x, label, epsilon, num_steps, step_size,
input_bounds=(0., 1.)):
assert x.shape[0] == label.shape[0]
adv_loss_x = lambda x: jnp.sum(untargeted_margin_loss(model_fn(x), label))
return pgd(adv_loss_x, x, epsilon, num_steps, step_size,
input_bounds=input_bounds)
def pgd(adv_loss, x_init, epsilon, num_steps, step_size, input_bounds=(0., 1.)):
grad_adv_loss = jax.grad(adv_loss)
x = x_init
for _ in range(num_steps):
grad_x = grad_adv_loss(x)
x -= jnp.sign(grad_x) * step_size
x = jnp.clip(x, x_init - epsilon, x_init + epsilon)
x = jnp.clip(x, input_bounds[0], input_bounds[1])
return x
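# Minimal usage sketch (hypothetical model_fn, batch and attack parameters):
#   x_adv = pgd_default(model_fn, x_batch, labels, epsilon=8/255,
#                       num_steps=40, step_size=1/255)
# Each step moves x against the sign of the margin-loss gradient, then projects
# back into the intersection of the epsilon-ball and the input bounds.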
################ Optimizers ##################
def scale_by_variable_opt(multipliers):
"""Custom learning rates for different variables.
Args:
multipliers: a pytree, with the same structure as `params`. Each leaf can
be either a float, or an array shape-compatible with the corresponding
`params` element. These multiply the learning rate for each leaf.
Returns:
optax.GradientTransformation optimizer
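  Example (illustrative only; the multipliers pytree must match `params`):
    opt = optax.chain(optax.sgd(1e-2),
                      scale_by_variable_opt({'w': 1.0, 'b': 0.1}))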
"""
def init_fn(params):
params_struct = jax.tree_map(lambda _: None, params)
multipliers_struct = jax.tree_map(lambda _: None, multipliers)
assert params_struct == multipliers_struct, (
'multipliers should have same struct as params')
return None
def update_fn(updates, _, params=None):
del params # Unused.
scaled_updates = jax.tree_map(lambda a, g: a * g, multipliers, updates)
return scaled_updates, None
return optax.GradientTransformation(init_fn, update_fn)
################ Pytrees / Misc ##################
def flatten(pytree, backend=np):
"""Take pytree of arrays, then flatten tree, flatten arrays, concatenate."""
seq = tree.flatten(pytree)
seq_flat = [backend.reshape(x, -1) for x in seq]
return backend.concatenate(seq_flat)
def unflatten_like(a, pytree):
"""Take 1-D array produced by flatten() and unflatten like pytree."""
seq = tree.flatten(pytree)
seq_sizes = [np.reshape(x, -1).shape for x in seq]
starts = [0] + list(np.cumsum(seq_sizes))
a_seq_flat = [a[starts[i]:starts[i+1]] for i in range(len(starts)-1)]
a_seq = [np.reshape(x1, x2.shape) for x1, x2 in zip(a_seq_flat, seq)]
return tree.unflatten_as(pytree, a_seq)
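# Note: unflatten_like(flatten(p), p) reproduces `p`: the pytree is flattened
# into one 1-D array, then split using the per-leaf sizes and reshaped back.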
def structure_like(tree1, tree2):
# pylint: disable=g-doc-args, g-doc-return-or-yield
"""Makes tree1 have same structure as tree2."""
flat_paths1 = tree.flatten_with_path(tree.map_structure(lambda x: 0, tree1))
flat_paths2 = tree.flatten_with_path(tree.map_structure(lambda x: 0, tree2))
assert list(sorted(flat_paths1)) == list(sorted(flat_paths2)), (
'paths of tree1 and tree2 do not match')
indices = [flat_paths1.index(path) for path in flat_paths2]
flat_tree1 = tree.flatten(tree1)
reordered_flat_tree1 = [flat_tree1[i] for i in indices]
return tree.unflatten_as(tree2, reordered_flat_tree1)
| jax_verify-master | jax_verify/extensions/sdp_verify/utils.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for computing eigenvectors/eigenvalues (e.g. Lanczos)."""
# pylint: disable=invalid-name
# Capital letters for matrices
import time
from absl import logging
import jax
import jax.lax as lax
import jax.numpy as jnp
import jax.random as random
import jax.scipy.linalg
import numpy as np
def safe_eigh(a, UPLO=None, symmetrize_input=True):
# TODO: Remove when issue with CUDA eigh is resolved
eigs, eig_vecs = lax.cond(
jnp.linalg.norm(a) > 0.0,
lambda t: jax.scipy.linalg.eigh(t, UPLO, symmetrize_input),
lambda _: (jnp.zeros(a.shape[0]), jnp.eye(a.shape[0])),
operand=a)
return jax.lax.stop_gradient(eigs), jax.lax.stop_gradient(eig_vecs)
def lanczos_alg(matrix_vector_product,
dim,
order,
rng_key,
dynamic_unroll=True,
use_jax=True,
verbose=False):
"""Lanczos algorithm for tridiagonalizing a real symmetric matrix.
This function applies Lanczos algorithm of a given order. This function
does full reorthogonalization.
WARNING: This function may take a long time to jit compile (e.g. ~3min for
order 90 and dim 1e7).
Args:
matrix_vector_product: Maps v -> Hv for a real symmetric matrix H.
Input/Output must be of shape [dim].
dim: Matrix H is [dim, dim].
order: An integer corresponding to the number of Lanczos steps to take.
rng_key: The jax PRNG key.
    dynamic_unroll: bool, True (default) uses jax.fori_loop for faster JIT
      compile times, False does static unroll.
    use_jax: Whether to do the Lanczos computation in JAX (True) or numpy
      (False). Using numpy can save memory.
    verbose: Print intermediate computation stats.
Returns:
tridiag: A tridiagonal matrix of size (order, order).
vecs: A numpy array of size (order, dim) corresponding to the Lanczos
vectors.
"""
if dynamic_unroll:
assert use_jax, 'Dynamic unroll only available with JAX.'
return _lanczos_alg_dynamic_unroll(
matrix_vector_product, dim, order, rng_key)
if use_jax:
backend = jnp
index_update = lambda x, idx, y: x.at[idx].set(y)
else:
backend = np
def _index_update(array, index, value):
new_array = array.copy()
new_array[index] = value
return new_array
index_update = _index_update
tridiag = backend.zeros((order, order))
vecs = backend.zeros((order, dim))
init_vec = random.normal(rng_key, shape=(dim,))
init_vec = init_vec / backend.linalg.norm(init_vec)
vecs = index_update(vecs, 0, init_vec)
beta = 0
start = time.time()
# TODO: Better to use lax.fori loop for faster compile?
for i in range(order):
if verbose:
end = time.time()
logging.info('Iter %d out of %d. Time: %f', i, order, end-start)
v = vecs[i, :].reshape((dim,))
if i == 0:
v_old = 0
else:
v_old = vecs[i - 1, :].reshape((dim,))
w = matrix_vector_product(v)
assert (w.shape[0] == dim and len(w.shape) == 1), (
'Output of matrix_vector_product(v) must be of shape [dim].')
w = w - beta * v_old
alpha = backend.dot(w, v)
tridiag = index_update(tridiag, (i, i), alpha)
w = w - alpha * v
# Full Reorthogonalization. Vectorized implementation of Gram Schmidt
coeffs = backend.dot(vecs, w)
scaled_vecs = (vecs.transpose()*coeffs).transpose()
scaled_vecs = backend.sum(scaled_vecs, axis=0)
w -= scaled_vecs
beta = backend.linalg.norm(w)
if i + 1 < order:
# Small beta (<1e-6) implies Lanczos has converged.
# TODO: Refactor to not run the loop when beta < 1e-6
beta_write = lax.cond(beta < 1e-6, beta, jnp.zeros_like, beta,
lambda x: x)
w_write = lax.cond(beta < 1e-6, w, jnp.zeros_like, w / beta, lambda x: x)
tridiag = index_update(tridiag, (i, i + 1), beta_write)
tridiag = index_update(tridiag, (i + 1, i), beta_write)
vecs = index_update(vecs, i + 1, w_write)
return (tridiag, vecs)
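# The eigenvalues of `tridiag` approximate the extreme eigenvalues of H, so a
# typical use (as in max_eigenvector_lanczos below) is:
#   tridiag, vecs = lanczos_alg(Hv, dim, order, key)
#   eigs, eig_vecs = safe_eigh(tridiag)
#   approx_max_eigvec = jnp.dot(vecs.T, eig_vecs[:, -1])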
def _lanczos_alg_dynamic_unroll(
matrix_vector_product, dim, order, rng_key):
"""Lanczos with jax.fori_loop unroll - see docstring for lanczos_alg()."""
backend = jnp
index_update = lambda x, idx, y: x.at[idx].set(y)
tridiag = backend.zeros((order, order))
vecs = backend.zeros((order, dim))
init_vec = random.normal(rng_key, shape=(dim,))
init_vec = init_vec / backend.linalg.norm(init_vec)
vecs = index_update(vecs, 0, init_vec)
beta = 0.
def _body_fn_update_alpha(i, vecs, tridiag, beta):
"""Duplicated code from first half of body_fn() used for final iteration."""
v = vecs[i, :].reshape((dim,))
v_old = vecs[i - 1, :].reshape((dim,))
w = matrix_vector_product(v)
assert (w.shape[0] == dim and len(w.shape) == 1), (
'Output of matrix_vector_product(v) must be of shape [dim].')
w = w - beta * v_old
alpha = backend.dot(w, v)
tridiag = index_update(tridiag, (i, i), alpha)
return tridiag
def body_fn(i, vals):
"""Main body used for jax.fori_loop."""
vecs, tridiag, beta = vals
v = vecs[i, :].reshape((dim,))
v_old = lax.cond(i == 0, None, lambda x: jnp.zeros(dim, jnp.float32),
vecs, lambda vecs: vecs[i - 1, :].reshape((dim,)))
w = matrix_vector_product(v)
assert (w.shape[0] == dim and len(w.shape) == 1), (
'Output of matrix_vector_product(v) must be of shape [dim].')
w = w - beta * v_old
alpha = backend.dot(w, v)
tridiag = index_update(tridiag, (i, i), alpha)
w = w - alpha * v
# Full Reorthogonalization. Vectorized implementation of Gram Schmidt
coeffs = backend.dot(vecs, w)
scaled_vecs = (vecs.transpose()*coeffs).transpose()
scaled_vecs = backend.sum(scaled_vecs, axis=0)
w -= scaled_vecs
beta = backend.linalg.norm(w)
# Small beta (<1e-6) implies Lanczos has converged.
beta_write = lax.cond(beta < 1e-6, beta, jnp.zeros_like, beta, lambda x: x)
w_write = lax.cond(beta < 1e-6, w, jnp.zeros_like, w / beta, lambda x: x)
tridiag = index_update(tridiag, (i, i + 1), beta_write)
tridiag = index_update(tridiag, (i + 1, i), beta_write)
vecs = index_update(vecs, i + 1, w_write)
return (lax.stop_gradient(vecs), lax.stop_gradient(tridiag),
lax.stop_gradient(beta))
vecs, tridiag, beta_final = jax.lax.fori_loop(
0, order - 1, body_fn, (vecs, tridiag, beta))
# Update tridiag one last time for final iteration
tridiag = _body_fn_update_alpha(order - 1, vecs, tridiag, beta_final)
return (tridiag, vecs)
def _make_pos(vecx):
return jnp.maximum(vecx, 0)
############ Lanczos, Lagrangian, dual function ############
def max_eigenvector_lanczos(matrix_vector_product, dim, order, key, scl=-1,
dynamic_unroll=True, use_safe_eig_vec=True):
"""Get (soft)max eigenvector via Lanczos + Scipy eigendecomp."""
tridiag, vecs = lanczos_alg(matrix_vector_product, dim, order, key,
verbose=False, dynamic_unroll=dynamic_unroll)
eigs_triag, eig_vecs = safe_eigh(tridiag)
if scl < 0:
# Get max eigenvector
eig_vec = jnp.dot(jnp.transpose(vecs), eig_vecs[:, -1])
else:
# Softmax weighting of max eigenvector - better gradients?
eig_softmax = jnp.exp(scl*eigs_triag -
jax.scipy.special.logsumexp(scl * eigs_triag))
eig_vec = jnp.dot(jnp.transpose(vecs), jnp.dot(eig_vecs, eig_softmax))
if use_safe_eig_vec:
    # Handle the case when the norm of the eigenvector is ~0. This can happen
    # when tridiag is rank deficient. To handle this corner case, sample a new
    # random vector and remove its components along all eigenvectors with
    # non-zero eigenvalues, giving a vector in the null-space of the Hessian,
    # i.e. an eigenvector corresponding to eigenvalue 0.
# TODO: Possible suspect if Lanczos starts to diverge.
def get_eig_vec(vals):
key, (eig_vecs, vecs, eigs_triag) = vals
eig_vec = jnp.dot(jnp.transpose(vecs), eig_vecs[:, -1])
random_vec = jax.random.uniform(key, shape=eig_vec.shape)
eig_vecs = jnp.dot(jnp.transpose(vecs), eig_vecs)
coeffs = jnp.dot(random_vec, eig_vecs)
scaled_vecs = (eig_vecs * coeffs * (eigs_triag > 1e-7))
scaled_vecs = jnp.sum(scaled_vecs, axis=1)
eig_vec = random_vec - scaled_vecs
return eig_vec
vals = key, (eig_vecs, vecs, eigs_triag)
eig_norm = jnp.linalg.norm(eig_vec)
eig_vec = lax.cond(eig_norm < 1e-7, vals, get_eig_vec, eig_vec, lambda x: x)
eig_vec = eig_vec/jnp.linalg.norm(eig_vec)
return jax.lax.stop_gradient(eig_vec)
def min_eigenvector_lanczos(matrix_vector_product, *args, **kwargs):
# If for matrix H, eigenvector v has eigenvalue lambda, then for matrix -H,
# v has eigenvalue -lambda. So we find max eigenvector for -H instead.
neg_mat_vec_product = lambda v: -matrix_vector_product(v)
return max_eigenvector_lanczos(neg_mat_vec_product, *args, **kwargs)
def max_eigenvector_exact(matrix_vector_product, vec_dim, scl=-1,
report_all=False):
"""Get max eigenvector via Scipy eigendecomp."""
@jax.jit
def batched_Hv(v_batched):
return jax.vmap(matrix_vector_product)(v_batched)
H = batched_Hv(jnp.eye(vec_dim))
H = (H + H.T)/2
eig_vals, eig_vecs = safe_eigh(H)
if scl < 0:
eig_vec = eig_vecs[:, -1]
else:
eig_softmax_weights = jnp.exp(
scl*eig_vals - jax.scipy.special.logsumexp(scl*eig_vals))
eig_vec = jnp.sum(eig_vecs * jnp.expand_dims(eig_softmax_weights, axis=0),
axis=1)
eig_vec = jax.lax.stop_gradient(eig_vec)
if report_all:
return eig_vec, (eig_vals, eig_vecs, H)
return eig_vec
def min_eigenvector_exact(matrix_vector_product, vec_dim, scl=-1,
report_all=False):
neg_mat_vec_product = lambda v: -matrix_vector_product(v)
return max_eigenvector_exact(neg_mat_vec_product, vec_dim,
scl=scl, report_all=report_all)
| jax_verify-master | jax_verify/extensions/sdp_verify/eigenvector_utils.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library functions for SDP verification of neural networks."""
# pylint: disable=invalid-name
# Capital letters for matrices
import collections
import functools
from absl import logging
import jax
import jax.numpy as jnp
import jax.scipy
from jax_verify.extensions.sdp_verify import eigenvector_utils
from jax_verify.extensions.sdp_verify import utils
import numpy as np
import optax
import tree
IntBound = utils.IntBound
boundprop = utils.boundprop
flatten = lambda x: utils.flatten(x, backend=jnp)
def dual_fun(verif_instance, dual_vars, key=None, n_iter=30, scl=-1,
exact=False, dynamic_unroll=True, include_info=False):
# pylint: disable=invalid-name
"""Returns the dual objective value.
Args:
verif_instance: a utils.SdpDualVerifInstance, the verification problem
dual_vars: A list of dual variables at each layer
key: PRNGKey passed to Lanczos
n_iter: Number of Lanczos iterations to use
scl: Inverse temperature in softmax over eigenvalues to smooth optimization
problem (if negative treat as hardmax)
exact: Whether to use exact eigendecomposition instead of Lanczos
    dynamic_unroll: bool. Whether to use jax.fori_loop for Lanczos for faster
      JIT compilation. Default is True.
include_info: if True, also return an `info` dict of various other
values computed for the objective
Returns:
Either a single float, the dual upper bound, or if ``include_info=True``,
returns a pair, the dual bound and a dict containing debugging info
"""
key = key if key is not None else jax.random.PRNGKey(0)
assert isinstance(verif_instance, utils.SdpDualVerifInstance)
bounds = verif_instance.bounds
layer_sizes = utils.layer_sizes_from_bounds(bounds)
layer_sizes_1d = [np.prod(np.array(i), dtype=np.int32) for i in layer_sizes]
N = sum(layer_sizes_1d) + 1
info = {}
# Mean activations at each layer
activations_center = [(b.lb + b.ub) / 2 for b in bounds]
# Maximum deviation from mean activations
radius = [(b.ub - b.lb) / 2 for b in bounds]
inner_lagrangian = verif_instance.make_inner_lagrangian(dual_vars)
lagrangian = _make_transformed_lagrangian(
inner_lagrangian, activations_center, radius)
# Construct c_lambda and g_lambda terms
zeros = [jnp.zeros(sz) for sz in layer_sizes]
c_lambda = lagrangian(zeros)
g_lambda = jax.grad(lagrangian)(zeros)
g_lambda = flatten(g_lambda)
info['c_lambda'] = c_lambda
def Hv(v):
"""Hessian-vector product for H_lambda - refer to docstring for `Av()`."""
lag_grad = lambda v2: flatten(jax.grad(lagrangian)(v2))
hv_v = jax.grad(lambda v2: jnp.vdot(lag_grad(v2), v))(zeros)
hv_flat = flatten(hv_v)
return hv_flat
def Av(v):
"""Matrix-vector product.
Args:
v: vector, DeviceArray
Returns:
Av: vector, Device array. A is defined as diag(kappa) - M(lambda) where
M(lambda) = [0, g_lambda';
g_lambda, H_lambda], and these terms correspond to
L~(z) = c_lambda + g_lambda' z + z' H_lambda z
"""
# Expand Mv=[0 g'; g H] [v0;v1] = [g'v1; v0*g + H(v1)] = [Mv0;Mv1]
# Compute Mv0 term
mv_zero = jnp.reshape(jnp.vdot(g_lambda, v[1:]), (1,))
# Compute Mv1 term
mv_rest = Hv(v[1:]) + v[0] * g_lambda
mv = jnp.concatenate([mv_zero, mv_rest], axis=0)
diag_kappa_v = jnp.reshape(dual_vars[-1], mv.shape) * v
av = diag_kappa_v - mv
return jnp.reshape(av, v.shape)
# Construct dual function (dual_vars[-1]=kappa)
if exact:
eig_vec, eig_info = eigenvector_utils.min_eigenvector_exact(
Av, N, scl=scl, report_all=True)
info['eig_info'] = eig_info
else:
eig_vec = eigenvector_utils.min_eigenvector_lanczos(
Av, N, min(N, n_iter), key, scl, dynamic_unroll=dynamic_unroll)
info['eig_vec'] = eig_vec
info['kappa'] = dual_vars[-1]
hess_val = jnp.vdot(eig_vec, Av(eig_vec))/(jnp.vdot(eig_vec, eig_vec))
hess_val = jnp.reshape(hess_val, ())
# Form dual objective
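  # hess_val estimates the minimum eigenvalue of A = diag(kappa) - M(lambda);
  # kappa_hat subtracts its negative part from kappa so that
  # diag(kappa_hat) - M(lambda) is (approximately) PSD, giving the valid bound
  # c_lambda + 0.5 * sum(kappa_hat).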
lambda_minus = jnp.minimum(hess_val, 0.)
kappa_hat = jnp.maximum(0, dual_vars[-1] - lambda_minus)
dual_val = c_lambda + 0.5 * jnp.sum(kappa_hat)
if include_info:
return dual_val, info
return dual_val
def _make_transformed_lagrangian(lagrangian, activations_center, radius):
"""Returns a function that computes transformed Lagrangian L~(z).
Args:
lagrangian: function L(x), the lagrangian with fixed dual variables.
activations_center: list of Device arrays corresponding to mean activations
by layer.
radius: list of Device arrays corresponding to the interval bound radii by
layer.
Returns:
transformed_lagrangian: a function L~(z), defined as
L~(z) = L(activations_center + z * radius).
For compatibility with the paper, the inner optimization should have [-1, 1]
element-wise constraints, so here we re-express:
max_{x: a-rad<=x<=a+rad} L(x), as
max_{z: -1<=z<=1} L~(z), with L~ defined as above.
"""
def transformed_lagrangian(zs):
zs = [a + z * r for (a, z, r) in zip(activations_center, zs, radius)]
return lagrangian(zs)
return transformed_lagrangian
def project_duals(dual_vars, dual_types):
"""Projects dual variables to satisfy dual constraints."""
make_pos = lambda v: None if v is None else jnp.maximum(v, 0)
_project = lambda v, t: make_pos(v) if t == DualVarTypes.INEQUALITY else v
return jax.tree_map(_project, dual_vars, dual_types)
def solve_sdp_dual_simple(verif_instance, key=None, opt=None, num_steps=10000,
eval_every=1000, verbose=False,
use_exact_eig_eval=True, use_exact_eig_train=False,
n_iter_lanczos=100,
kappa_reg_weight=None, kappa_zero_after=None,
device_type=None):
"""Compute verified lower bound via dual of SDP relaxation.
Args:
verif_instance: a utils.SdpDualVerifInstance
key: jax.random.PRNGKey, used for Lanczos
opt: an optax.GradientTransformation instance, the optimizer.
If None, defaults to Adam with learning rate 1e-3.
num_steps: int, the number of outer loop optimization steps
eval_every: int, frequency of running evaluation step
verbose: bool, enables verbose logging
use_exact_eig_eval: bool, whether to use exact eigendecomposition instead of
Lanczos when computing evaluation loss
use_exact_eig_train: bool, whether to use exact eigendecomposition instead
of Lanczos during training
n_iter_lanczos: int, number of Lanczos iterations
kappa_reg_weight: float, adds a penalty of sum(abs(kappa_{1:N})) to loss,
which regularizes kappa_{1:N} towards zero. Default None is disabled.
kappa_zero_after: int, clamps kappa_{1:N} to zero after ``kappa_zero_after``
steps. Default None is disabled.
device_type: string, used to clamp to a particular hardware device. Default
None uses JAX default device placement
Returns:
A pair. The first element is a float, the final dual loss, which forms a
valid upper bound on the objective specified by ``verif_instance``. The
second element is a dict containing various debug info.
"""
assert device_type in (None, 'cpu', 'gpu'), 'invalid device_type'
assert isinstance(verif_instance, utils.SdpDualVerifInstance), 'invalid type'
key = key if key is not None else jax.random.PRNGKey(0)
  opt = opt if opt is not None else optax.adam(1e-3)
dual_vars = jax.tree_map(
lambda s: None if s is None else jnp.zeros(s), verif_instance.dual_shapes)
dual_vars = init_duals_ibp(verif_instance, dual_vars)
# Define loss function
def loss(dual_vars, exact=use_exact_eig_train):
return _loss(dual_vars, exact)
@functools.partial(jax.jit, static_argnums=(1,), backend=device_type)
def _loss(dual_var, exact):
loss_val, step_info = dual_fun(
verif_instance, dual_var, key, n_iter=n_iter_lanczos, exact=exact,
include_info=True)
step_info['loss_val'] = loss_val
return loss_val, step_info
# Define a compiled update step
grad = jax.jit(jax.grad(loss, has_aux=True), backend=device_type)
@functools.partial(jax.jit, backend=device_type)
def grad_step(params, opt_state):
g, info = grad(params)
updates, new_opt_state = opt.update(g, opt_state)
new_params = optax.apply_updates(params, updates)
return new_params, new_opt_state, info
# Optimize parameters in a loop
opt_state = opt.init(dual_vars)
info = collections.defaultdict(list)
loss_log = []
best_loss = 1e9
# Main loop
for i in range(num_steps):
dual_vars, opt_state, step_info = grad_step(dual_vars, opt_state)
loss_val = step_info['loss_val']
print(f'Iter {i}: Loss {loss_val}')
best_loss = min(best_loss, loss_val)
loss_log.append(loss_val)
# Regularization of kappa
if kappa_reg_weight is not None and kappa_reg_weight >= 0:
onehot = jax.nn.one_hot([0], dual_vars[-1].shape[1])
mask = jnp.ones_like(onehot) - onehot
dual_vars[-1] -= mask * kappa_reg_weight
if (kappa_zero_after is not None and kappa_zero_after >= 0 and
i > kappa_zero_after):
onehot = jax.nn.one_hot([0], dual_vars[-1].shape[1])
dual_vars[-1] *= onehot
dual_vars = project_duals(dual_vars, verif_instance.dual_types)
if i % eval_every == 0:
dual_val, _ = loss(dual_vars, exact=use_exact_eig_eval)
info['steps'].append(i)
info['loss_vals'].append(float(dual_val))
if verbose:
print(f'Dual iter {i}: Train loss: {loss_val} Loss {dual_val}')
final_loss = float(loss(dual_vars, exact=use_exact_eig_eval)[0])
info['final_dual_vars'] = dual_vars
info['final_loss'] = final_loss
info['loss_log'] = loss_log
info['best_train_loss'] = best_loss
return final_loss, info
def solve_sdp_dual(verif_instance, key=None, opt=None, num_steps=10000,
verbose=False, eval_every=1000, use_exact_eig_eval=True,
use_exact_eig_train=False, n_iter_lanczos=30, scl=-1.0,
lr_init=1e-3, steps_per_anneal=100, anneal_factor=1.0,
num_anneals=3, opt_name='adam', gd_momentum=0.9,
add_diagnostic_stats=False,
opt_multiplier_fn=None, init_dual_vars=None,
init_opt_state=None, opt_dual_vars=None,
kappa_reg_weight=None, kappa_zero_after=None,
device_type=None, save_best_k=1, include_opt_state=False):
# pylint: disable=g-doc-return-or-yield, g-doc-args
"""Compute verified lower bound via dual of SDP relaxation.
NOTE: This method exposes many hyperparameter options, and the method
signature is subject to change. We instead suggest using
``solve_sdp_dual_simple`` instead if you need a stable interface.
"""
# NB: Whereas the rest of the code in this library is fairly top-down
# readable, avoids excessive `if` statements, tries to make the code look
# like the formalism, etc, this is not the case for this method.
# This is essentially the outer loop, and includes all the debugging/logging/
# optimization tricks we need to get/debug good results.
#
# NB: Time profiling: On toy VerifInstances, JIT compilation dominates time
# cost: JIT compilation takes ~12s, then we do ~3000 steps/sec.
assert device_type in (None, 'cpu', 'gpu'), 'invalid device_type'
assert isinstance(verif_instance, utils.SdpDualVerifInstance), 'invalid type'
key = key if key is not None else jax.random.PRNGKey(0)
dual_vars = jax.tree_map(
lambda s: None if s is None else jnp.zeros(s), verif_instance.dual_shapes)
dual_vars = init_duals_ibp(verif_instance, dual_vars)
if init_dual_vars is not None:
# Casting, here for Colab. Essentially same as `dual_vars = init_dual_vars`
dual_vars = utils.structure_like(init_dual_vars, dual_vars)
if opt_dual_vars is not None:
opt_dual_vars = utils.structure_like(opt_dual_vars, dual_vars)
# Create optimizer
if opt is None:
if (isinstance(steps_per_anneal, float) or
isinstance(steps_per_anneal, int)):
anneal_steps = [steps_per_anneal*(i+1) for i in range(num_anneals)]
else:
anneal_steps = np.cumsum(steps_per_anneal)
anneal_steps = jnp.array(anneal_steps)
def lr_schedule(t):
cur_epoch = jnp.minimum(num_anneals, jnp.sum(t > anneal_steps))
return lr_init * jnp.float_power(anneal_factor, cur_epoch)
opt_class = getattr(optax, opt_name)
base_opt = (opt_class(1., momentum=gd_momentum) if opt_name == 'sgd' else
opt_class(1.))
opt = optax.chain(base_opt, optax.scale_by_schedule(lr_schedule))
if opt_multiplier_fn:
# NB: Interface very specific to tree.map_structure_with_path
# Example: opt_multiplier_fn=lambda path: 0.1 if 'lam' in path else 1.0
opt_multipliers = tree.map_structure_with_path(
lambda path, v: opt_multiplier_fn(path), dual_vars)
opt = optax.chain(base_opt, optax.scale_by_schedule(lr_schedule),
utils.scale_by_variable_opt(opt_multipliers))
else:
opt = optax.chain(base_opt, optax.scale_by_schedule(lr_schedule))
# Define loss function
def loss(dual_vars, loss_scl=scl, exact=use_exact_eig_train):
return _loss(dual_vars, loss_scl, exact)
@functools.partial(jax.jit, static_argnums=(1, 2), backend=device_type)
def _loss(dual_var, loss_scl, exact):
loss_val, step_info = dual_fun(
verif_instance, dual_var, key, n_iter=n_iter_lanczos, exact=exact,
scl=loss_scl, include_info=True)
step_info['loss_val'] = loss_val
return loss_val, step_info
# Define a compiled update step
grad = jax.jit(jax.grad(loss, has_aux=True), backend=device_type)
@functools.partial(jax.jit, backend=device_type)
def grad_step(params, opt_state):
g, info = grad(params)
updates, new_opt_state = opt.update(g, opt_state)
new_params = optax.apply_updates(params, updates)
info['g'] = g
info['updates'] = updates
return new_params, new_opt_state, info
# Optimize parameters in a loop
opt_state = opt.init(dual_vars)
if init_opt_state:
opt_state = utils.structure_like(init_opt_state, opt_state)
info = collections.defaultdict(list)
loss_log = []
store_best = []
recent_eig_vecs = collections.deque(maxlen=10)
best_loss = 1e9
last_H = None
start_i = 0
# Main loop
for i in range(start_i, num_steps):
dual_vars_prev = dual_vars
dual_vars, opt_state, step_info = grad_step(dual_vars, opt_state)
loss_val = step_info['loss_val']
print(f'Iter {i}: Loss {loss_val}')
best_loss = min(best_loss, loss_val)
if add_diagnostic_stats:
info['dual_vars'].append(dual_vars_prev)
eig_vec = step_info['eig_vec']
cosine_sims = []
for prev_eig_vec in recent_eig_vecs:
denom = jnp.sqrt(jnp.linalg.norm(eig_vec)*jnp.linalg.norm(prev_eig_vec))
eig_sim = jnp.sum(prev_eig_vec * eig_vec) / denom
cosine_sims.append(abs(float(eig_sim)))
info['c_lambda'].append(float(step_info['c_lambda']))
info['past_10_cosine_sims'].append(np.array(cosine_sims))
info['g'].append(step_info['g'])
info['updates'].append(step_info['updates'])
if use_exact_eig_train:
# The info is for -H, so to get smallest for H, take negative of max
eig_vals = -step_info['eig_info'][0][-1:-20:-1]
cur_H = step_info['eig_info'][2]
diff_H = 0 if last_H is None else np.linalg.norm(cur_H - last_H)
last_H = cur_H
info['diff_H'].append(float(diff_H))
info['smallest_20_eig_vals'].append(eig_vals)
recent_eig_vecs.appendleft(eig_vec)
loss_log.append(loss_val)
if len(store_best) < save_best_k:
store_best.append((loss_val, dual_vars_prev))
store_best.sort(key=lambda x: x[0])
elif loss_val < store_best[-1][0]:
store_best[-1] = (loss_val, dual_vars_prev)
store_best.sort(key=lambda x: x[0])
# Regularization of kappa
if kappa_reg_weight is not None and kappa_reg_weight >= 0:
onehot = jax.nn.one_hot([0], dual_vars[-1].shape[1])
mask = jnp.ones_like(onehot) - onehot
dual_vars[-1] -= mask * kappa_reg_weight
if (kappa_zero_after is not None and kappa_zero_after >= 0 and
i > kappa_zero_after):
onehot = jax.nn.one_hot([0], dual_vars[-1].shape[1])
dual_vars[-1] *= onehot
dual_vars = project_duals(dual_vars, verif_instance.dual_types)
if opt_dual_vars:
distance_to_opt = jax.tree_map(lambda x, y: jnp.linalg.norm(x - y),
dual_vars, opt_dual_vars)
info['distance_to_opt'].append(distance_to_opt)
if i % eval_every == 0:
dual_val, _ = loss(dual_vars, loss_scl=-1, exact=use_exact_eig_eval)
info['steps'].append(i)
info['loss_vals'].append(float(dual_val))
if verbose:
print(f'Dual iter {i}: Train loss: {loss_val} Loss {dual_val}')
final_loss = float(loss(dual_vars, loss_scl=-1, exact=use_exact_eig_eval)[0])
info['final_dual_vars'] = dual_vars
info['final_opt_state'] = opt_state
info['final_loss'] = final_loss
info['loss_log'] = loss_log
info['store_best'] = store_best
if include_opt_state:
return final_loss, info, opt_state
else:
return final_loss, info
solve_dual_sdp_elided = solve_sdp_dual # Alias
############ Dual initialization ############
DualVarTypes = utils.DualVarTypes
def init_duals(verif_instance, key):
"""Initialize dual variables to zeros."""
del key # unused
assert isinstance(verif_instance, utils.SdpDualVerifInstance)
zeros_or_none = lambda s: None if s is None else jnp.zeros(s)
return jax.tree_map(zeros_or_none, verif_instance.dual_shapes)
def _get_g_lambda(verif_instance, dual_vars):
"""Helper method for IBP initialization."""
# NB: This code is (intentionally) copy-pasted from `dual_fun`, in order to
# keep that method more top-to-bottom readable
bounds = verif_instance.bounds
# Mean activations at each layer
activations_center = [(b.lb + b.ub) / 2 for b in bounds]
# Maximum deviation from mean activations
radius = [(b.ub - b.lb) / 2 for b in bounds]
inner_lagrangian = verif_instance.make_inner_lagrangian(dual_vars)
lagrangian = _make_transformed_lagrangian(
inner_lagrangian, activations_center, radius)
g_lambda = jax.grad(lagrangian)(activations_center)
g_lambda = flatten(g_lambda)
return jnp.reshape(g_lambda, (1, -1))
def init_duals_ibp(verif_instance, dual_vars):
"""Closed-form solution for dual variables which recovers IBP bound."""
zero_duals = jax.tree_map(lambda x: x * 0., dual_vars)
g_lambda = _get_g_lambda(verif_instance, zero_duals)
kappa_opt_zero = jnp.reshape(jnp.sum(jnp.abs(g_lambda)), (1, 1))
kappa_opt = jnp.concatenate([kappa_opt_zero, jnp.abs(g_lambda)], axis=1)
ibp_duals = zero_duals[:-1] + [kappa_opt]
return ibp_duals
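# Why this recovers the IBP bound (informal): with all multipliers at zero the
# transformed Lagrangian is affine, c_lambda + g_lambda' z, whose maximum over
# z in [-1, 1]^N is c_lambda + sum(|g_lambda|). The kappa above makes
# diag(kappa) - M(lambda) diagonally dominant (hence PSD), so the dual value
# c_lambda + 0.5 * sum(kappa) equals that same quantity.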
| jax_verify-master | jax_verify/extensions/sdp_verify/sdp_verify.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Small helper functions."""
import abc
import collections
import dataclasses
import enum
from typing import Callable, List, Optional, Union
import chex
import jax.numpy as jnp
import jax_verify
from jax_verify.extensions.functional_lagrangian import lagrangian_form
from jax_verify.extensions.sdp_verify import utils as sdp_utils
import ml_collections
Params = collections.namedtuple('Params', ['inner', 'outer'])
ParamsTypes = collections.namedtuple('ParamsTypes',
['inner', 'outer', 'lagrangian_form'])
DataSpec = collections.namedtuple(
'DataSpec',
['input', 'true_label', 'target_label', 'epsilon', 'input_bounds'])
Array = chex.Array
ArrayTree = chex.ArrayTree
ConfigDict = ml_collections.ConfigDict
IntervalBound = jax_verify.IntervalBound
Tensor = jnp.array
LayerParams = Union['FCParams', 'ConvParams']
LagrangianForm = lagrangian_form.LagrangianForm
ModelParams = List[LayerParams]
ModelParamsElided = ModelParams
class AbstractParams(abc.ABC):
"""AbstractParams."""
def __call__(self, inputs: Tensor) -> Tensor:
"""Forward pass on layer."""
return sdp_utils.fwd(inputs, self.params)
@property
@abc.abstractmethod
def params(self):
"""Representation of params with sdp_utils.fwd convention."""
@property
def has_bounds(self):
return self.w_bound is not None or self.b_bound is not None # pytype: disable=attribute-error # bind-properties
@dataclasses.dataclass
class FCParams(AbstractParams):
"""Params of fully connected layer."""
w: Tensor
b: Tensor
w_bound: Optional[IntervalBound] = None
b_bound: Optional[IntervalBound] = None
w_std: Optional[Tensor] = None
b_std: Optional[Tensor] = None
dropout_rate: float = 0.0
@property
def params(self):
return (self.w, self.b)
@dataclasses.dataclass
class ConvParams(AbstractParams):
"""Params of convolutional layer."""
w: Tensor
b: Tensor
stride: int
padding: str
n_cin: Optional[int] = None
w_bound: Optional[IntervalBound] = None
b_bound: Optional[IntervalBound] = None
w_std: Optional[Tensor] = None
b_std: Optional[Tensor] = None
dropout_rate: float = 0.0
@property
def params(self):
return {
'W': self.w,
'b': self.b,
'n_cin': self.n_cin,
'stride': self.stride,
'padding': self.padding,
}
class SpecType(enum.Enum):
# `params` represent a network of repeated relu(Wx+b)
# The final output also includes a relu activation, and `obj` composes
# the final layer weights with the original objective
UNCERTAINTY = 'uncertainty'
ADVERSARIAL = 'adversarial'
ADVERSARIAL_SOFTMAX = 'adversarial_softmax'
PROBABILITY_THRESHOLD = 'probability_threshold'
class Distribution(enum.Enum):
"""Distribution of the weights and biases."""
GAUSSIAN = 'gaussian'
BERNOULLI = 'bernoulli'
class NetworkType(enum.Enum):
"""Distribution of the weights and biases."""
DETERMINISTIC = 'deterministic'
STOCHASTIC = 'stochastic'
@dataclasses.dataclass(frozen=True)
class InnerVerifInstance:
"""Specification of inner problems."""
affine_fns: List[Callable[[Array], Array]]
bounds: List[sdp_utils.IntervalBound]
lagrangian_form_pre: Optional[LagrangianForm]
lagrangian_form_post: Optional[LagrangianForm]
lagrange_params_pre: Optional[ArrayTree]
lagrange_params_post: Optional[ArrayTree]
is_first: bool
is_last: bool
idx: int
spec_type: SpecType
affine_before_relu: bool
@property
def same_lagrangian_form_pre_post(self) -> bool:
if self.is_first:
return True
elif self.is_last:
return True
else:
name_pre = self.lagrangian_form_pre.name
name_post = self.lagrangian_form_post.name
return name_pre == name_post
| jax_verify-master | jax_verify/extensions/functional_lagrangian/verify_utils.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lagrangian penalty functions."""
import abc
from typing import Sequence, Union
import jax
import jax.numpy as jnp
import jax.random as random
import ml_collections
PRNGKey = jnp.array
Tensor = jnp.array
Params = Union[Tensor, Sequence[Tensor]]
Shape = Union[int, Sequence[int]]
ConfigDict = ml_collections.ConfigDict
def _flatten_spatial_dims(x: Tensor) -> Tensor:
"""Flatten spatial dimensions (assumed batched)."""
return jnp.reshape(x, [x.shape[0], -1])
def size_from_shape(shape: Shape) -> int:
return int(jnp.prod(jnp.array(shape)))
class LagrangianForm(metaclass=abc.ABCMeta):
"""Abstract class for Lagrangian form."""
def __init__(self, name):
self._name = name
@abc.abstractmethod
def _init_params_per_sample(self, key: PRNGKey, *args) -> Params:
"""Initialize the parameters of the Lagrangian form."""
def init_params(self, key, *args, **kwargs):
params = self._init_params_per_sample(key, *args, **kwargs)
# expansion below currently assumes batch-size of 1
return jax.tree_map(lambda p: jnp.expand_dims(p, 0), params)
@abc.abstractmethod
def _apply(self, x: Tensor, lagrange_params: Params, step: int) -> Tensor:
"""Apply the Lagrangian form the input x given lagrange_params."""
def apply(self, x: Tensor, lagrange_params: Params, step: int) -> Tensor:
"""Apply the Lagrangian form the input x given lagrange_params.
Args:
x: layer inputs, assumed batched (in leading dimension). Note that the
spatial dimensions of x are flattened.
lagrange_params: parameters of the lagrangian parameters, assumed to have
the same batch-size as x. If provided as None, this function returns 0.
step: outer optimization iteration number (unused).
Returns:
value_per_sample: Lagrangian penalty per element of the mini-batch.
"""
if lagrange_params is None:
return jnp.array(0.0)
x = _flatten_spatial_dims(x)
value_per_sample = self._apply(x, lagrange_params, step)
return value_per_sample
def process_params(self, lagrange_params: Params):
return lagrange_params
@property
def name(self):
"""Return name."""
return self._name
class Linear(LagrangianForm):
"""Linear LagrangianForm (equivalent to DeepVerify formulation)."""
def __init__(self):
super().__init__('Linear')
def _init_params_per_sample(self,
key: PRNGKey,
l_shape: Shape,
init_zeros: bool = True) -> Params:
size = size_from_shape(l_shape)
if init_zeros:
return jnp.zeros([size])
else:
return random.normal(key, [size])
def _apply_per_sample(self, x: Tensor, lagrange_params: Params,
step: int) -> Tensor:
del step
return jnp.dot(x, lagrange_params)
def _apply(self, x: Tensor, lagrange_params: Params, step: int) -> Tensor:
apply_per_sample = lambda a, b: self._apply_per_sample(a, b, step)
return jax.vmap(apply_per_sample)(x, lagrange_params)
class LinearExp(LagrangianForm):
"""LinearExp LagrangianForm."""
def __init__(self):
super().__init__('LinearExp')
def _init_params_per_sample(self,
key: PRNGKey,
l_shape: Shape,
init_zeros: bool = False) -> Params:
size = size_from_shape(l_shape)
if init_zeros:
return jnp.zeros([size]), jnp.ones(()), jnp.zeros([size])
else:
return (1e-4 * random.normal(key, [size]), 1e-2 * random.normal(key, ()),
1e-2 * random.normal(key, [size]))
def _apply_per_sample(self, x: Tensor, lagrange_params: Params,
step: int) -> Tensor:
del step
linear_term = jnp.dot(x, lagrange_params[0])
lagrange_params = self.process_params(lagrange_params)
exp_term = lagrange_params[1] * jnp.exp(jnp.dot(x, lagrange_params[2]))
return linear_term + exp_term
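  # In symbols, this evaluates L(x) = <a, x> + b * exp(<c, x>) per sample,
  # where lagrange_params = (a, b, c).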
def _apply(self, x: Tensor, lagrange_params: Params, step: int) -> Tensor:
apply_per_sample = lambda a, b: self._apply_per_sample(a, b, step)
return jax.vmap(apply_per_sample)(x, lagrange_params)
def get_lagrangian_form(config_lagrangian_form: ConfigDict) -> LagrangianForm:
"""Create the Lagrangian form."""
name = config_lagrangian_form['name']
kwargs = config_lagrangian_form['kwargs']
if name == 'linear':
return Linear(**kwargs)
elif name == 'linear_exp':
return LinearExp(**kwargs)
else:
raise NotImplementedError(f'Unrecognized lagrangian functional: {name}')
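# Illustrative sketch of typical construction and use (the config values and
# shapes below are assumptions, not prescribed by this module):
#   form = get_lagrangian_form(
#       ml_collections.ConfigDict({'name': 'linear_exp', 'kwargs': {}}))
#   params = form.init_params(jax.random.PRNGKey(0), l_shape=(16,))
#   value = form.apply(x, params, step=0)  # x of shape [1, 16]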
| jax_verify-master | jax_verify/extensions/functional_lagrangian/lagrangian_form.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library functions for verification of neural networks using functional lagrange multipliers."""
import abc
import dataclasses
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import jax
from jax import lax
import jax.numpy as jnp
from jax_verify.extensions.functional_lagrangian import lagrangian_form as lag_form
from jax_verify.extensions.functional_lagrangian import verify_utils
from jax_verify.extensions.sdp_verify import sdp_verify
from jax_verify.extensions.sdp_verify import utils as sdp_utils
from jax_verify.src import bound_propagation
from jax_verify.src import graph_traversal
from jax_verify.src import synthetic_primitives
from jax_verify.src.types import Nest
import numpy as np
import optax
Params = verify_utils.Params
ParamsTypes = verify_utils.ParamsTypes
InnerVerifInstance = verify_utils.InnerVerifInstance
LagrangianForm = lag_form.LagrangianForm
class DualOp(bound_propagation.Bound):
"""Lagrangian dual contribution."""
def __init__(
self,
name,
base_bound: bound_propagation.Bound,
affine_fn: Callable[[jnp.ndarray], jnp.ndarray],
inputs: Optional[Sequence[Union['DualOp', jnp.ndarray]]] = None,
relu_preact_name: Optional[int] = None,
):
self.name = name
self._base_bound = base_bound
self._affine_fn = affine_fn
self._inputs = inputs
self._relu_preact_name = relu_preact_name
@property
def base_bound(self) -> bound_propagation.Bound:
return self._base_bound
@property
def lower(self) -> jnp.ndarray:
return self._base_bound.lower
@property
def upper(self) -> jnp.ndarray:
return self._base_bound.upper
@property
def shape(self) -> Sequence[int]:
return self._base_bound.lower.shape
def affine(self, act_or_input):
return self._affine_fn(act_or_input)
@property
def is_input(self) -> bool:
return self._inputs is None
@property
def is_relu(self) -> bool:
return self._relu_preact_name is not None
@property
def relu_preact_name(self) -> int:
if self._relu_preact_name is None:
raise ValueError('Not an activation.')
return self._relu_preact_name
@property
def inputs(self) -> Sequence[Union['DualOp', jnp.ndarray]]:
if self._inputs is None:
raise ValueError('Input node does not have inputs')
return self._inputs
_affine_primitives_list = [
*bound_propagation.AFFINE_PRIMITIVES,
*bound_propagation.RESHAPE_PRIMITIVES,
lax.div_p,
]
class _LagrangianTransform(graph_traversal.GraphTransform[DualOp]):
"""Identifies graph nodes having Lagrangian dual contributions."""
def __init__(self, boundprop_transform: bound_propagation.BoundTransform):
"""Defines propagation of Lagrangian dual contributions.
Args:
boundprop_transform: Basic Jax primitive ops' equivalents for
the underlying bound propagation method.
"""
self._boundprop_transform = boundprop_transform
def input_transform(self, context, input_bound):
in_bounds = self._boundprop_transform.input_transform(context, input_bound)
return DualOp(context.index, in_bounds, lambda x: x, inputs=None)
def primitive_transform(self, context, primitive, *args, **params):
interval_args = [arg.base_bound if isinstance(arg, DualOp) else arg
for arg in args]
out_bounds, = self._boundprop_transform.equation_transform(
context, primitive, *interval_args, **params)
if primitive in _affine_primitives_list:
if (primitive in bound_propagation.BILINEAR_PRIMITIVES and
isinstance(args[0], DualOp) and isinstance(args[1], DualOp)):
raise NotImplementedError(
'Multiplication with two non-constant inputs is not supported')
elif primitive == lax.div_p and isinstance(args[1], DualOp):
raise NotImplementedError(
f'Division with non-constant divisor {args[1]} is not supported')
# Compose this affine primitive with the inputs' own affine functions
# in terms of the previous ReLU activation (or original network input).
def affine_fn(act_or_input):
return primitive.bind(*[
arg.affine(act_or_input) if isinstance(arg, DualOp) else arg
for arg in args], **params)
return DualOp(context.index, out_bounds, affine_fn, inputs=args)
elif primitive == synthetic_primitives.relu_p:
return DualOp(
context.index, out_bounds, lambda x: x, inputs=args,
relu_preact_name=args[0].name)
else:
raise NotImplementedError(f'Unsupported primitive: {primitive}')
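# Note: for affine primitives, each DualOp's `affine` function maps the
# previous ReLU activation (or the network input) directly to that node's
# value by composing every affine primitive encountered in between; ReLU nodes
# simply record the name of their pre-activation. This is what lets the inner
# maximisation problems below be phrased block by block between activations.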
class InnerMaxStrategy(metaclass=abc.ABCMeta):
"""Solve inner maximisations."""
jittable = True
@abc.abstractmethod
def solve_max(
self,
inner_dual_vars: Any,
opt_instance: InnerVerifInstance,
key: jnp.ndarray,
step: int,
) -> jnp.ndarray:
"""Solve maximization problem of opt_instance.
Args:
inner_dual_vars: Dual variables for the inner maximisation.
opt_instance: Verification instance that defines optimization problem to
be solved.
key: Jax PRNG key.
step: outer optimization iteration number.
Returns:
max_value: final value of the objective function found.
"""
def supports_stochastic_parameters(self):
return False
def build_spec(self, opt_instance: InnerVerifInstance, step: int,
softmax: bool = False):
"""Build objective function for the maximization problem."""
    # affine_fns are assumed to be non-batched in both inputs and outputs
affine_fns = opt_instance.affine_fns
lag_form_pre = opt_instance.lagrangian_form_pre
lag_form_post = opt_instance.lagrangian_form_post
def forward_relu_before_affine(x):
# we use relu before affine ordering
# -> first op is relu unless this is the first layer
if not opt_instance.is_first:
x = jax.nn.relu(x)
# forward through intermediate layers of opt_instance
for affine_fn in affine_fns[:-1]:
x = affine_fn(x)
x = jax.nn.relu(x)
# forward through last layer of opt_instance
x = affine_fns[-1](x)
return x
def forward_affine_before_relu(x):
# forward through intermediate layers of opt_instance
for affine_fn in affine_fns[:-1]:
x = affine_fn(x)
x = jax.nn.relu(x)
# forward through last layer of opt_instance, which contains activations
# unless it is the last layer of the network
x = affine_fns[-1](x)
if not opt_instance.is_last:
x = jax.nn.relu(x)
return x
forward = (
forward_affine_before_relu if opt_instance.affine_before_relu
else forward_relu_before_affine)
def obj_first(x, duals_pre, duals_post):
del duals_pre # unused
return lag_form_post.apply(forward(x), duals_post, step)
def obj_intermediate(x, duals_pre, duals_post):
return (lag_form_post.apply(forward(x), duals_post, step)
- lag_form_pre.apply(x, duals_pre, step))
def obj_last(x, duals_pre, duals_post):
del duals_post # unused
if softmax:
y = jax.nn.softmax(x)
else:
y = x
return forward(y) - lag_form_pre.apply(x, duals_pre, step)
if opt_instance.is_first:
return obj_first
elif opt_instance.is_last:
return obj_last
else:
return obj_intermediate
def init_duals(
self,
boundprop_transform: bound_propagation.BoundTransform,
spec_type: verify_utils.SpecType,
affine_before_relu: bool,
spec_fn: Callable[..., jnp.ndarray],
key: jnp.ndarray,
lagrangian_form_per_layer: Iterable[LagrangianForm],
*input_bounds: Nest[graph_traversal.GraphInput],
) -> Tuple[Dict[int, DualOp], Params, ParamsTypes]:
"""Initialize the dual parameters and their types (Inequality vs Equality).
Args:
boundprop_transform: Underlying bound propagation method.
spec_type: Type of specification, adversarial robustness, uncertainty.
affine_before_relu: whether layer ordering uses the affine layer before
the ReLU.
spec_fn: Specification function to bound above.
key: PRNGKey used while initializing trainable params.
lagrangian_form_per_layer: Sequence of LagrangianForm
instances whose 'init_params' function initialises the parameters of
the layer's functional Lagrangian.
*input_bounds: Interval bounds on the inputs of `spec_fn`.
Returns:
env: Lagrangian computations for each contributing graph node.
dual_params: lagrangian parameters as 'outer', dummy params as 'inner'.
dual_params_types: constraint types (inequality vs equality) for
'outer' and 'inner', governing whether to project.
"""
# Analyse the graph, propagating (or applying) bounds along the way.
_, env = bound_propagation.bound_propagation(
bound_propagation.ForwardPropagationAlgorithm(
_LagrangianTransform(boundprop_transform)),
spec_fn, *input_bounds)
env = {
op.name: op for op in env.values()
if isinstance(op, DualOp)}
make_equality_constraint = lambda s: sdp_utils.DualVarTypes.EQUALITY
# initialize outer variables and types
lagrangian_form = {}
lagrange_params = {}
lagrangian_form_iter = iter(lagrangian_form_per_layer)
for name, op in env.items():
if op.is_relu:
lagrangian_form[name] = next(lagrangian_form_iter)
key, layer_key = jax.random.split(key, 2)
lagrange_params[name] = lagrangian_form[name].init_params(
layer_key, op.shape[1:])
elif op.is_input or op.name == max(env):
# special case for first and last layers
lagrangian_form[name] = None
lagrange_params[name] = None
lagrange_params_types = jax.tree_map(
make_equality_constraint, lagrange_params)
inner_problems = _enumerate_inner_max_problems(
env, lagrangian_form, lagrange_params, spec_type, affine_before_relu)
# Initialize inner variables and types
inner_params = []
inner_params_types = []
for inner_problem in inner_problems:
layer_inner_params, layer_inner_params_types = (
self.init_layer_inner_params(inner_problem))
inner_params.append(layer_inner_params)
inner_params_types.append(layer_inner_params_types)
dual_params = Params(inner=inner_params, outer=lagrange_params)
dual_params_types = ParamsTypes(
inner=inner_params_types, outer=lagrange_params_types,
lagrangian_form=lagrangian_form)
return env, dual_params, dual_params_types
@abc.abstractmethod
def init_layer_inner_params(
self, opt_instance: verify_utils.InnerVerifInstance) -> Tuple[Any, Any]:
"""Initialises duals and their types for a single inner maximisation.
Args:
opt_instance: The context (nearby bounds and outer duals) for the
layer's inner maximisation problem.
Returns:
inner_params: parameters for the 'inner' optimisation.
inner_params_types: constraint types (inequality vs equality) for
the 'inner' optimisation, governing whether to project.
"""
def project_dual(dual_params: Params,
dual_params_types: ParamsTypes) -> Params:
"""Project the dual variables."""
projected_inner_vars = sdp_verify.project_duals(dual_params.inner,
dual_params_types.inner)
projected_outer_vars = sdp_verify.project_duals(dual_params.outer,
dual_params_types.outer)
new_dual_params = dual_params._replace(
inner=projected_inner_vars, outer=projected_outer_vars)
return new_dual_params
def build_dual_fun(
env: Dict[int, DualOp],
lagrangian_form: Dict[int, LagrangianForm],
inner_opt: InnerMaxStrategy,
affine_before_relu: bool,
spec_type: verify_utils.SpecType,
merge_problems: Optional[Dict[int, int]] = None,
) -> Callable[[Params, jnp.ndarray, int], jnp.ndarray]:
"""Build the dual function that takes as input the inner/outer lagrangian parameters.
Args:
env: Lagrangian computations for each contributing graph node.
lagrangian_form: Dictionary, keyed by layer index, of LagrangianForm
instances whose 'apply' function accepts hidden-layer activations and
      the parameters for the functional lagrange multiplier, and returns a scalar
value.
inner_opt: Inner optimisation strategy.
affine_before_relu: whether layer ordering uses the affine layer before
the ReLU.
spec_type: Specification type, adversarial or uncertainty specification.
merge_problems: the key of the dictionary corresponds to the index of the
layer to begin the merge, and the associated value corresponds to the
number of consecutive layers to be merged with it.
For example, `{0: 2, 2: 3}` will merge together layer 0 and 1,
as well as layers 2, 3 and 4.
Returns:
A function that is a (possibly proxy) upper bound on the verification
objective, and takes as input the inner and outer dual variables, and the
PRNG key.
"""
def dual_loss_fun(
dual_params: Params, key: jnp.ndarray, step: int
) -> jnp.ndarray:
lagrange_params = dual_params.outer
inner_vars_list = dual_params.inner
inner_problems = _enumerate_inner_max_problems(
env, lagrangian_form, lagrange_params, spec_type, affine_before_relu)
if merge_problems:
inner_problems = _merge_specified_instances(
inner_problems, merge_problems)
# accumulate loss over inner optimization problems
loss = 0.0
stats = {}
for inner_problem, inner_vars in zip(inner_problems, inner_vars_list):
key, inner_key = jax.random.split(key, 2)
loss_inner_problem = inner_opt.solve_max(
inner_vars, inner_problem, key=inner_key, step=step)
assert loss_inner_problem.ndim == 1
# assuming batch_size of 1 for now
loss_inner_problem = jnp.reshape(loss_inner_problem, ())
stats[f'loss_problem_{inner_problem.idx}'] = loss_inner_problem
loss += loss_inner_problem
stats['loss'] = loss
return loss, stats # pytype: disable=bad-return-type # jnp-array
return dual_loss_fun
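# Note: dual_loss_fun above computes, schematically,
#   sum_k  max_{x_k in bounds_k} [ L_k(forward_k(x_k)) - L_{k-1}(x_k) ],
# where L_k is the Lagrangian form attached to the k-th activation; the first
# problem drops the L_{k-1} term and the last problem uses the (elided)
# specification output in place of L_k (see InnerMaxStrategy.build_spec).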
def _enumerate_inner_max_problems(
env: Dict[int, DualOp],
lagrangian_form: Dict[int, LagrangianForm],
lagrange_params: Dict[int, Any],
spec_type: verify_utils.SpecType,
affine_before_relu: bool,
) -> List[InnerVerifInstance]:
"""Enumerates the inner maximisation problems."""
  # iteratively create inner problems: each inner problem links the
# output of a layer to the next
inner_problems = []
idx = 0
for op in env.values():
is_last = op.name == max(env)
if op.is_relu or is_last:
preact_op = env[op.relu_preact_name] if op.is_relu else op
# Search for the previous ReLU.
prev_op = preact_op
while not (prev_op.is_input or prev_op.is_relu):
input_ops = [io for io in prev_op.inputs if isinstance(io, DualOp)]
if len(input_ops) != 1:
raise NotImplementedError('Multi-input ops not currently supported.')
prev_op = input_ops[0]
prev_preact_op = prev_op.inputs[0] if prev_op.is_relu else None
# Lagrange parameters for the equality constraint just before the layer
lagrange_params_pre = lagrange_params[prev_op.name]
# Lagrange parameters for the equality constraint just after the layer
lagrange_params_post = lagrange_params[op.name]
# corresponding constraints (obtained via e.g. bound propagation)
bounds_pre = sdp_utils.IntBound(
lb_pre=(prev_preact_op.lower if prev_preact_op is not None
else prev_op.lower),
ub_pre=(prev_preact_op.upper if prev_preact_op is not None
else prev_op.upper),
lb=prev_op.lower, ub=prev_op.upper)
bounds_post = sdp_utils.IntBound(
lb_pre=None, ub_pre=None, # not needed
lb=op.lower, ub=op.upper)
lagrangian_form_pre = lagrangian_form[prev_op.name]
lagrangian_form_post = lagrangian_form[op.name]
# create inner optimization problem
opt_instance = verify_utils.InnerVerifInstance(
affine_fns=[preact_op.affine],
bounds=[bounds_pre, bounds_post],
is_first=(lagrange_params_pre is None), is_last=is_last,
lagrangian_form_pre=lagrangian_form_pre,
lagrangian_form_post=lagrangian_form_post,
lagrange_params_post=lagrange_params_post,
lagrange_params_pre=lagrange_params_pre,
idx=idx,
spec_type=spec_type, affine_before_relu=affine_before_relu)
# if not last layer, lagrange_params_post cannot be None
assert(opt_instance.is_last or
opt_instance.lagrange_params_post is not None)
inner_problems.append(opt_instance)
idx += 1
if spec_type == verify_utils.SpecType.UNCERTAINTY:
    # For the uncertainty spec, this (second-to-last) problem corresponds to
    # the logits layer; is_last is set so that it is treated without a ReLU
    # when the affine_before_relu flag is true.
inner_problems[-2] = dataclasses.replace(inner_problems[-2], is_last=True)
return inner_problems
def _merge_specified_instances(
instances: Sequence[InnerVerifInstance],
merge_specification: Dict[int, int],
) -> Sequence[InnerVerifInstance]:
"""Merge instances according to the specified list of groups to merge."""
merged_instances = []
idx = 0
merge_specification = merge_specification.copy()
while idx < len(instances):
run_length = merge_specification.pop(idx, 1) # default to single
instances_to_merge = instances[idx:(idx+run_length)]
merged_instances.append(_merge_instances(*instances_to_merge))
idx += run_length
if idx > len(instances):
raise ValueError(
f'Invalid specification (index {idx} out of {len(instances)}).')
if merge_specification:
raise ValueError(
f'Unused entry in merge_specification: {merge_specification}.')
return merged_instances
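# Worked example: with 5 instances and merge_specification={0: 2, 2: 3}, the
# loop above merges instances 0-1 into one problem and instances 2-4 into
# another, returning 2 merged instances.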
def _merge_instances(
instance_first: InnerVerifInstance,
*instances_rest: InnerVerifInstance,
) -> InnerVerifInstance:
"""Merge InnerVerifInstances together."""
if not instances_rest:
return instance_first
else:
instance_second, *instances_rest = instances_rest
if (instance_first.lagrangian_form_post
is not instance_second.lagrangian_form_pre):
raise ValueError(
'Cannot merge InnerVerifInstances with different Lagrangian forms.')
merged_instance = dataclasses.replace(
instance_first,
affine_fns=(instance_first.affine_fns + instance_second.affine_fns),
bounds=(instance_first.bounds[:-1] + instance_second.bounds),
is_first=instance_first.is_first,
is_last=instance_second.is_last,
# the solver corresponding to the first idx is used if using mixed strat
)
return _merge_instances(merged_instance, *instances_rest)
def make_opt_and_num_steps(opt_config):
"""Get optax optimizer, and number of steps to run training for."""
if opt_config.anneal_lengths:
print('Using custom annealing schedule', opt_config.anneal_lengths)
steps_per_anneal = [int(x) for x in opt_config.anneal_lengths.split(',')]
assert len(steps_per_anneal) > 1, 'for no anneals, do not use this flag'
num_steps = sum(steps_per_anneal)
steps_per_anneal = steps_per_anneal[:-1]
num_anneals = len(steps_per_anneal)
anneal_steps = np.cumsum(steps_per_anneal)
else:
num_anneals = opt_config.num_anneals
num_steps = opt_config.steps_per_anneal * (1 + opt_config.num_anneals)
anneal_steps = [
opt_config.steps_per_anneal *
(i + 1) for i in range(opt_config.num_anneals)
]
anneal_steps = jnp.array(anneal_steps)
def lr_schedule(t):
cur_epoch = jnp.minimum(num_anneals,
jnp.sum(t > anneal_steps))
return opt_config.lr_init * jnp.float_power(opt_config.anneal_factor,
cur_epoch)
opt_class = getattr(optax, opt_config.opt_name)
base_opt = opt_class(1., **opt_config.opt_kwargs)
opt = optax.chain(base_opt, optax.scale_by_schedule(lr_schedule))
return opt, num_steps
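# Worked example (hypothetical config values): with anneal_lengths='100,50,50',
# lr_init=1e-3 and anneal_factor=0.1, this yields num_steps=200 and
# anneal_steps=[100, 150], so the learning rate is 1e-3 up to step 100, 1e-4
# for steps 101-150 and 1e-5 afterwards.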
| jax_verify-master | jax_verify/extensions/functional_lagrangian/dual_build.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API to load model parameters."""
import dataclasses
import os
import pickle
from typing import Any, Optional
import urllib
import jax.numpy as jnp
import jax_verify
from jax_verify.extensions.functional_lagrangian import verify_utils
from jax_verify.src import utils as jv_utils
import ml_collections
import numpy as np
ConfigDict = ml_collections.ConfigDict
ModelParams = verify_utils.ModelParams
INTERNAL_MODEL_PATHS = ml_collections.ConfigDict({
'mnist_ceda': 'models/mnist_ceda.pkl',
'mnist_cnn': 'models/mnist_lenet_dropout.pkl',
'cifar_vgg_16': 'models/cifar_vgg_16_dropout.pkl',
'cifar_vgg_32': 'models/cifar_vgg_32_dropout.pkl',
'cifar_vgg_64': 'models/cifar_vgg_64_dropout.pkl',
})
PROBA_SAFETY_URL = (
'https://github.com/matthewwicker/ProbabilisticSafetyforBNNs/raw/master'
'/MNIST/concurMNIST2/MNIST_Networks')
PROBA_SAFETY_MODEL_PATHS = ml_collections.ConfigDict({
'mnist_mlp_1_1024': 'VIMODEL_MNIST_1_1024_relu.net.npz',
'mnist_mlp_1_128': 'VIMODEL_MNIST_1_128_relu.net.npz',
'mnist_mlp_1_2048': 'VIMODEL_MNIST_1_2048_relu.net.npz',
'mnist_mlp_1_256': 'VIMODEL_MNIST_1_256_relu.net.npz',
'mnist_mlp_1_4096': 'VIMODEL_MNIST_1_4096_relu.net.npz',
'mnist_mlp_1_512': 'VIMODEL_MNIST_1_512_relu.net.npz',
'mnist_mlp_1_64': 'VIMODEL_MNIST_1_64_relu.net.npz',
'mnist_mlp_2_1024': 'VIMODEL_MNIST_2_1024_relu.net.npz',
'mnist_mlp_2_128': 'VIMODEL_MNIST_2_128_relu.net.npz',
'mnist_mlp_2_256': 'VIMODEL_MNIST_2_256_relu.net.npz',
'mnist_mlp_2_512': 'VIMODEL_MNIST_2_512_relu.net.npz',
'mnist_mlp_2_64': 'VIMODEL_MNIST_2_64_relu.net.npz',
})
def _load_pickled_model(root_dir: str, model_name: str) -> ModelParams:
model_path = getattr(INTERNAL_MODEL_PATHS, model_name.lower())
if model_path.endswith('mnist_ceda.pkl'):
with jv_utils.open_file(model_path, 'rb', root_dir=root_dir) as f:
params_iterables = pickle.load(f, encoding='bytes')
else:
with jv_utils.open_file(model_path, 'rb', root_dir=root_dir) as f:
params_iterables = list(np.load(f, allow_pickle=True).item().values())
return make_model_params_from_iterables(params_iterables)
def make_model_params_from_iterables(raw_params: Any) -> ModelParams:
"""Make list of LayerParams from list of iterables."""
conv_field_names = [
f.name for f in dataclasses.fields(verify_utils.ConvParams)
]
fc_field_names = [
f.name for f in dataclasses.fields(verify_utils.FCParams)
]
net = []
for layer_params in raw_params:
if isinstance(layer_params, tuple):
w, b = layer_params
layer = verify_utils.FCParams(w=w, b=b)
elif (isinstance(layer_params, dict)
and layer_params.get('type') == 'linear'):
fc_params = dict(
(k, v) for k, v in layer_params.items() if k in fc_field_names)
if fc_params.get('dropout_rate', 0) > 0:
w = fc_params['w']
# adapt expected value of 'w'
fc_params['w'] = w * (1.0 - fc_params['dropout_rate'])
fc_params['w_bound'] = jax_verify.IntervalBound(
lower_bound=jnp.minimum(w, 0.0), upper_bound=jnp.maximum(w, 0.0))
layer = verify_utils.FCParams(**fc_params)
elif isinstance(layer_params, dict):
conv_params = dict(
(k, v) for k, v in layer_params.items() if k in conv_field_names)
# deal with 'W' vs 'w'
if 'W' in layer_params:
conv_params['w'] = layer_params['W']
layer = verify_utils.ConvParams(**conv_params)
else:
raise TypeError(
f'layer_params type not recognized: {type(layer_params)}.')
net += [layer]
return net
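# Illustrative input format for make_model_params_from_iterables (hypothetical
# shapes; accepted dict keys are the dataclass fields of verify_utils.FCParams
# and verify_utils.ConvParams):
#   raw_params = [
#       (w0, b0),                                            # plain FC layer
#       {'type': 'linear', 'w': w1, 'b': b1, 'dropout_rate': 0.5},
#       {'W': w2, 'b': b2},                        # conv layer; 'W' -> 'w'
#   ]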
def _load_proba_safety_model(
root_dir: str,
model_name: str,
num_std_for_bound: float,
) -> ModelParams:
"""Load model trained in Probabilistic Safety for BNNs paper."""
model_path = getattr(PROBA_SAFETY_MODEL_PATHS, model_name.lower())
local_path = os.path.join(root_dir, model_path)
if not os.path.exists(local_path):
download_url = os.path.join(PROBA_SAFETY_URL, model_path)
urllib.request.urlretrieve(download_url, local_path)
with open(local_path, 'rb') as f:
data = np.load(f, allow_pickle=True, encoding='bytes')
if not isinstance(data, np.ndarray):
data = data['arr_0']
assert len(data) % 4 == 0
net = []
for layer_idx in range(0, len(data) // 2, 2):
# data: [w_0, b_0, w_1, b_1, ..., w_0_std, b_0_std, w_1_std, b_1_std, ...]
w = jnp.array(data[layer_idx])
b = jnp.array(data[layer_idx + 1])
w_std = jnp.array(data[layer_idx + len(data) // 2])
b_std = jnp.array(data[layer_idx + len(data) // 2 + 1])
w_bound = jax_verify.IntervalBound(w - num_std_for_bound * w_std,
w + num_std_for_bound * w_std)
b_bound = jax_verify.IntervalBound(b - num_std_for_bound * b_std,
b + num_std_for_bound * b_std)
net += [
verify_utils.FCParams(
w=w,
b=b,
w_std=w_std,
b_std=b_std,
w_bound=w_bound,
b_bound=b_bound)
]
return net
def load_model(
root_dir: str,
model_name: str,
num_std_for_bound: Optional[float],
) -> ModelParams:
"""Load and process model parameters."""
if model_name.startswith('mnist_mlp'):
return _load_proba_safety_model(root_dir, model_name, num_std_for_bound)
else:
return _load_pickled_model(root_dir, model_name)
| jax_verify-master | jax_verify/extensions/functional_lagrangian/model.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to obtain interval bounds on the activations."""
import dataclasses
import functools
import time
from typing import Any, Dict, List, Sequence, Tuple, Union
import jax
import jax.numpy as jnp
import jax_verify
from jax_verify.extensions.functional_lagrangian import specification
from jax_verify.extensions.functional_lagrangian import verify_utils
from jax_verify.extensions.sdp_verify import boundprop_utils
from jax_verify.extensions.sdp_verify import utils as sdp_utils
from jax_verify.src import bound_propagation
from jax_verify.src import synthetic_primitives
import ml_collections
ConfigDict = ml_collections.ConfigDict
DataSpec = verify_utils.DataSpec
IntervalBound = jax_verify.IntervalBound
SpecType = verify_utils.SpecType
Tensor = jnp.array
LayerParams = verify_utils.LayerParams
ModelParams = verify_utils.ModelParams
ModelParamsElided = verify_utils.ModelParamsElided
def make_elided_params_and_bounds(
config: ConfigDict,
data_spec: DataSpec,
spec_type: SpecType,
params: ModelParams,
) -> Tuple[ModelParamsElided, Sequence[sdp_utils.IntBound], Tensor, float]:
"""Make the elided parameters and bounds according to the specification."""
if spec_type in (verify_utils.SpecType.UNCERTAINTY,
verify_utils.SpecType.PROBABILITY_THRESHOLD):
probability_threshold = 0.
if spec_type == verify_utils.SpecType.PROBABILITY_THRESHOLD:
probability_threshold = config.problem.probability_threshold
params_elided = specification.elide_uncertainty_spec(
params, data_spec, probability_threshold)
# special care of indexing below since the spec layer has been added on top
# of the logits layer in params_elided
params_elided_boundprop = specification.elide_uncertainty_spec(
params, data_spec, 0.)
# Add specification layer to graph and perform bound propagation through
# the whole model
start_time = time.time()
bounds = get_bounds(
x=data_spec.input,
epsilon=data_spec.epsilon,
input_bounds=data_spec.input_bounds,
params=params_elided_boundprop,
config=config)
bp_bound = upper_bound_log_softmax(
bounds[-3].lb,
bounds[-3].ub, # bounds on inputs of logits layer
params_elided_boundprop[-2], # logits layer parameters
data_spec.target_label)
elapsed_time = time.time() - start_time
elif spec_type == spec_type.ADVERSARIAL_SOFTMAX:
start_time = time.time()
params_elided = specification.elide_adversarial_softmax_spec(
params, data_spec)
bounds = get_bounds(
x=data_spec.input,
epsilon=data_spec.epsilon,
input_bounds=data_spec.input_bounds,
params=params_elided,
config=config)
# upper bound on difference between target logits and true logits
bp_bound = (
bounds[-2].ub[:, data_spec.target_label] -
bounds[-2].lb[:, data_spec.true_label])
elapsed_time = time.time() - start_time
else:
params_elided = specification.elide_adversarial_spec(params, data_spec)
start_time = time.time()
bounds = get_bounds(
x=data_spec.input,
epsilon=data_spec.epsilon,
input_bounds=data_spec.input_bounds,
params=params_elided,
config=config)
elapsed_time = time.time() - start_time
bp_bound = bounds[-1].ub_pre
return params_elided, bounds, bp_bound, elapsed_time
def _make_all_act_fn(params: ModelParams):
"""Make forward function."""
def all_act_fn(
inputs: Tensor,
*given_params: Sequence[Tuple[Tensor, Tensor]],
) -> Tensor:
given_params = iter(given_params)
net_params = []
for layer_params in params:
if layer_params.has_bounds:
w, b = next(given_params)
kwargs = {}
if layer_params.w_bound is not None:
kwargs['w'] = w
if layer_params.b_bound is not None:
kwargs['b'] = b
layer_params = dataclasses.replace(layer_params, **kwargs)
net_params.append(layer_params)
return sdp_utils.predict_cnn(
net_params, inputs, include_preactivations=True)
return all_act_fn
def _compute_jv_bounds(
input_bound: sdp_utils.IntBound,
params: ModelParams,
method: str,
) -> List[sdp_utils.IntBound]:
"""Compute bounds with jax_verify."""
jv_input_bound = jax_verify.IntervalBound(input_bound.lb, input_bound.ub)
# create a function that takes as arguments the input and all parameters
# that have bounds (as specified in param_bounds) and returns all
# activations
all_act_fun = _make_all_act_fn(params)
# use jax_verify to perform (bilinear) interval bound propagation
jv_param_bounds = [(p.w_bound, p.b_bound) for p in params if p.has_bounds]
if method == 'ibp':
_, jv_bounds = jax_verify.interval_bound_propagation(
all_act_fun, jv_input_bound, *jv_param_bounds)
elif method == 'fastlin':
_, jv_bounds = jax_verify.forward_fastlin_bound_propagation(
all_act_fun, jv_input_bound, *jv_param_bounds)
elif method == 'ibpfastlin':
_, jv_bounds = jax_verify.ibpforwardfastlin_bound_propagation(
all_act_fun, jv_input_bound, *jv_param_bounds)
elif method == 'crown':
_, jv_bounds = jax_verify.backward_crown_bound_propagation(
all_act_fun, jv_input_bound, *jv_param_bounds)
elif method == 'nonconvex':
_, jv_bounds = jax_verify.nonconvex_constopt_bound_propagation(
all_act_fun, jv_input_bound, *jv_param_bounds)
else:
raise ValueError('Unsupported method.')
# re-format bounds with internal convention
bounds = []
for intermediate_bound in jv_bounds:
bounds.append(
sdp_utils.IntBound(
lb_pre=intermediate_bound.lower,
ub_pre=intermediate_bound.upper,
lb=jnp.maximum(intermediate_bound.lower, 0),
ub=jnp.maximum(intermediate_bound.upper, 0)))
return bounds
def _compute_standard_bounds(
x: Tensor,
epsilon: float,
input_bounds: Sequence[int],
params: ModelParams,
config: Union[ConfigDict, Dict[str, Any]],
):
"""Perform bound-propagation and return bounds.
Args:
x: input to the model under verification.
epsilon: radius of l-infinity ball around x.
input_bounds: feasibility bounds of inputs (e.g. [0, 1]).
params: parameters of the model under verification.
config: experiment ConfigDict.
Returns:
List of bounds per layer, including the input bounds as the first element.
"""
for param in params:
if param.has_bounds:
raise ValueError('Unsupported bilinear bound propagation.')
if config['boundprop_type'] == 'nonconvex':
bounds = boundprop_utils.boundprop(
params,
jnp.expand_dims(x, axis=0),
epsilon,
input_bounds,
'nonconvex',
nonconvex_boundprop_steps=config['nonconvex_boundprop_steps'],
nonconvex_boundprop_nodes=config['nonconvex_boundprop_nodes'],
)
elif config['boundprop_type'] == 'crown_ibp':
bounds = boundprop_utils.boundprop(params, jnp.expand_dims(x, axis=0),
epsilon, input_bounds, 'crown_ibp')
else:
# initial bounds for boundprop
init_bounds = sdp_utils.init_bound(x, epsilon, input_bounds=input_bounds)
bounds = [init_bounds] + _compute_jv_bounds(
input_bound=init_bounds, params=params, method=config['boundprop_type'])
return bounds
def get_bounds(
x: Tensor,
epsilon: float,
input_bounds: Sequence[int],
params: ModelParams,
config: Union[ConfigDict, Dict[str, Any]],
) -> List[sdp_utils.IntBound]:
"""Perform bound-propagation and return bounds.
The code assumes that the sequential model can be split into two parts. The
first part (potentially empty) does not contain any bound on the parameters
and can thus use boundprop as usual. The second part (potentially empty)
contains parameter bounds and thus employs a method that supports bilinear
bound propagation.
Args:
x: input to the model under verification.
epsilon: radius of l-infinity ball around x.
input_bounds: feasibility bounds of inputs (e.g. [0, 1]).
params: parameters of the model under verification.
config: experiment ConfigDict.
Returns:
List of bounds per layer, including the input bounds as the first element.
"""
if config['boundprop_type'] != config['bilinear_boundprop_type']:
# when using a different boundprop method for bilinear operations, partition
# parameters used for "standard" boundprop vs bilinear boundprop
first_idx_with_param_bounds = 0
for param in params:
if param.has_bounds:
break
first_idx_with_param_bounds += 1
params_standard_boundprop, params_bilinear_boundprop = (
params[:first_idx_with_param_bounds],
params[first_idx_with_param_bounds:])
else:
params_standard_boundprop = []
params_bilinear_boundprop = params
if params_standard_boundprop:
bounds_standard = _compute_standard_bounds(
x=x,
epsilon=epsilon,
input_bounds=input_bounds,
params=params_standard_boundprop,
config=config,
)
else:
bounds_standard = [
sdp_utils.init_bound(x, epsilon, input_bounds=input_bounds)
]
if params_bilinear_boundprop:
bounds_bilinear = _compute_jv_bounds(
input_bound=bounds_standard[-1],
params=params_bilinear_boundprop,
method=config['bilinear_boundprop_type'],
)
else:
bounds_bilinear = []
return bounds_standard + bounds_bilinear
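# Example (hypothetical config values): with boundprop_type='crown',
# bilinear_boundprop_type='ibp' and parameter bounds first appearing on layer
# 2, layers 0-1 are bounded with CROWN and layers 2 onwards with IBP, starting
# from the last CROWN bound; if both methods coincide, every layer goes
# through _compute_jv_bounds directly.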
class BoundsFromCnn(bound_propagation.BoundTransform):
"""Precomputed bounds from a sequential CNN."""
def __init__(self, bounds: Sequence[sdp_utils.IntBound]):
self._cnn_bounds = bounds
self._cnn_layer_indices = {}
def input_transform(self, context, input_bound):
if context.index not in self._cnn_layer_indices:
self._cnn_layer_indices[context.index] = 0, False
return self._bounds_from_cnn_layer(context.index)
def primitive_transform(self, context, primitive, *args, **kwargs):
if context.index not in self._cnn_layer_indices:
layer_index, was_preact = list(self._cnn_layer_indices.values())[-1]
if not was_preact:
# Previous op was a ReLU. Move into a new layer.
layer_index += 1
is_preact = primitive != synthetic_primitives.relu_p
self._cnn_layer_indices[context.index] = layer_index, is_preact
return self._bounds_from_cnn_layer(context.index)
def _bounds_from_cnn_layer(self, index):
layer_index, is_preact = self._cnn_layer_indices[index]
if is_preact:
return jax_verify.IntervalBound(self._cnn_bounds[layer_index].lb_pre,
self._cnn_bounds[layer_index].ub_pre)
else:
return jax_verify.IntervalBound(self._cnn_bounds[layer_index].lb,
self._cnn_bounds[layer_index].ub)
def _get_reciprocal_bound(
l: jnp.ndarray, u: jnp.ndarray, logits_params: LayerParams, label: int
) -> jnp.ndarray:
"""Helped for computing bound on label softmax given interval bounds on pre logits."""
def fwd(x, w, b):
wdiff = jnp.reshape(w[:, label], [-1, 1]) - w
bdiff = b[label] - b
return x @ wdiff + bdiff
x_bound = jax_verify.IntervalBound(
lower_bound=jnp.reshape(l, [l.shape[0], -1]),
upper_bound=jnp.reshape(u, [u.shape[0], -1]))
params_bounds = []
if logits_params.w_bound is None:
fwd = functools.partial(fwd, w=logits_params.w)
else:
params_bounds.append(logits_params.w_bound)
if logits_params.b_bound is None:
fwd = functools.partial(fwd, b=logits_params.b)
else:
params_bounds.append(logits_params.b_bound)
fwd_bound = jax_verify.interval_bound_propagation(fwd, x_bound,
*params_bounds)
return fwd_bound
def upper_bound_log_softmax(
l: Tensor,
u: Tensor,
logits_params: LayerParams,
target_label: int,
) -> Tensor:
"""Get bound on target label softmax given interval bounds on pre logits.
Args:
l: Array of lower bounds on pre-logits layer.
u: Array of upper bounds on pre-logits layer.
logits_params: parameters of the final logits layer.
target_label: Target label whose softmax we want to bound.
Returns:
Upper bound on log softmax of target label.
"""
fwd_bound = _get_reciprocal_bound(l, u, logits_params, target_label)
return -jax.nn.logsumexp(-fwd_bound.upper) # pytype: disable=attribute-error # jnp-array
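# Derivation sketch for upper_bound_log_softmax: with logits z = x @ w + b, the
# j-th output of fwd is z[target] - z[j], and
#   log softmax(z)[target] = -log sum_j exp(z[j] - z[target])
#                          = -logsumexp(-fwd(x)).
# Since -logsumexp(-t) is nondecreasing in every coordinate of t, replacing
# fwd(x) by its elementwise upper bound gives a valid upper bound.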
| jax_verify-master | jax_verify/extensions/functional_lagrangian/bounding.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to elide the specification objective with the model."""
import dataclasses
import jax.numpy as jnp
import jax_verify
from jax_verify.extensions.functional_lagrangian import verify_utils
import ml_collections
import numpy as np
ConfigDict = ml_collections.ConfigDict
DataSpec = verify_utils.DataSpec
IntervalBound = jax_verify.IntervalBound
SpecType = verify_utils.SpecType
Tensor = jnp.array
LayerParams = verify_utils.LayerParams
ModelParams = verify_utils.ModelParams
ModelParamsElided = verify_utils.ModelParamsElided
def elide_adversarial_spec(
params: ModelParams,
data_spec: DataSpec,
) -> ModelParamsElided:
"""Elide params to have last layer merged with the adversarial objective.
Args:
params: parameters of the model under verification.
data_spec: data specification.
Returns:
params_elided: elided parameters with the adversarial objective folded in
the last layer (and bounds adapted accordingly).
"""
def elide_fn(w_fin, b_fin):
label_onehot = jnp.eye(w_fin.shape[-1])[data_spec.true_label]
target_onehot = jnp.eye(w_fin.shape[-1])[data_spec.target_label]
obj_orig = target_onehot - label_onehot
obj_bp = jnp.matmul(w_fin, obj_orig)
const = jnp.expand_dims(jnp.vdot(obj_orig, b_fin), axis=-1)
obj = jnp.reshape(obj_bp, (obj_bp.size, 1))
return obj, const
last_params = params[-1]
w_elided, b_elided = elide_fn(last_params.w, last_params.b)
last_params_elided = verify_utils.FCParams(w_elided, b_elided)
if last_params.has_bounds:
w_bound_elided, b_bound_elided = jax_verify.interval_bound_propagation(
elide_fn, last_params.w_bound, last_params.b_bound)
last_params_elided = dataclasses.replace(
last_params_elided, w_bound=w_bound_elided, b_bound=b_bound_elided)
params_elided = params[:-1] + [last_params_elided]
return params_elided
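# The elided last layer maps the penultimate activations x to
#   x @ (W (e_target - e_true)) + (e_target - e_true) . b
#     = logit_target - logit_true,
# so an upper bound of at most 0 on its scalar output certifies that the
# target label cannot outscore the true label.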
def elide_adversarial_softmax_spec(
params: ModelParams,
data_spec: DataSpec,
) -> ModelParamsElided:
"""Elide params to have uncertainty objective appended as a new last layer.
Args:
params: parameters of the model under verification.
data_spec: data specification.
Returns:
params_elided: parameters with the uncertainty objective appended as
the last 'layer'.
"""
op_size = params[-1].w.shape[-1]
e = np.zeros((op_size, 1))
e[data_spec.target_label] = 1.
e[data_spec.true_label] = -1.
params_elided = params + [verify_utils.FCParams(jnp.array(e), jnp.zeros(()))]
return params_elided
def elide_uncertainty_spec(
params: ModelParams,
data_spec: DataSpec,
probability_threshold: float,
) -> ModelParamsElided:
"""Elide params to have uncertainty objective appended as a new last layer.
Args:
params: parameters of the model under verification.
data_spec: data specification.
probability_threshold: Maximum probability threshold for OOD detection.
Returns:
params_elided: parameters with the uncertainty objective appended as
the last 'layer'.
"""
op_size = params[-1].w.shape[-1]
e = np.zeros((op_size, 1))
e[data_spec.target_label] = 1.
e -= probability_threshold
params_elided = params + [verify_utils.FCParams(jnp.array(e), jnp.zeros(()))]
return params_elided
| jax_verify-master | jax_verify/extensions/functional_lagrangian/specification.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Solve dual."""
import time
from typing import Any, Callable, Dict, Mapping, Optional, Sequence, Tuple
import jax
import jax.numpy as jnp
import jax_verify
from jax_verify.extensions.functional_lagrangian import bounding
from jax_verify.extensions.functional_lagrangian import dual_build
from jax_verify.extensions.functional_lagrangian import inner_solvers
from jax_verify.extensions.functional_lagrangian import lagrangian_form
from jax_verify.extensions.functional_lagrangian import verify_utils
from jax_verify.extensions.sdp_verify import utils as sdp_utils
import ml_collections
import optax
ConfigDict = ml_collections.ConfigDict
DualOp = dual_build.DualOp
InnerMaxStrategy = dual_build.InnerMaxStrategy
ModelParams = verify_utils.ModelParams
Params = verify_utils.Params
ParamsTypes = verify_utils.ParamsTypes
Tensor = jnp.array
def solve_dual(
config: ConfigDict,
bounds: Sequence[sdp_utils.IntBound],
spec_type: verify_utils.SpecType,
spec_fn: Callable[..., jnp.ndarray],
params: ModelParams,
dual_state: ConfigDict,
mode: str,
logger: Callable[[int, Mapping[str, Any]], None],
) -> Tuple[float, Tensor]:
"""Run verification algorithm and update dual_state."""
key_carry, key_init, key_solve = jax.random.split(
jax.random.PRNGKey(config.seed), 3)
# define Lagrangian form per layer
if isinstance(config.dual.lagrangian_form, list):
lagrangian_form_per_layer = [
lagrangian_form.get_lagrangian_form(x)
for x in config.dual.lagrangian_form
]
else:
lagrangian_form_per_layer = [
lagrangian_form.get_lagrangian_form(config.dual.lagrangian_form)
for _ in bounds
]
inner_opt = inner_solvers.get_strategy(config, params, mode)
input_bounds = jax_verify.IntervalBound(bounds[0].lb, bounds[0].ub)
boundprop_transform = bounding.BoundsFromCnn(bounds)
env, dual_params, dual_params_types = inner_opt.init_duals(
boundprop_transform, spec_type, config.dual.affine_before_relu, spec_fn,
key_init, lagrangian_form_per_layer, input_bounds)
device_type = ('gpu' if config.use_gpu else 'cpu')
if mode == 'train':
opt, num_steps = dual_build.make_opt_and_num_steps(config.outer_opt)
dual_state = solve_dual_train(
env,
key=key_solve,
num_steps=num_steps,
opt=opt,
dual_params=dual_params,
dual_params_types=dual_params_types,
affine_before_relu=config.dual.affine_before_relu,
spec_type=spec_type,
inner_opt=inner_opt,
logger=logger,
device_type=device_type,
block_to_time=config.block_to_time,
dual_state=dual_state,
)
elif mode == 'eval':
dual_state.loss = solve_dual_eval(
env,
step=dual_state.step,
key=key_solve,
dual_params=dual_state.dual_params,
dual_params_types=dual_params_types,
affine_before_relu=config.dual.affine_before_relu,
logger=logger,
inner_opt=inner_opt,
spec_type=spec_type,
)
else:
raise ValueError(f'Invalid mode: {mode}.')
return key_carry
def solve_dual_train(
env: Dict[int, DualOp],
dual_state: ConfigDict,
opt: optax.GradientTransformation,
inner_opt: InnerMaxStrategy,
dual_params: Params,
spec_type: verify_utils.SpecType,
dual_params_types: ParamsTypes,
logger: Callable[[int, Mapping[str, Any]], None],
key: jnp.ndarray,
num_steps: int,
affine_before_relu: bool,
device_type=None,
merge_problems: Optional[Dict[int, int]] = None,
block_to_time: bool = False,
) -> ConfigDict:
"""Compute verified upper bound via functional lagrangian relaxation.
Args:
env: Lagrangian computations for each contributing graph node.
dual_state: state of the dual problem.
opt: an optimizer for the outer Lagrangian parameters.
inner_opt: inner optimization strategy for training.
dual_params: dual parameters to be minimized via gradient-based
optimization.
spec_type: Specification type, adversarial or uncertainty specification.
dual_params_types: types of inequality encoded by the corresponding
dual_params.
logger: logging function.
key: jax.random.PRNGKey.
num_steps: total number of outer optimization steps.
affine_before_relu: whether layer ordering uses the affine layer before the
ReLU.
device_type: string, used to clamp to a particular hardware device. Default
None uses JAX default device placement.
merge_problems: the key of the dictionary corresponds to the index of the
layer to begin the merge, and the associated value corresponds to the
number of consecutive layers to be merged with it.
For example, `{0: 2, 2: 3}` will merge together layer 0 and 1, as well as
layers 2, 3 and 4.
block_to_time: whether to block computations at the end of each iteration to
      account for asynchronous dispatch when timing.
Returns:
    dual_state: new state of the dual problem.
"""
assert device_type in (None, 'cpu', 'gpu'), 'invalid device_type'
# create dual functions
loss_func = dual_build.build_dual_fun(
env=env,
lagrangian_form=dual_params_types.lagrangian_form,
inner_opt=inner_opt,
merge_problems=merge_problems,
affine_before_relu=affine_before_relu,
spec_type=spec_type)
value_and_grad = jax.value_and_grad(loss_func, has_aux=True)
def grad_step(params, opt_state, key, step):
(loss_val, stats), g = value_and_grad(params, key, step)
updates, new_opt_state = opt.update(g, opt_state)
new_params = optax.apply_updates(params, updates)
return new_params, new_opt_state, loss_val, stats
# Some solvers (e.g. MIP) cannot be jitted and run on CPU only
if inner_opt.jittable:
grad_step = jax.jit(grad_step, backend=device_type)
dual_state.step = 0
dual_state.key = key
dual_state.opt_state = opt.init(dual_params)
dual_state.dual_params = dual_params
dual_state.loss = 0.0
dual_state.best_loss = jnp.inf
dual_state.best_dual_params = dual_params
# optimize the dual (Lagrange) parameters with a gradient-based optimizer
while dual_state.step < num_steps:
key_step, dual_state.key = jax.random.split(dual_state.key)
start_time = time.time()
dual_params, dual_state.opt_state, dual_state.loss, stats = grad_step(
dual_state.dual_params, dual_state.opt_state, key_step, dual_state.step)
dual_params = dual_build.project_dual(dual_params, dual_params_types)
if dual_state.loss <= dual_state.best_loss:
dual_state.best_loss = dual_state.loss
# store value from previous iteration as loss corresponds to those params
dual_state.best_dual_params = dual_state.dual_params
dual_state.dual_params = dual_params # projected dual params
if block_to_time:
dual_state.loss.block_until_ready() # asynchronous dispatch
stats['time_per_iteration'] = time.time() - start_time
stats['best_loss'] = dual_state.best_loss
stats['dual_params_norm'] = optax.global_norm(dual_state.dual_params)
logger(dual_state.step, stats)
dual_state.step += 1
return dual_state
def solve_dual_eval(
env: Dict[int, DualOp],
inner_opt: InnerMaxStrategy,
dual_params: Params,
spec_type: verify_utils.SpecType,
dual_params_types: ParamsTypes,
logger: Callable[[int, Mapping[str, Any]], None],
key: jnp.ndarray,
affine_before_relu: bool,
step: int,
merge_problems: Optional[Dict[int, int]] = None,
) -> float:
"""Compute verified upper bound via functional lagrangian relaxation.
Args:
env: Lagrangian computations for each contributing graph node.
inner_opt: inner optimization strategy for evaluation.
dual_params: dual parameters to be minimized via gradient-based
optimization.
spec_type: Specification type, adversarial or uncertainty specification.
dual_params_types: types of inequality encoded by the corresponding
dual_params.
logger: logging function.
key: jax.random.PRNGKey.
affine_before_relu: whether layer ordering uses the affine layer before the
ReLU.
step: outer training iteration number, the functional may depend on this.
merge_problems: the key of the dictionary corresponds to the index of the
layer to begin the merge, and the associated value corresponds to the
number of consecutive layers to be merged with it.
For example, `{0: 2, 2: 3}` will merge together layer 0 and 1, as well as
layers 2, 3 and 4.
Returns:
final_loss: final dual loss, which forms a valid upper bound on the
objective specified by ``verif_instance``.
"""
# create dual functions
loss_func = dual_build.build_dual_fun(
env=env,
lagrangian_form=dual_params_types.lagrangian_form,
inner_opt=inner_opt,
merge_problems=merge_problems,
affine_before_relu=affine_before_relu,
spec_type=spec_type)
start_time = time.time()
final_loss, stats = loss_func(dual_params, key, step)
final_loss.block_until_ready() # accounting for asynchronous dispatch
stats['time_per_iteration'] = time.time() - start_time
logger(0, stats)
return float(final_loss)
| jax_verify-master | jax_verify/extensions/functional_lagrangian/dual_solve.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adversarial attacks."""
import dataclasses
from typing import Callable, Union
import jax
import jax.numpy as jnp
import jax_verify
from jax_verify.extensions.functional_lagrangian import verify_utils
from jax_verify.extensions.sdp_verify import utils as sdp_utils
import optax
IntervalBound = jax_verify.IntervalBound
Tensor = jax.Array
PRNGKey = jax.Array
DataSpec = verify_utils.DataSpec
LayerParams = verify_utils.LayerParams
ModelParams = verify_utils.ModelParams
ModelParamsElided = verify_utils.ModelParamsElided
def sample_truncated_normal(
mean: Tensor,
std: Tensor,
bounds: IntervalBound,
prng_key: PRNGKey
) -> Tensor:
"""Draw sample from truncated normal distribution."""
rescaled_lower = (bounds.lower - mean) / std
rescaled_upper = (bounds.upper - mean) / std
unit_noise = jax.random.truncated_normal(
prng_key, lower=rescaled_lower, upper=rescaled_upper,
shape=mean.shape, dtype=mean.dtype)
return mean + unit_noise * std
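# sample_truncated_normal above rescales the box constraints into standard
# units, draws z ~ TruncatedNormal(rescaled_lower, rescaled_upper) with
# jax.random.truncated_normal, and maps back via mean + z * std, so every
# sample respects the interval bounds.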
def sample_dropout(
mean: Tensor,
dropout_rate: float,
prng_key: PRNGKey,
) -> Tensor:
"""Draw sample from dropout."""
if mean.ndim != 2:
raise ValueError(
f'Dropout only supports 2D parameters (found {mean.ndim} instead).')
retention_rate = 1.0 - dropout_rate
  # reconstruct the initial parameter by undoing the expectation scaling
initial_parameter = mean / retention_rate
# in "parameter space", dropping an input correspond to dropping an entire row
retention_mask_per_input = jax.random.bernoulli(
prng_key, p=retention_rate, shape=[mean.shape[0]])
retention_mask = jnp.expand_dims(retention_mask_per_input, 1)
return initial_parameter * retention_mask
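# Minimal usage sketch for sample_dropout (hypothetical shapes): each row of a
# 4x3 mean parameter is either zeroed out (probability 0.2) or kept and
# rescaled back to the pre-dropout value.
#   key = jax.random.PRNGKey(0)
#   sample = sample_dropout(jnp.ones((4, 3)) * 0.8, dropout_rate=0.2,
#                           prng_key=key)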
def make_params_sampling_fn(
params: Union[ModelParams, ModelParamsElided],
) -> Callable[[PRNGKey], Union[ModelParams, ModelParamsElided]]:
"""Make function that samples new parameters at each call."""
def sample_fn(key: PRNGKey):
sampled_params = []
for layer_params in params:
if layer_params.w_std is not None:
assert layer_params.dropout_rate == 0.0
key, key_w = jax.random.split(key)
w_sampled = sample_truncated_normal(
mean=layer_params.w,
std=layer_params.w_std,
bounds=layer_params.w_bound,
prng_key=key_w,
)
layer_params = dataclasses.replace(layer_params, w=w_sampled)
elif layer_params.dropout_rate > 0.0:
key, key_dropout = jax.random.split(key)
w_sampled = sample_dropout(
mean=layer_params.w,
dropout_rate=layer_params.dropout_rate,
prng_key=key_dropout,
)
layer_params = dataclasses.replace(layer_params, w=w_sampled)
if layer_params.b_std is not None:
key, key_b = jax.random.split(key)
b_sampled = sample_truncated_normal(
mean=layer_params.b,
std=layer_params.b_std,
bounds=layer_params.b_bound,
prng_key=key_b,
)
layer_params = dataclasses.replace(layer_params, b=b_sampled)
sampled_params.append(layer_params)
return sampled_params
return sample_fn
def make_forward(
model_params: ModelParams,
num_samples: int,
) -> Callable[[Tensor, PRNGKey], Tensor]:
"""Make forward_fn with parameter sampling and averaging in softmax space.
Args:
model_params: model parameters.
num_samples: number of samples drawn per call to forward_fn.
Returns:
function that draws parameter samples, averages their results in softmax
space and takes the log.
"""
sampling_fn = make_params_sampling_fn(model_params)
def single_forward(inputs, prng_key):
sampled_params = sampling_fn(prng_key)
logits = sdp_utils.predict_cnn(
sampled_params, jnp.expand_dims(inputs, axis=0))
return jax.nn.log_softmax(logits)
def multiple_forward(inputs, prng_key):
different_keys = jax.random.split(prng_key, num_samples)
sampled_logits = jax.vmap(single_forward, in_axes=[None, 0])(
inputs, different_keys)
sampled_softmax = jax.nn.softmax(sampled_logits)
averaged_softmax = jnp.mean(sampled_softmax, 0)
return jnp.log(averaged_softmax)
if num_samples == 1:
return single_forward
else:
return multiple_forward
def _run_attack(
max_objective_fn: Callable[[Tensor, PRNGKey], Tensor],
projection_fn: Callable[[Tensor], Tensor],
x_init: Tensor,
prng_key: PRNGKey,
num_steps: int,
learning_rate: float,
):
"""Run attack."""
opt = optax.chain(optax.scale(-1), # maximization
optax.adam(learning_rate))
grad_fn = jax.grad(max_objective_fn)
def body_fn(it, inputs):
del it # unused
x, prng_in, opt_state = inputs
prng_out, prng_used = jax.random.split(prng_in)
grad_x = grad_fn(x, prng_used)
updates, opt_state = opt.update(grad_x, opt_state, x)
x = optax.apply_updates(x, updates)
x = projection_fn(x)
return x, prng_out, opt_state
opt_state = opt.init(x_init)
init_state = (x_init, prng_key, opt_state)
x, prng_final, _ = jax.lax.fori_loop(0, num_steps, body_fn, init_state)
return max_objective_fn(x, prng_final)
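# _run_attack above performs projected gradient ascent with Adam: each of the
# num_steps iterations draws a fresh PRNG key (re-sampling any stochastic
# parameters inside max_objective_fn), takes an ascent step and projects the
# iterate back into the feasible box via projection_fn.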
def adversarial_attack(
params: ModelParams,
data_spec: DataSpec,
spec_type: verify_utils.SpecType,
key: PRNGKey,
num_steps: int,
learning_rate: float,
num_samples: int = 1,
) -> float:
"""Adversarial attack on uncertainty spec (with parameter sampling)."""
l = jnp.clip(data_spec.input-data_spec.epsilon,
data_spec.input_bounds[0], data_spec.input_bounds[1])
u = jnp.clip(data_spec.input+data_spec.epsilon,
data_spec.input_bounds[0], data_spec.input_bounds[1])
projection_fn = lambda x: jnp.clip(x, l, u)
forward_fn = make_forward(params, num_samples)
def max_objective_fn_uncertainty(x, prng_key):
logits = jnp.reshape(forward_fn(x, prng_key), [-1])
return logits[data_spec.target_label]
def max_objective_fn_adversarial(x, prng_key):
logits = jnp.reshape(forward_fn(x, prng_key), [-1])
return logits[data_spec.target_label] - logits[data_spec.true_label]
def max_objective_fn_adversarial_softmax(x, prng_key):
logits = jnp.reshape(forward_fn(x, prng_key), [-1])
probs = jax.nn.softmax(logits, axis=-1)
return probs[data_spec.target_label] - probs[data_spec.true_label]
if (spec_type in (verify_utils.SpecType.UNCERTAINTY,
verify_utils.SpecType.PROBABILITY_THRESHOLD)):
max_objective_fn = max_objective_fn_uncertainty
elif spec_type == verify_utils.SpecType.ADVERSARIAL:
max_objective_fn = max_objective_fn_adversarial
elif spec_type == verify_utils.SpecType.ADVERSARIAL_SOFTMAX:
max_objective_fn = max_objective_fn_adversarial_softmax
else:
raise ValueError('Unsupported spec.')
return _run_attack( # pytype: disable=bad-return-type # jax-devicearray
max_objective_fn=max_objective_fn,
projection_fn=projection_fn,
x_init=data_spec.input,
prng_key=key,
num_steps=num_steps,
learning_rate=learning_rate)
| jax_verify-master | jax_verify/extensions/functional_lagrangian/attacks.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data util functions."""
import os
import pickle
from typing import Sequence, Tuple
import jax.numpy as jnp
import jax_verify
from jax_verify.extensions.functional_lagrangian import verify_utils
from jax_verify.extensions.sdp_verify import utils as sdp_utils
from jax_verify.src import utils as jv_utils
import ml_collections
import numpy as np
ConfigDict = ml_collections.ConfigDict
DataSpec = verify_utils.DataSpec
IntervalBound = jax_verify.IntervalBound
SpecType = verify_utils.SpecType
Tensor = jnp.array
LayerParams = verify_utils.LayerParams
ModelParams = verify_utils.ModelParams
ModelParamsElided = verify_utils.ModelParamsElided
DATA_PATH = ml_collections.ConfigDict({
'emnist_CEDA': 'emnist_CEDA.pkl',
'mnist': 'mnist',
'cifar10': 'cifar10',
'emnist': 'emnist',
'cifar100': 'cifar100',
})
def load_dataset(
root_dir: str,
dataset: str,
) -> Tuple[Sequence[np.ndarray], Sequence[np.ndarray]]:
"""Loads the MNIST/CIFAR/EMNIST test set examples, saved as numpy arrays."""
data_path = DATA_PATH.get(dataset)
if dataset == 'emnist_CEDA':
with jv_utils.open_file(data_path, 'rb', root_dir=root_dir) as f:
ds = pickle.load(f)
xs, ys = ds[0], ds[1]
xs = np.reshape(xs, [-1, 28, 28, 1])
ys = np.reshape(ys, [-1])
return xs, ys
else:
x_filename = os.path.join(data_path, 'x_test.npy')
y_filename = os.path.join(data_path, 'y_test.npy')
with jv_utils.open_file(x_filename, 'rb', root_dir=root_dir) as f:
xs = np.load(f)
with jv_utils.open_file(y_filename, 'rb', root_dir=root_dir) as f:
ys = np.load(f)
return xs, ys
def make_data_spec(config_problem: ConfigDict, root_dir: str) -> DataSpec:
"""Create data specification from config_problem."""
xs, ys = load_dataset(root_dir, config_problem.dataset)
if config_problem.dataset in ('cifar10', 'cifar100'):
x = sdp_utils.preprocess_cifar(xs[config_problem.dataset_idx])
epsilon, input_bounds = sdp_utils.preprocessed_cifar_eps_and_input_bounds(
shape=x.shape,
epsilon=config_problem.epsilon_unprocessed,
inception_preprocess=config_problem.scale_center)
else:
x = xs[config_problem.dataset_idx]
epsilon = config_problem.epsilon_unprocessed
input_bounds = (jnp.zeros_like(x), jnp.ones_like(x))
true_label = ys[config_problem.dataset_idx]
target_label = config_problem.target_label_idx
return DataSpec(
input=x,
true_label=true_label,
target_label=target_label,
epsilon=epsilon,
input_bounds=input_bounds)
| jax_verify-master | jax_verify/extensions/functional_lagrangian/data.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Solve linear(softmax(x)) + linear(x) subject to l <= x <= b."""
import itertools
from typing import Union
import jax
import jax.numpy as jnp
import numpy as np
import scipy
def exact_opt_softmax_plus_affine(c_linear: np.ndarray, c_softmax: np.ndarray,
lb: np.ndarray, ub: np.ndarray):
"""Maximize c_softmax'* softmax(x) + c_linear'* x subject to l <= x <= u.
Args:
c_linear: (n,) numpy array of linear coefficients.
c_softmax: (n,) numpy array of softmax coefficients.
lb: (n,) numpy array of lower bounds.
ub: (n,) numpy array of upper bounds.
Returns:
objval: optimal value.
xopt: solution found for the upper bound.
"""
lb = np.reshape(lb, [-1])
ub = np.reshape(ub, [-1])
# Offset for numerical stability
offset = np.max(ub)
lb = lb - offset
ub = ub - offset
def funx(x):
x = np.reshape(x, [-1])
return np.sum(c_softmax * scipy.special.softmax(x)) + np.sum(c_linear * x)
xbest = None
obj_best = -np.inf
for nums in itertools.product(['interior', 'lower', 'upper'], repeat=lb.size):
nonbinding = np.ones((len(nums),))
const_normalization = 0.
const_numerator = 0.
# Make a copy of lb
xsol = lb + np.zeros_like(lb)
for j in range(len(nums)):
if nums[j] in ('lower', 'upper'):
cj = lb[j] if nums[j] == 'lower' else ub[j]
const_normalization += np.exp(cj)
const_numerator += c_softmax[j] * np.exp(cj)
nonbinding[j] = 0
xsol[j] = cj
xsols = []
if np.sum(nonbinding) > 0:
nonbinding = nonbinding > 0
nonbinding_vals = solve_equality_subproblem(
const_numerator, const_normalization, c_softmax[nonbinding],
c_linear[nonbinding], lb[nonbinding], ub[nonbinding])
for i in range(nonbinding_vals.shape[1]):
xsol_i = np.copy(np.reshape(xsol, [-1]))
xsol_i[nonbinding] = nonbinding_vals[:, i]
xsols.append(xsol_i)
else:
xsols = [xsol]
for xsol in xsols:
xsol = np.clip(xsol, lb, ub)
obj_cur = funx(xsol)
if obj_cur > obj_best:
obj_best = obj_cur
xbest = xsol
# Add constant correction for offsetting lb and ub
obj_best = obj_best + offset * jnp.sum(c_linear)
return obj_best, xbest
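# The loop above enumerates all 3^n assignments of the coordinates to
# {interior, lower bound, upper bound}. Bound-binding coordinates contribute
# fixed terms to the softmax numerator and normaliser, the remaining interior
# coordinates are optimised analytically by solve_equality_subproblem, and the
# best (offset-corrected) objective value over all patterns is returned.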
def solve_equality_subproblem(
scalar_a: float,
scalar_b: float,
coeff_vec: np.ndarray,
lagrangian_vec: np.ndarray,
lb: np.ndarray,
ub: np.ndarray,
):
"""Maximize equality constrained subproblem.
(scalar_a + coeff_vec'* exp(x))/(scalar_b + sum(exp(x))) +
lagrangian_vec '* x
subject to lb <= x <= ub.
Args:
scalar_a: Scalar
scalar_b: Scalar
coeff_vec: (n,) numpy array of rescaled softmax coefficients.
lagrangian_vec: (n,) numpy array of linear coefficients.
lb: (n,) numpy array of lower bounds.
ub: (n,) numpy array of upper bounds.
Returns:
objval: optimal value.
xopt: solution found for the upper bound.
"""
coeff_vec = np.reshape(coeff_vec, [-1])
lagrangian_vec = np.reshape(lagrangian_vec, [-1])
xopt = stationary_points(scalar_a, scalar_b, coeff_vec, lagrangian_vec)
if xopt is not None:
return np.clip(xopt, np.reshape(lb, [-1, 1]), np.reshape(ub, [-1, 1]))
else:
return np.reshape(lb, [-1, 1])
def _coeffs(f, degree):
  """Recovers the coefficients of the polynomial `f` via a Vandermonde solve."""
  return jnp.linalg.solve(
      np.vander((np.arange(degree) + 1)).astype(float),
      f((jnp.arange(degree) + 1).astype(float)))
@jax.jit
def eval_polynomial(
x: jnp.ndarray,
coeff_a: float,
coeff_b: float,
mul_coeffs: jnp.ndarray,
sub_coeffs: jnp.ndarray,
) -> jnp.ndarray:
"""Evaluate polynomial.
Evaluate the polynomial corresponding to the rational equation
(coeff_b * x - coeff_a) + sum_i mul_coeffs[i]/(x-sub_coeffs[i])
at x.
Args:
x: (n,)
coeff_a: Scalar
coeff_b: Scalar
mul_coeffs: (n,) numpy array of multiplicative coefficients
sub_coeffs: (n,) numpy array of subtractive coefficients
Returns:
Values of polynomial at x (same shape as x).
"""
result = 0.
x = jnp.reshape(x, [-1, 1])
for i in range(mul_coeffs.size):
coeffs_not_i = (np.arange(mul_coeffs.size) != i)
result += (
mul_coeffs[i] *
jnp.prod(x - jnp.reshape(sub_coeffs[coeffs_not_i], [1, -1]), axis=-1))
result = jnp.reshape(result, [-1])
result -= (
jnp.reshape(coeff_b * x - coeff_a, [-1]) * jnp.reshape(
jnp.prod(x - jnp.reshape(sub_coeffs, [1, -1]), axis=-1), [-1]))
return jnp.reshape(result, [-1])
def stationary_points(
scalar_a: float,
scalar_b: float,
c_vec: np.ndarray,
lam_vec: np.ndarray,
) -> Union[None, np.ndarray]:
"""Get stationary points for equality constrained problem.
Find stationary points of
(scalar_a + c_vec'* exp(x))/(scalar_b + sum(exp(x))) + lam_vec '* x.
Args:
scalar_a: Scalar
scalar_b: Scalar
    c_vec: (n,) numpy array of softmax (numerator) coefficients.
    lam_vec: (n,) numpy array of linear coefficients.
  Returns:
    (n, k) array of stationary points (where k is the number of stationary
    points), or None if no valid stationary point exists.
"""
assert scalar_b >= 0.
  # Tolerance for numerical issues
eps = 1e-5
vec_x = lambda x: np.reshape(x, [-1])
lam_vec = vec_x(lam_vec)
c_vec = vec_x(c_vec)
# Solve the scalar equation
# (coeff_b * z - coeff_a) +
# sum_i (c_vec[i] * scalar_b - scalar_a) * slam_vec[i]/(z-c_vec[i]) = 0
# for z.
  # Roots of this equation represent possible values of
# (scalar_a + c_vec'* exp(x))/(scalar_b + sum(exp(x))) at a stationary point
# Solve by turning this into a polynomial equation by multiplying out the
# denominators of the rational terms.
  # We first collect all equal terms on the denominator, to minimize the degree
# of the resulting polynomial
c_vec_uniq = np.unique(c_vec)
lam_vec_uniq = np.zeros_like(c_vec_uniq)
for i in range(lam_vec_uniq.size):
lam_vec_uniq[i] = np.sum(lam_vec[c_vec == c_vec_uniq[i]])
lamc_uniq = (c_vec_uniq * scalar_b - scalar_a) * lam_vec_uniq
# This represents the polynomial version of the rational function
poly = lambda z: eval_polynomial(z, scalar_a, scalar_b, lamc_uniq, c_vec_uniq)
# Extract coefficients of polynomial and compute roots.
cs = _coeffs(poly, c_vec_uniq.size + 2)
roots = np.roots(cs)
# Compute x corresponding to each root
sols = []
for root in roots:
# We only consider real roots
root = np.real(root)
# p_sol represents exp(x)/(scalar_b + sum(exp(x)))
p_sol = lam_vec / (root - c_vec)
    # Check that p_sol has only positive entries and that the root does not
    # coincide with c_vec, which would render the rational equation undefined
if np.all(p_sol > 0.) and np.all(np.abs(root - c_vec) > eps):
# If scalar_b is positive, p_sol must add up to something smaller than 1.
# If scalar_b is zero, p_sol must add up to 1.
if scalar_b > 0. and (np.sum(p_sol) < 1.):
# Recover exp(x) from p_sol
sol = p_sol * scalar_b / (1 - np.sum(p_sol))
else:
sol = p_sol
sols.append(np.reshape(np.log(sol), [-1, 1]))
if sols:
return np.concatenate(sols, axis=-1)
else:
return None
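# Hedged illustration (added): how `stationary_points` is typically consumed;
# the scalars and coefficient vectors below are arbitrary toy values.
def _example_stationary_points():
  """Collects candidate stationary points for a two-dimensional problem."""
  points = stationary_points(
      scalar_a=0.5,
      scalar_b=1.0,
      c_vec=np.array([1.0, -0.5]),
      lam_vec=np.array([0.2, 0.3]))
  # `points` is either None or an (n, k) array with one column per candidate.
  return points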
| jax_verify-master | jax_verify/extensions/functional_lagrangian/inner_solvers/exact_opt_softmax.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Solve last layer inner max for probability specification."""
import enum
import itertools
from typing import Any, Tuple
import jax
import jax.numpy as jnp
from jax_verify.extensions.functional_lagrangian import dual_build
from jax_verify.extensions.functional_lagrangian import lagrangian_form
from jax_verify.extensions.functional_lagrangian import verify_utils
from jax_verify.extensions.functional_lagrangian.inner_solvers import exact_opt_softmax
from jax_verify.extensions.sdp_verify import utils as sdp_utils
import numpy as np
import optax
import scipy
InnerVerifInstance = verify_utils.InnerVerifInstance
Tensor = jnp.array
class MaxType(enum.Enum):
EXP = 'exp'
EXP_BOUND = 'exp_bound'
class UncertaintySpecStrategy(dual_build.InnerMaxStrategy):
"""Strategy for solving inner max at final layer with uncertainty spec."""
def __init__(
self,
n_iter: int,
n_pieces: int,
solve_max: MaxType,
learning_rate: float = 1.0,
):
"""Constructor.
Args:
n_iter: number of iterations of binary search to use for inner max.
n_pieces: number of discrete points to use for scalar inner max.
solve_max: Which maximization routine to use.
learning_rate: learning-rate to use for PGD attacks.
"""
self._n_iter = n_iter
self._n_pieces = n_pieces
self._solve_max = solve_max
self._learning_rate = learning_rate
def solve_max(
self,
inner_dual_vars: Any,
opt_instance: InnerVerifInstance,
key: jnp.ndarray,
step: int,
) -> jnp.ndarray:
if self._solve_max == MaxType.EXP:
return self.solve_max_exp(inner_dual_vars, opt_instance, key, step)
elif self._solve_max == MaxType.EXP_BOUND:
return self.upper_bound_softmax_plus_affine(inner_dual_vars, opt_instance,
key, step)
else:
raise ValueError(
f'Unrecognized solve_max in uncertainty spec: {self._solve_max}.')
def supports_stochastic_parameters(self):
# does not rely on parameters
return True
def solve_max_exp(
self,
inner_dual_vars: Any,
opt_instance: InnerVerifInstance,
key: jnp.ndarray,
step: int,
) -> jnp.ndarray:
"""Solve inner max problem for final layer with uncertainty specification.
Maximize obj'*softmax(x) - lagrangian_form(x) subject to l<=x<=u
Args:
inner_dual_vars: () jax scalar.
opt_instance: Inner optimization instance.
key: RNG key.
step: outer optimization iteration number.
Returns:
opt: Optimal value.
"""
assert opt_instance.is_last
l = opt_instance.bounds[0].lb_pre
u = opt_instance.bounds[0].ub_pre
def lagr_form(x):
val = opt_instance.lagrangian_form_pre.apply(
x, opt_instance.lagrange_params_pre, step)
return jnp.reshape(val, ())
affine_obj = lambda x: jnp.reshape(opt_instance.affine_fns[0](x), ())
assert len(opt_instance.affine_fns) == 1
def max_objective_fn(anyx):
return affine_obj(jax.nn.softmax(anyx)) - lagr_form(anyx)
min_objective_fn = lambda x: -max_objective_fn(x)
opt = optax.adam(self._learning_rate)
grad_fn = jax.grad(min_objective_fn)
def cond_fn(inputs):
it, x, grad_x, _ = inputs
not_converged = jnp.logical_not(has_converged(x, grad_x, l, u))
return jnp.logical_and(it < self._n_iter, not_converged)
def body_fn(inputs):
it, x, _, opt_state = inputs
grad_x = grad_fn(x)
updates, opt_state = opt.update(grad_x, opt_state, x)
x = optax.apply_updates(x, updates)
x = jnp.clip(x, l, u)
it = it + 1
return it, x, grad_x, opt_state
def find_max_from_init(x):
opt_state = opt.init(x)
# iteration, x, grad_x, opt_state
init_val = (jnp.zeros(()), x, jnp.ones_like(x), opt_state)
_, adv_x, _, _ = jax.lax.while_loop(cond_fn, body_fn, init_val)
adv_x = jnp.clip(adv_x, l, u)
return jnp.reshape(max_objective_fn(jax.lax.stop_gradient(adv_x)), (1,))
# initialization heuristic 1: max when ignoring softmax
mask_ignore_softmax = jax.grad(lagr_form)(jnp.ones_like(u)) < 0
x = mask_ignore_softmax * u + (1 - mask_ignore_softmax) * l
objective_1 = find_max_from_init(x)
# initialization heuristic 2: max when ignoring affine
mask_ignore_affine = jax.grad(affine_obj)(jnp.ones_like(u)) > 0
x = mask_ignore_affine * u + (1 - mask_ignore_affine) * l
objective_2 = find_max_from_init(x)
# also try at boundaries
objective_3 = find_max_from_init(l)
objective_4 = find_max_from_init(u)
# select best of runs
objective = jnp.maximum(
jnp.maximum(objective_1, objective_2),
jnp.maximum(objective_3, objective_4))
return objective
def upper_bound_softmax_plus_affine(
self,
inner_dual_vars: Any,
opt_instance: InnerVerifInstance,
key: jnp.ndarray,
step: int,
) -> jnp.ndarray:
"""Upper bound (softmax + affine)-type problem with cvxpy.
Upper bound obj'*softmax(x) - lagrangian_form(x) subject to l<=x<=u
Note that this function cannot be differentiated through; using it at
training time will lead to an error.
Args:
inner_dual_vars: jax () scalar.
opt_instance: Inner optimization instance.
key: RNG key.
step: outer optimization iteration number.
Returns:
Optimal value.
Raises:
ValueError if Lagrangian form is not supported or if the problem is not
solved to optimality.
"""
if not isinstance(opt_instance.lagrangian_form_pre, lagrangian_form.Linear):
raise ValueError('Unsupported Lagrangian form.')
lower = opt_instance.bounds[0].lb_pre
upper = opt_instance.bounds[0].ub_pre
def lagr_form(x):
val = opt_instance.lagrangian_form_pre.apply(
x, opt_instance.lagrange_params_pre, step)
return jnp.reshape(val, ())
# extract coeff_linear via autodiff (including negative sign here)
coeff_linear = -jax.grad(lagr_form)(jnp.zeros_like(lower))
assert len(opt_instance.affine_fns) == 1
# extract coeff_softmax via autodiff
coeff_softmax_fn = lambda x: jnp.reshape(opt_instance.affine_fns[0](x), ())
coeff_softmax = jax.grad(coeff_softmax_fn)(jnp.zeros_like(lower))
if opt_instance.spec_type == verify_utils.SpecType.ADVERSARIAL_SOFTMAX:
upper_bounding_method = exact_opt_softmax.exact_opt_softmax_plus_affine
else:
upper_bounding_method = upper_bound_softmax_plus_affine_exact
upper_bound, _ = upper_bounding_method(
c_linear=np.array(coeff_linear).squeeze(0).astype(np.float64),
c_softmax=np.array(coeff_softmax).squeeze(0).astype(np.float64),
lb=np.array(lower).squeeze(0).astype(np.float64),
ub=np.array(upper).squeeze(0).astype(np.float64),
)
constant = (
coeff_softmax_fn(jnp.zeros_like(lower)) -
lagr_form(jnp.zeros_like(lower)))
result = jnp.array(upper_bound) + constant
return jnp.reshape(result, [lower.shape[0]])
def init_layer_inner_params(self, opt_instance):
"""Returns initial inner maximisation duals and their types."""
if self._solve_max == MaxType.EXP:
return None, sdp_utils.DualVarTypes.EQUALITY
else:
return (jnp.zeros_like(opt_instance.bounds[0].lb_pre),
sdp_utils.DualVarTypes.EQUALITY)
def has_converged(x: Tensor, grad: Tensor, l: Tensor, u: Tensor):
  """Checks whether each coordinate is stuck at a bound or has zero gradient."""
stuck_at_lower = jnp.logical_and(x == l, grad >= 0)
stuck_at_upper = jnp.logical_and(x == u, grad <= 0)
zero_grad = grad == 0
stuck_at_border = jnp.logical_or(stuck_at_lower, stuck_at_upper)
converged = jnp.logical_or(stuck_at_border, zero_grad)
return jnp.all(converged)
def find_stationary_softmax_affine(
c_linear: np.ndarray,
c_softmax: np.ndarray,
const_normalization: np.ndarray,
lb: np.ndarray,
ub: np.ndarray,
) -> np.ndarray:
"""Find stationary point of softmax plus linear function.
More specifically, solve for stationary point of
exp(c_softmax ' * x)/(sum(exp(x))+const_normalization) +
c_linear'* x with highest objective value.
Args:
c_linear: (n,) numpy array of linear coefficients.
c_softmax: (n,) numpy array of softmax coefficients.
const_normalization: (1,) numpy array representing constant term
lb: Lower bounds
ub: Upper bounds
  Returns:
    xopt: best stationary point found, clipped to the bounds (falls back to
      the lower bound when no valid stationary point exists).
"""
assert _is_zero(c_softmax) or _is_one_hot(c_softmax)
idx = -1
if np.sum(c_softmax) > 0:
idx = np.argmax(c_softmax)
def funx(x):
x = np.reshape(x, [-1])
if idx < 0:
return 1 / (np.sum(np.exp(x)) + const_normalization) + np.sum(
c_linear * x)
else:
return (np.exp(x[idx]) / (np.sum(np.exp(x)) + const_normalization) +
np.sum(c_linear * x))
  if idx >= 0:
popt = np.zeros_like(c_linear)
if c_linear[idx] < 0. and c_linear[idx] > -.25:
popt_idx_a = .5 * (1 + np.sqrt(1 + 4 * c_linear[idx]))
popt_idx_b = .5 * (1 - np.sqrt(1 + 4 * c_linear[idx]))
else:
return lb
xopts = [lb, ub]
for (i, popt_idx) in enumerate([popt_idx_a, popt_idx_b]):
popt = c_linear / popt_idx
popt[idx] = popt_idx
if np.any(popt < 0.) or np.sum(popt) > 1.:
xopts[i] = lb
else:
const = np.log(const_normalization / (1 - np.sum(popt)))
xopt = const + np.log(popt)
xopt = np.reshape(xopt, lb.shape)
xopts[i] = np.clip(xopt, lb, ub)
if funx(xopts[0]) > funx(xopts[1]):
return xopts[0]
else:
return xopts[1]
else:
radical = 1 - 4. * np.sum(c_linear) * const_normalization
if radical > 0. and np.all(c_linear > 0):
const_a = .5 * (1 + np.sqrt(radical)) / np.sum(c_linear)
const_b = .5 * (1 - np.sqrt(radical)) / np.sum(c_linear)
x_a = np.clip(2 * np.log(const_a) + np.log(c_linear), lb, ub)
x_b = np.clip(2 * np.log(const_b) + np.log(c_linear), lb, ub)
if funx(x_a) > funx(x_b):
xopt = x_a
else:
xopt = x_b
xopt = np.reshape(xopt, lb.shape)
return xopt
return lb
def _truncated_exp(
x: np.ndarray,
x_min: np.ndarray = -20,
x_max: np.ndarray = 20,
) -> np.ndarray: # pytype: disable=annotation-type-mismatch
"""Truncate before exponentiation for numerical stability."""
return np.exp(np.clip(x, x_min, x_max))
def _is_one_hot(x: np.ndarray) -> bool:
return np.all(x * x == x) and np.sum(x) == 1
def _is_zero(x: np.ndarray) -> bool:
return np.all(x == 0)
def upper_bound_softmax_plus_affine_exact(
c_linear: np.ndarray,
c_softmax: np.ndarray,
lb: np.ndarray,
ub: np.ndarray,
epsilon: float = 1e-10,
) -> Tuple[np.ndarray, np.ndarray]:
"""Upper bound exp(c_softmax'*x-logsumexp(x)) + c_linear'* x subject to l <= x <= u.
Args:
c_linear: (n,) numpy array of linear coefficients.
c_softmax: (n,) numpy array of softmax coefficients.
lb: (n,) numpy array of lower bounds.
ub: (n,) numpy array of upper bounds.
epsilon: small constant for numerical stability.
Returns:
objval: optimal value.
xopt: solution found for the upper bound.
"""
assert _is_one_hot(c_softmax)
# flatten all arrays
c_softmax = np.ravel(c_softmax)
c_linear = np.ravel(c_linear)
lb = np.ravel(lb)
ub = np.ravel(ub)
# find index encoded by one-hot softmax
idx = np.argmax(c_softmax)
def objective_fn(x: np.ndarray) -> np.ndarray:
return np.sum(c_softmax * scipy.special.softmax(x)) + np.sum(c_linear * x)
obj_best = -np.inf
num_coordinates = lb.size
# each coordinate of the solution can be either at the lower bound, the upper
# bound, or in the interior feasible domain. Iterate over all possible
# combinations below
for state_coordinates in itertools.product(['interior', 'lower', 'upper'],
repeat=num_coordinates):
in_interior = np.ones([num_coordinates], dtype=bool)
const_normalization = epsilon # zero init + small constant for stability
const_numerator = 1.0
# initialize candidate solution at lower bound
xsol = np.copy(lb)
for j in range(num_coordinates):
if state_coordinates[j] in ('lower', 'upper'):
# coordinate j is at lower bound or upper bound
in_interior[j] = 0
xsol[j] = lb[j] if state_coordinates[j] == 'lower' else ub[j]
# compute exp while taking care of numerical stability
exp_xsol_j = _truncated_exp(xsol[j])
const_normalization += exp_xsol_j
if j == idx:
const_numerator *= exp_xsol_j
if not _is_zero(in_interior):
# solve following problem for coordinates that are `in_interior`:
# max_x [exp(c_softmax^T x) / (sum(exp(x))+ const_normalization)
# + 1/const_numerator * c_linear^T x]
# s.t. lb <= x <= ub.
#
# estimate relative importance of softmax to detect numerical issues
softmax_relative_importance = (
const_numerator / (const_normalization + epsilon))
if softmax_relative_importance > epsilon:
# solve softmax + linear problem if well-conditioned
interior_values = find_stationary_softmax_affine(
c_linear[in_interior] / const_numerator, c_softmax[in_interior],
const_normalization, lb[in_interior], ub[in_interior])
else:
# otherwise softmax can be ignored, solve linear part in closed-form
mask = c_linear[in_interior] > 0
interior_values = mask * ub[in_interior] + (1 - mask) * lb[in_interior]
# update candidate solution with values found at interior
if interior_values is not None:
xsol[in_interior] = interior_values
# project candidate to feasible space and evaluate
xsol = np.clip(xsol, lb, ub)
objective_xsol = objective_fn(xsol)
# keep if best so far
if objective_xsol > obj_best:
obj_best = objective_xsol
xbest = xsol
return obj_best, xbest
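# Illustration only (added; values invented): the exact bound expects a one-hot
# `c_softmax`, so this sketch bounds softmax(x)[0] + c_linear'x over [-1, 1]^3.
def _example_upper_bound_softmax_plus_affine_exact():
  """Bounds a toy three-class softmax-plus-affine objective over a box."""
  c_softmax = np.array([1.0, 0.0, 0.0])
  c_linear = np.array([0.05, -0.1, 0.2])
  lb = -np.ones(3)
  ub = np.ones(3)
  return upper_bound_softmax_plus_affine_exact(c_linear, c_softmax, lb, ub)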
| jax_verify-master | jax_verify/extensions/functional_lagrangian/inner_solvers/uncertainty_spec.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inner solvers."""
from jax_verify.extensions.functional_lagrangian.inner_solvers.get_strategy import get_strategy
| jax_verify-master | jax_verify/extensions/functional_lagrangian/inner_solvers/__init__.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Solve last layer inner max for probability specification."""
import enum
from typing import Any
import jax
import jax.numpy as jnp
from jax_verify.extensions.functional_lagrangian import dual_build
from jax_verify.extensions.functional_lagrangian import verify_utils
from jax_verify.extensions.sdp_verify import utils as sdp_utils
InnerVerifInstance = verify_utils.InnerVerifInstance
Tensor = jnp.array
class LayerType(enum.Enum):
# `params` represent a network of repeated relu(Wx+b)
# The final output also includes a relu activation, and `obj` composes
# the final layer weights with the original objective
INPUT = 'input'
FIRST = 'first'
class InputUncertaintySpecStrategy(dual_build.InnerMaxStrategy):
"""Strategy for solving inner max at final layer with uncertainty spec."""
def __init__(
self,
layer_type: LayerType,
sig_max: float,
):
"""Constructor.
Args:
layer_type: Indicates whether optimization is over input layer or first
linear layer
sig_max: Maximum standard deviation of input noise
"""
self._layer_type = layer_type
self._eps = 1e-10
self._sig_max = sig_max
return
def solve_max(
self,
inner_dual_vars: Any,
opt_instance: InnerVerifInstance,
key: jnp.ndarray,
step: int,
) -> jnp.ndarray:
if self._layer_type == LayerType.INPUT:
return self.solve_max_input(inner_dual_vars, opt_instance, key, step)
elif self._layer_type == LayerType.FIRST:
return self.solve_max_first(inner_dual_vars, opt_instance, key, step)
else:
raise ValueError('Unrecognized layer type in input uncertainty spec')
def solve_max_input(
self,
inner_dual_vars: Any,
opt_instance: InnerVerifInstance,
key: jnp.ndarray,
step: int,
) -> jnp.ndarray:
assert opt_instance.is_first
lb = opt_instance.bounds[0].lb
ub = opt_instance.bounds[0].ub
x = (ub + lb) / 2.
pert = (ub - lb) / 2.
affine_fn, = opt_instance.affine_fns
lam, kappa, gamma = opt_instance.lagrangian_form_post.process_params(
opt_instance.lagrange_params_post)
zeros_pert = jnp.zeros_like(pert)
wconst = affine_fn(x)
gamma = jnp.reshape(gamma, wconst.shape)
gamma_post = jax.grad(lambda x: jnp.sum(gamma * affine_fn(x)))(zeros_pert)
var_term = (
jnp.reshape(gamma_post, [-1]) * jnp.reshape(pert, [-1]) * self._sig_max)
gam_dot_b = jnp.sum(jnp.reshape(gamma, [-1]) * jnp.reshape(wconst, [-1]))
obj = jnp.exp(.5 * jnp.sum(jnp.square(var_term)) + gam_dot_b + kappa)
obj += jnp.sum(jnp.reshape(lam, [-1]) * jnp.reshape(wconst, [-1]))
return obj
def _optt(self, kappa, theta, gamma, lb, ub):
eps = self._eps
def optt(t):
return kappa * jnp.exp(t) + theta * t
tmin = jnp.sum(jnp.minimum(gamma * lb, gamma * ub))
tmax = jnp.sum(jnp.maximum(gamma * lb, gamma * ub))
optt_c = jnp.where(
jnp.abs(kappa) > eps, -theta / jnp.maximum(kappa, eps), jnp.exp(tmin))
optt_c = jnp.where(optt_c > eps, jnp.log(jnp.maximum(optt_c, eps)), tmin)
optt_c = jax.lax.stop_gradient(optt_c)
best_obj_t = jnp.maximum(optt(tmin), optt(tmax))
best_obj_t = jnp.maximum(best_obj_t, optt(optt_c))
return best_obj_t
def solve_max_first(
self,
inner_dual_vars: Any,
opt_instance: InnerVerifInstance,
key: jnp.ndarray,
step: int,
) -> jnp.ndarray:
theta = jnp.reshape(inner_dual_vars, [-1, 1])
affine_fn, = opt_instance.affine_fns
bounds = opt_instance.bounds
duals_pre, kappa, gamma = opt_instance.lagrangian_form_pre.process_params(
opt_instance.lagrange_params_pre)
duals_post = opt_instance.lagrange_params_post
lb = bounds[0].lb_pre
ub = bounds[0].ub_pre
zero_inputs = jnp.zeros_like(lb)
affine_constant = affine_fn(zero_inputs)
duals_post = jnp.reshape(duals_post, affine_constant.shape)
post_slope_x = jax.grad(lambda x: jnp.sum(affine_fn(x) * duals_post))(
zero_inputs)
post_slope_x = jnp.reshape(post_slope_x, lb.shape)
duals_pre = jnp.reshape(duals_pre, lb.shape)
gamma = jnp.reshape(gamma, lb.shape)
opt_c = jnp.clip(jnp.zeros_like(lb), lb, ub)
def funx(x):
return (post_slope_x * jax.nn.relu(x) -
(duals_pre + jnp.exp(theta) * gamma) * x)
best_obj = jnp.maximum(funx(lb), funx(ub))
best_obj = jnp.sum(jnp.maximum(funx(opt_c), best_obj))
best_obj += jnp.exp(theta) * (theta - 1 - kappa)
best_obj += jnp.dot(
jnp.reshape(affine_constant, [1, -1]), jnp.reshape(duals_post, [-1, 1]))
return jnp.reshape(best_obj, (1,))
def init_layer_inner_params(self, opt_instance):
"""Returns initial inner maximisation duals and their types."""
return jnp.zeros(()), sdp_utils.DualVarTypes.EQUALITY
class ProbabilityThresholdSpecStrategy(dual_build.InnerMaxStrategy):
"""Strategy for solving inner max at final layer with uncertainty spec."""
def __init__(self):
"""Constructor."""
self._eps = 1e-10
return
def solve_max(
self,
inner_dual_vars: Any,
opt_instance: InnerVerifInstance,
key: jnp.ndarray,
step: int,
) -> jnp.ndarray:
assert opt_instance.is_last
l = opt_instance.bounds[0].lb_pre
u = opt_instance.bounds[0].ub_pre
theta = jnp.reshape(inner_dual_vars, [-1, 1])
def lagr_form(x):
val = opt_instance.lagrangian_form_pre.apply(
x, opt_instance.lagrange_params_pre, step)
return jnp.reshape(val, ())
lagr_form_const = lagr_form(jnp.zeros_like(l))
lagr_form_grad = jax.grad(lagr_form)
lagr_form_param_affine = lagr_form_grad(jnp.zeros_like(l))
affine_obj = lambda x: jnp.reshape(opt_instance.affine_fns[0](x), ())
assert len(opt_instance.affine_fns) == 1
# Extract coefficients of affine_obj
obj = jax.grad(affine_obj)(l)
obj = jnp.reshape(obj, [-1, 1])
l = jnp.reshape(l, [-1, 1])
u = jnp.reshape(u, [-1, 1])
# Extract bias term
obj_bias = -jnp.reshape(lagr_form_const, [-1, 1])
# Solve max_{l <= x <= u} Indicator[obj^T exp(x) >= 0] - lagr_form(x)
obj_a = (
obj_bias + 1 +
self._optx(theta[0] * obj, -lagr_form_param_affine, l, u))
obj_b = (
obj_bias + self._optx(-theta[1] * obj, -lagr_form_param_affine, l, u))
return jnp.reshape(jnp.maximum(obj_a, obj_b), (1,))
def _optx(self, a, b, l, u):
"""Optimize a^T exp(x) + b^T x subject to l <= x <= u."""
a = jnp.reshape(a, [-1, 1])
b = jnp.reshape(b, [-1, 1])
l = jnp.reshape(l, [-1, 1])
u = jnp.reshape(u, [-1, 1])
eps = self._eps
opt_candidate = jnp.where(a > eps, -b / jnp.maximum(a, eps),
jnp.zeros_like(a))
opt_candidate += jnp.where(a < -eps, -b / jnp.minimum(a, -eps),
jnp.zeros_like(a))
opt_candidate = jnp.where(opt_candidate > eps,
jnp.log(jnp.maximum(opt_candidate, eps)), l)
opt_candidate = jax.lax.stop_gradient(jnp.clip(opt_candidate, l, u))
funx = lambda x: a * jnp.exp(x) + b * x
best_obj = jnp.maximum(funx(l), funx(u))
best_obj = jnp.maximum(best_obj, funx(opt_candidate))
return jnp.sum(best_obj)
def init_layer_inner_params(self, opt_instance):
"""Returns initial inner maximisation duals and their types."""
return jnp.zeros((2,)), sdp_utils.DualVarTypes.INEQUALITY
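# Hedged worked example (added for illustration) of the scalar subproblem in
# `_optx` above: for a < 0 < b, the stationary point of a*exp(x) + b*x is
# x* = log(-b / a). The default numbers below are arbitrary.
def _example_optx_stationary_point(a=-2.0, b=1.0):
  """Returns the unconstrained stationary point and its objective value."""
  x_star = jnp.log(-b / a)  # the derivative a*exp(x) + b vanishes here
  return x_star, a * jnp.exp(x_star) + b * x_star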
| jax_verify-master | jax_verify/extensions/functional_lagrangian/inner_solvers/input_uncertainty_spec.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mixture of strategies for solving the inner maximization."""
from typing import Any
import jax.numpy as jnp
from jax_verify.extensions.functional_lagrangian import dual_build
from jax_verify.extensions.functional_lagrangian import verify_utils
InnerVerifInstance = verify_utils.InnerVerifInstance
class MixedStrategy(dual_build.InnerMaxStrategy):
"""Solves inner maximisations with a combination of solvers."""
  def __init__(self, solvers, solver_weights):
    """Constructor.
    Args:
      solvers: per-layer lists of inner maximisation solvers.
      solver_weights: per-layer lists of weights, one weight per solver.
    """
    self._solvers = solvers
    self._solver_weights = solver_weights
def solve_max(
self,
inner_dual_vars: Any,
opt_instance: InnerVerifInstance,
key: jnp.ndarray,
step: int,
) -> jnp.ndarray:
"""Solve maximization problem of opt_instance with a combination of solvers.
Args:
inner_dual_vars: Dual variables for the inner maximisation.
opt_instance: Verification instance that defines optimization problem to
be solved.
key: Jax PRNG key.
step: outer optimization iteration number.
Returns:
      final_value: weighted sum of the values found by the individual solvers.
"""
# some renaming to simplify variable names
layer_idx = opt_instance.idx
solver_weights_for_layer = self._solver_weights[layer_idx]
solvers_for_layer = self._solvers[layer_idx]
final_value = 0.
for solver, solver_weight, inner_var in zip(solvers_for_layer,
solver_weights_for_layer,
inner_dual_vars):
final_value += solver_weight * solver.solve_max(inner_var, opt_instance,
key, step)
return final_value # pytype: disable=bad-return-type # jnp-array
def init_layer_inner_params(self, opt_instance):
"""Returns initial inner maximisation duals and their types."""
dual_vars_types = [
solver.init_layer_inner_params(opt_instance)
for solver in self._solvers[opt_instance.idx]
]
return zip(*dual_vars_types)
def supports_stochastic_parameters(self):
for solvers_for_layer in self._solvers:
for solver in solvers_for_layer:
if not solver.supports_stochastic_parameters():
return False
return True
| jax_verify-master | jax_verify/extensions/functional_lagrangian/inner_solvers/mixed.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Solving linear problems."""
from typing import Any
import jax
import jax.numpy as jnp
from jax_verify.extensions.functional_lagrangian import dual_build
from jax_verify.extensions.functional_lagrangian import lagrangian_form as lag_form
from jax_verify.extensions.functional_lagrangian import verify_utils
from jax_verify.extensions.sdp_verify import utils as sdp_utils
InnerVerifInstance = verify_utils.InnerVerifInstance
class LpStrategy(dual_build.InnerMaxStrategy):
"""Solves inner maximisations (for linear Lagrangian) in closed form."""
def supports_stochastic_parameters(self):
# can use expectations of parameters instead of deterministic parameters
return True
def solve_max(
self,
inner_dual_vars: Any,
opt_instance: InnerVerifInstance,
key: jnp.ndarray,
step: int,
) -> jnp.ndarray:
"""Solve maximization problem of opt_instance in closed form.
Args:
inner_dual_vars: Dual variables for the inner maximisation.
opt_instance: Verification instance that defines optimization problem to
be solved.
key: Jax PRNG key.
step: outer optimization iteration number
Returns:
max_value: final value of the objective function found.
"""
if opt_instance.affine_before_relu:
      raise ValueError('LpStrategy requires affine_before_relu to be False.')
if not opt_instance.same_lagrangian_form_pre_post:
      raise ValueError('Different lagrangian forms on inputs and outputs not '
                       'supported.')
if (isinstance(opt_instance.lagrangian_form_pre, lag_form.Linear) or
isinstance(opt_instance.lagrangian_form_post, lag_form.Linear)):
pass
else:
raise ValueError('LpStrategy cannot use Lagrangian form of type '
f'{type(opt_instance.lagrangian_form_pre)}.')
# some renaming to simplify variable names
affine_fn, = opt_instance.affine_fns
bounds = opt_instance.bounds
duals_pre = opt_instance.lagrange_params_pre
if (opt_instance.is_last and
opt_instance.spec_type == verify_utils.SpecType.ADVERSARIAL):
# No duals_post for last layer, and objective folded in.
batch_size = bounds[0].lb.shape[0]
duals_post = jnp.ones([batch_size])
else:
duals_post = opt_instance.lagrange_params_post
if opt_instance.is_first:
# no "pre-activation" for input of first layer
lb = bounds[0].lb
ub = bounds[0].ub
else:
lb = bounds[0].lb_pre
ub = bounds[0].ub_pre
zero_inputs = jnp.zeros_like(lb)
affine_constant = affine_fn(zero_inputs)
duals_post = jnp.reshape(duals_post, affine_constant.shape)
post_slope_x = jax.grad(lambda x: jnp.sum(affine_fn(x) * duals_post))(
zero_inputs)
if opt_instance.is_first:
# find max element-wise (separable problem): either at lower bound or
# upper bound -- no duals_pre for first layer
max_per_element = jnp.maximum(
post_slope_x * lb,
post_slope_x * ub,
)
else:
# find max element-wise (separable problem): either at lower bound, 0 or
# upper bound
duals_pre = jnp.reshape(duals_pre, lb.shape)
max_per_element_bounds = jnp.maximum(
post_slope_x * jax.nn.relu(lb) - duals_pre * lb,
post_slope_x * jax.nn.relu(ub) - duals_pre * ub
)
max_per_element = jnp.where(
jnp.logical_and(lb <= 0, ub >= 0),
jax.nn.relu(max_per_element_bounds), # include zero where feasible
max_per_element_bounds) # otherwise only at boundaries
# sum over coordinates and add constant term (does not change max choice)
max_value = jnp.sum(max_per_element,
axis=tuple(range(1, max_per_element.ndim)))
constant_per_element = affine_constant * duals_post
constant = jnp.sum(constant_per_element,
axis=tuple(range(1, constant_per_element.ndim)))
return max_value + constant
def init_layer_inner_params(self, opt_instance):
"""Returns initial inner maximisation duals and their types."""
# no need for auxiliary variables
return None, sdp_utils.DualVarTypes.EQUALITY
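# Hedged illustration (added) of the per-coordinate closed form used in
# `solve_max` above: for one coordinate with post-activation slope `a`,
# pre-activation dual `d` and bounds [lb, ub], the maximum of a*relu(x) - d*x
# is attained at lb, at ub, or (when the interval contains it) at 0. The
# default numbers are made up.
def _example_elementwise_closed_form(a=2.0, d=0.5, lb=-1.0, ub=1.0):
  """Evaluates the three candidate maximisers for a single coordinate."""
  candidates = jnp.array([
      a * jax.nn.relu(lb) - d * lb,
      a * jax.nn.relu(ub) - d * ub,
      0.0 if lb <= 0 <= ub else -jnp.inf,
  ])
  return jnp.max(candidates)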
| jax_verify-master | jax_verify/extensions/functional_lagrangian/inner_solvers/lp.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inner solvers."""
from jax_verify.extensions.functional_lagrangian.inner_solvers import input_uncertainty_spec
from jax_verify.extensions.functional_lagrangian.inner_solvers import lp
from jax_verify.extensions.functional_lagrangian.inner_solvers import mixed
from jax_verify.extensions.functional_lagrangian.inner_solvers import pga
from jax_verify.extensions.functional_lagrangian.inner_solvers import uncertainty_spec
def get_strategy(config, params, mode):
"""Returns configured strategy for inner maximisation."""
return _build_strategy_recursively(config.inner_opt.get(mode), params)
def _build_strategy_recursively(config_inner_opt, params):
"""Create inner solver strategy (potentially recursively)."""
optim_type = config_inner_opt['optim_type']
if optim_type == 'pga':
strategy = pga.PgaStrategy(
n_iter=config_inner_opt['n_iter'],
lr=config_inner_opt['lr'],
n_restarts=config_inner_opt['n_restarts'],
method=config_inner_opt['method'],
finetune_n_iter=config_inner_opt['finetune_n_iter'],
finetune_lr=config_inner_opt['finetune_lr'],
finetune_method=config_inner_opt['finetune_method'],
normalize=config_inner_opt['normalize'])
elif optim_type == 'lp':
strategy = lp.LpStrategy()
elif optim_type == 'probability_threshold':
strategy = input_uncertainty_spec.ProbabilityThresholdSpecStrategy()
elif optim_type == 'uncertainty':
solve_max = {f.value: f for f in uncertainty_spec.MaxType
}[config_inner_opt.get('solve_max')]
strategy = uncertainty_spec.UncertaintySpecStrategy(
n_iter=config_inner_opt.get('n_iter'),
n_pieces=config_inner_opt.get('n_pieces'),
solve_max=solve_max,
learning_rate=config_inner_opt.get('learning_rate'),
)
elif optim_type == 'uncertainty_input':
layer_type = {f.value: f for f in input_uncertainty_spec.LayerType
}[config_inner_opt.get('layer_type')]
sig_max = config_inner_opt.get('sig_max')
strategy = input_uncertainty_spec.InputUncertaintySpecStrategy(
layer_type=layer_type, sig_max=sig_max)
elif optim_type == 'mixed':
solvers = [[
_build_strategy_recursively(strat, params) for strat in strats_for_layer
] for strats_for_layer in config_inner_opt['mixed_strat']]
strategy = mixed.MixedStrategy(
solvers=solvers, solver_weights=config_inner_opt['solver_weights'])
else:
raise NotImplementedError(
f'Unsupported optim type {config_inner_opt["optim_type"]}')
if (any(p.has_bounds for p in params) and
not strategy.supports_stochastic_parameters()):
# this is a conservative check: we fail if *any* parameter is
# stochastic, although it might not actually be used by strategy
raise ValueError('Inner opt cannot handle stochastic parameters.')
return strategy
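# Illustration only (added): a plain dict stands in here for the ConfigDict
# that the real entry point passes around; the 'lp' solver needs no further
# hyper-parameters, so a minimal config suffices.
def _example_build_lp_strategy():
  """Builds an LP inner-maximisation strategy from a minimal config."""
  return _build_strategy_recursively({'optim_type': 'lp'}, params=[])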
| jax_verify-master | jax_verify/extensions/functional_lagrangian/inner_solvers/get_strategy.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Projected gradient ascent."""
from jax_verify.extensions.functional_lagrangian.inner_solvers.pga import pga_strategy
PgaStrategy = pga_strategy.PgaStrategy
| jax_verify-master | jax_verify/extensions/functional_lagrangian/inner_solvers/pga/__init__.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities."""
from typing import Callable, Optional, Tuple
import chex
import jax
import jax.numpy as jnp
InitializeFn = Callable[[chex.Array, chex.Array], chex.Array]
ProjectFn = Callable[[chex.Array, chex.Array], chex.Array]
LossFn = Callable[[chex.Array], chex.Array]
def linf_project_fn(epsilon: float, bounds: Tuple[float, float]) -> ProjectFn:
def project_fn(x, origin_x):
dx = jnp.clip(x - origin_x, -epsilon, epsilon)
return jnp.clip(origin_x + dx, bounds[0], bounds[1])
return project_fn
def bounded_initialize_fn(
bounds: Optional[Tuple[chex.Array, chex.Array]] = None,) -> InitializeFn:
"""Returns an initialization function."""
if bounds is None:
return noop_initialize_fn()
else:
lower_bound, upper_bound = bounds
def _initialize_fn(rng, x):
a = jax.random.uniform(rng, x.shape, minval=0., maxval=1.)
x = a * lower_bound + (1. - a) * upper_bound
return x
return _initialize_fn
def noop_initialize_fn() -> InitializeFn:
def _initialize_fn(rng, x):
del rng
return x
return _initialize_fn
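# Hedged example (added for illustration): building a projection onto an
# l-infinity ball of radius 0.1 intersected with the [0, 1] box; all values
# below are arbitrary.
def _example_projection():
  """Projects a perturbed point back into the epsilon-ball around its origin."""
  project_fn = linf_project_fn(epsilon=0.1, bounds=(0.0, 1.0))
  origin = jnp.full((1, 4), 0.5)
  perturbed = origin + 0.3  # deliberately outside the epsilon-ball
  return project_fn(perturbed, origin)  # every entry is clipped back to 0.6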
| jax_verify-master | jax_verify/extensions/functional_lagrangian/inner_solvers/pga/utils.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Projected gradient ascent."""
import dataclasses
from typing import Any, Text
import jax
import jax.numpy as jnp
from jax_verify.extensions.functional_lagrangian import dual_build
from jax_verify.extensions.functional_lagrangian import lagrangian_form as lag_form
from jax_verify.extensions.functional_lagrangian import verify_utils
from jax_verify.extensions.functional_lagrangian.inner_solvers.pga import optimizer as optimizer_module
from jax_verify.extensions.functional_lagrangian.inner_solvers.pga import square
from jax_verify.extensions.functional_lagrangian.inner_solvers.pga import utils
from jax_verify.extensions.sdp_verify import utils as sdp_utils
import numpy as np
InnerVerifInstance = verify_utils.InnerVerifInstance
Params = verify_utils.Params
ParamsTypes = verify_utils.ParamsTypes
LagrangianForm = lag_form.LagrangianForm
class PgaStrategy(dual_build.InnerMaxStrategy):
"""Solves inner maximisations with projected gradient ascent."""
def __init__(self,
n_iter: int,
lr: float = 1.,
n_restarts: int = 1.,
method: Text = 'pgd',
finetune_n_iter: int = 0,
finetune_lr: float = 1.,
finetune_method: Text = 'pgd',
normalize: bool = False): # pytype: disable=annotation-type-mismatch
"""Constructor.
Args:
n_iter: number of iterations of PGA to be performed.
lr: learning-rate (or multiplier when adative, is kept constant).
n_restarts: number of restarts.
method: 'pgd', 'autopgd' or 'square'.
finetune_n_iter: number of iterations of PGA to be performed after the
initial optimization.
finetune_lr: learning-rate when finetuning.
finetune_method: 'pgd', 'autopgd'.
normalize: whether to normalise inputs before PGA.
"""
self._n_iter = n_iter
self._lr = lr
self._n_restarts = n_restarts
self._method = method
self._finetune_n_iter = finetune_n_iter
self._finetune_lr = finetune_lr
self._finetune_method = finetune_method
self._normalize = normalize
def _build_optimizer(self, method, n_iter, lr, lower_bound, upper_bound):
epsilon = jnp.max(upper_bound - lower_bound) / 2
if method == 'square':
init_fn = utils.bounded_initialize_fn(bounds=(lower_bound, upper_bound))
return square.Square(
num_steps=n_iter,
epsilon=epsilon,
bounds=(lower_bound, upper_bound),
initialize_fn=init_fn)
elif method == 'pgd':
init_fn = utils.noop_initialize_fn()
project_fn = utils.linf_project_fn(
epsilon=epsilon, bounds=(lower_bound, upper_bound))
optimizer = optimizer_module.IteratedFGSM(lr)
return optimizer_module.PGD(optimizer, n_iter, init_fn, project_fn)
else:
raise ValueError(f'Unknown method: "{method}"')
def supports_stochastic_parameters(self):
# This solver can be used with stochastic parameters (it will use the mean
# and treat the problem as a deterministic one).
return True
def solve_max(
self,
inner_dual_vars: Any,
opt_instance: InnerVerifInstance,
key: jnp.ndarray,
step: int,
) -> jnp.ndarray:
"""Solve maximization problem of opt_instance with projected gradient ascent.
Args:
inner_dual_vars: Dual variables for the inner maximisation.
opt_instance: Verification instance that defines optimization problem to
be solved.
key: Jax PRNG key.
step: outer optimization iteration number.
Returns:
final_value: final value of the objective function found by PGA.
"""
if not opt_instance.same_lagrangian_form_pre_post:
      raise ValueError('Different lagrangian forms on inputs and outputs not '
                       'supported.')
# only supporting adversarial robustness specification for now
# when affine_before_relu and logits layer.
affine_before_relu = opt_instance.affine_before_relu
assert not (opt_instance.spec_type == verify_utils.SpecType.UNCERTAINTY and
opt_instance.is_last and affine_before_relu)
# some renaming to simplify variable names
if affine_before_relu:
lower_bound = opt_instance.bounds[0].lb
upper_bound = opt_instance.bounds[0].ub
else:
lower_bound = opt_instance.bounds[0].lb_pre
upper_bound = opt_instance.bounds[0].ub_pre
assert lower_bound.shape[0] == 1, 'Batching across samples not supported'
if self._normalize:
center = .5 * (upper_bound + lower_bound)
radius = .5 * (upper_bound - lower_bound)
normalize_fn = lambda x: x * radius + center
lower_bound = -jnp.ones_like(lower_bound)
upper_bound = jnp.ones_like(lower_bound)
else:
normalize_fn = lambda x: x
duals_pre = opt_instance.lagrange_params_pre
duals_post = opt_instance.lagrange_params_post
# dual variables never used for grad tracing
duals_pre_nondiff = jax.lax.stop_gradient(duals_pre)
duals_post_nondiff = jax.lax.stop_gradient(duals_post)
# Define the loss function.
if (opt_instance.spec_type == verify_utils.SpecType.UNCERTAINTY and
opt_instance.is_last):
# Last layer here isn't the final spec layer, treat like other layers
new_opt_instance = dataclasses.replace(opt_instance, is_last=False)
else:
new_opt_instance = opt_instance
softmax = (
opt_instance.spec_type == verify_utils.SpecType.ADVERSARIAL_SOFTMAX and
opt_instance.is_last)
obj = self.build_spec(new_opt_instance, step, softmax)
def loss_pgd(x):
# Expects x without batch dimension, as vmap adds batch-dimension.
x = jnp.reshape(x, lower_bound.shape)
x = normalize_fn(x)
v = obj(x, duals_pre_nondiff, duals_post_nondiff)
return -v
loss_pgd = jax.vmap(loss_pgd)
# Compute shape for compatibility with blackbox 'square' attack.
if jnp.ndim(lower_bound) == 2 and self._method == 'square':
d = lower_bound.shape[1]
max_h = int(np.round(np.sqrt(d)))
for h in range(max_h, 0, -1):
w, ragged = divmod(d, h)
if ragged == 0:
break
assert d == h * w
shape = [1, h, w, 1]
else:
shape = lower_bound.shape
# Optimization.
init_x = (upper_bound + lower_bound) / 2
init_x = jnp.reshape(init_x, shape)
optimizer = self._build_optimizer(self._method, self._n_iter, self._lr,
jnp.reshape(lower_bound, shape),
jnp.reshape(upper_bound, shape))
if self._n_restarts > 1:
optimizer = optimizer_module.Restarted(
optimizer, restarts_using_tiling=self._n_restarts)
key, next_key = jax.random.split(key)
x = optimizer(loss_pgd, key, init_x)
if self._finetune_n_iter > 0:
optimizer = self._build_optimizer(self._finetune_method,
self._finetune_n_iter,
self._finetune_lr,
jnp.reshape(lower_bound, shape),
jnp.reshape(upper_bound, shape))
x = optimizer(loss_pgd, next_key, x)
# compute final value and return it
x = normalize_fn(jnp.reshape(x, lower_bound.shape))
final_value = obj(
jax.lax.stop_gradient(x), # non-differentiable
duals_pre,
duals_post # differentiable
)
return final_value
def init_layer_inner_params(self, opt_instance):
"""Returns initial inner maximisation duals and their types."""
# pga does not require extra variables
return None, sdp_utils.DualVarTypes.EQUALITY
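# Illustration only (added): constructing the strategy with made-up
# hyper-parameters. Solving an actual inner problem additionally requires an
# InnerVerifInstance and Lagrangian parameters from the outer optimisation.
def _example_pga_strategy():
  """Builds a PGD-based inner solver with a short finetuning phase."""
  return PgaStrategy(
      n_iter=50,
      lr=0.1,
      n_restarts=2,
      method='pgd',
      finetune_n_iter=10,
      finetune_lr=0.01,
      finetune_method='pgd')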
| jax_verify-master | jax_verify/extensions/functional_lagrangian/inner_solvers/pga/pga_strategy.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optimizers used in the PGA strategy."""
import collections
from typing import Callable, Optional, Tuple
import chex
import jax
import jax.numpy as jnp
from jax_verify.extensions.functional_lagrangian.inner_solvers.pga import utils
_State = collections.namedtuple('State', ['iteration', 'rng', 'state']) # pylint: disable=invalid-name
def grad_fn(
loss_fn: utils.LossFn,
) -> Callable[[chex.Array], Tuple[chex.Array, chex.Array]]:
"""Returns the analytical gradient as computed by `jax.grad`."""
def reduced_loss_fn(x):
loss = loss_fn(x)
return jnp.sum(loss), loss
return jax.grad(reduced_loss_fn, has_aux=True)
class IteratedFGSM:
"""L-infinity normalized steps."""
def __init__(self, learning_rate: chex.Numeric):
self._learning_rate = learning_rate
def init(self, loss_fn: utils.LossFn, rng: chex.PRNGKey,
x: chex.Array) -> _State:
del x
self._loss_fn = loss_fn
return _State(jnp.array(0, dtype=jnp.int32), rng, ())
def minimize(self, x: chex.Array,
state: _State) -> Tuple[chex.Array, chex.Array, _State]:
"""Performs a single minimization step."""
lr = jnp.array(self._learning_rate)
g, loss = grad_fn(self._loss_fn)(x)
if g is None:
raise ValueError('loss_fn does not depend on input.')
g = jnp.sign(g)
g, s = self._update(lr, g, state.state)
new_state = _State(state.iteration + 1, state.rng, s)
return x - g, loss, new_state
def _update(
self,
learning_rate: chex.Numeric,
gradients: chex.Array,
state: chex.Array,
) -> Tuple[chex.Array, chex.Array]:
return learning_rate.astype(gradients.dtype) * gradients, state # pytype: disable=attribute-error # numpy-scalars
class PGD:
"""Uses the above defined optimizers to minimize and loss function."""
def __init__(
self,
optimizer,
num_steps: int,
initialize_fn: Optional[utils.InitializeFn] = None,
project_fn: Optional[utils.ProjectFn] = None,
):
self._optimizer = optimizer
if initialize_fn is None:
initialize_fn = lambda rng, x: x
self._initialize_fn = initialize_fn
if project_fn is None:
project_fn = lambda x, origin_x: x
self._project_fn = project_fn
self._num_steps = num_steps
def __call__(
self,
loss_fn: utils.LossFn,
rng: chex.PRNGKey,
x: chex.Array,
) -> chex.Array:
def _optimize(rng, x):
"""Optimizes loss_fn."""
def body_fn(_, inputs):
opt_state, current_x = inputs
current_x, _, opt_state = self._optimizer.minimize(current_x, opt_state)
current_x = self._project_fn(current_x, x)
return opt_state, current_x
rng, next_rng = jax.random.split(rng)
opt_state = self._optimizer.init(loss_fn, next_rng, x)
current_x = self._project_fn(self._initialize_fn(rng, x), x)
_, current_x = jax.lax.fori_loop(0, self._num_steps, body_fn,
(opt_state, current_x))
return current_x
x = _optimize(rng, x)
return jax.lax.stop_gradient(x)
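# Illustration only (added): wiring IteratedFGSM into PGD on a toy per-example
# quadratic loss; the step size, iteration count and projection are made up.
def _example_pgd_on_quadratic():
  """Runs 20 projected sign-gradient steps towards 0.3 inside [0, 1]."""
  loss_fn = lambda x: jnp.sum(jnp.square(x - 0.3), axis=-1)
  project_fn = utils.linf_project_fn(epsilon=0.5, bounds=(0.0, 1.0))
  pgd = PGD(IteratedFGSM(0.1), num_steps=20, project_fn=project_fn)
  return pgd(loss_fn, jax.random.PRNGKey(0), jnp.zeros((2, 3)))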
class Restarted:
"""Repeats an optimization multiple times."""
def __init__(
self,
optimizer,
restarts_using_tiling: int = 1,
has_batch_dim: bool = True,
):
self._wrapped_optimizer = optimizer
if (isinstance(restarts_using_tiling, int) and restarts_using_tiling > 1 and
not has_batch_dim):
raise ValueError('Cannot use tiling when `has_batch_dim` is False.')
self._has_batch_dim = has_batch_dim
if (isinstance(restarts_using_tiling, int) and restarts_using_tiling < 1):
raise ValueError('Fewer than one restart requested.')
self._restarts_using_tiling = restarts_using_tiling
def __call__(
self,
loss_fn: utils.LossFn,
rng: chex.PRNGKey,
inputs: chex.Array,
) -> chex.Array:
"""Performs an optimization multiple times by tiling the inputs."""
if not self._has_batch_dim:
opt_inputs = self._wrapped_optimizer(loss_fn, rng, inputs)
opt_losses = loss_fn(opt_inputs)
return opt_inputs, opt_losses # pytype: disable=bad-return-type # numpy-scalars
# Tile the inputs and labels.
batch_size = inputs.shape[0]
# Tile inputs.
shape = inputs.shape[1:]
# Shape is [num_restarts * batch_size, ...].
inputs = jnp.tile(inputs, [self._restarts_using_tiling] + [1] * len(shape))
# Optimize.
opt_inputs = self._wrapped_optimizer(loss_fn, rng, inputs)
opt_losses = loss_fn(opt_inputs)
opt_losses = jnp.reshape(opt_losses,
[self._restarts_using_tiling, batch_size])
# Extract best.
i = jnp.argmin(opt_losses, axis=0)
j = jnp.arange(batch_size)
shape = opt_inputs.shape[1:]
return jnp.reshape(opt_inputs,
(self._restarts_using_tiling, batch_size) + shape)[i, j]
| jax_verify-master | jax_verify/extensions/functional_lagrangian/inner_solvers/pga/optimizer.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Square (https://arxiv.org/pdf/1912.00049)."""
from typing import Callable, List, Tuple
import chex
import jax
import jax.numpy as jnp
from jax_verify.extensions.functional_lagrangian.inner_solvers.pga import utils
def _schedule(values: List[float],
boundaries: List[int],
dtype=jnp.float32) -> Callable[[chex.Array], chex.Numeric]:
"""Schedule the value of p, the proportion of elements to be modified."""
large_step = max(boundaries) + 1
boundaries = boundaries + [large_step, large_step + 1]
num_values = len(values)
values = jnp.array(values, dtype=jnp.float32)
large_step = jnp.array([large_step] * len(boundaries), dtype=jnp.int32)
boundaries = jnp.array(boundaries, dtype=jnp.int32)
def _get(step):
"""Returns the value according to the current step and schedule."""
b = boundaries - jnp.minimum(step + 1, large_step + 1)
b = jnp.where(b < 0, large_step, b)
idx = jnp.minimum(jnp.argmin(b), num_values - 1)
return values[idx].astype(dtype)
return _get
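# Hedged illustration (added): `_schedule` returns a step-indexed lookup; with
# the invented values below it yields 0.8 before step 10, 0.4 before step 50
# and 0.2 afterwards.
def _example_p_schedule():
  """Evaluates a toy three-piece schedule at a few representative steps."""
  schedule = _schedule([0.8, 0.4, 0.2], [10, 50])
  return schedule(jnp.array(0)), schedule(jnp.array(30)), schedule(jnp.array(60))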
class Square:
"""Performs a blackbox optimization as in https://arxiv.org/pdf/1912.00049."""
def __init__(
self,
num_steps: int,
epsilon: chex.Numeric,
initialize_fn: utils.InitializeFn,
bounds: Tuple[chex.ArrayTree, chex.ArrayTree],
):
"""Creates a Square attack."""
self._num_steps = num_steps
self._initialize_fn = initialize_fn
self._project_fn = utils.linf_project_fn(epsilon=epsilon, bounds=bounds)
self._epsilon = epsilon
self._p_init = p = .8
self._p_schedule = _schedule([
p, p / 2, p / 4, p / 4, p / 8, p / 16, p / 32, p / 64, p / 128, p / 256,
p / 512
], [10, 50, 200, 500, 1000, 2000, 4000, 6000, 8000])
def __call__(
self,
loss_fn: utils.LossFn,
rng: chex.PRNGKey,
x: chex.Array,
) -> chex.Array:
if len(x.shape) != 4:
raise ValueError(f'Unsupported tensor shape: {x.shape}')
h, w, c = x.shape[1:]
batch_size = x.shape[0]
broadcast_shape = [batch_size] + [1] * (len(x.shape) - 1)
min_size = 1
def init_fn(rng):
init_x = self._project_fn(self._initialize_fn(rng, x), x)
init_loss = loss_fn(init_x)
return init_x, init_loss
def random_window_mask(rng, size, dtype):
height_rng, width_rng = jax.random.split(rng)
height_offset = jax.random.randint(
height_rng,
shape=(batch_size, 1, 1, 1),
minval=0,
maxval=h - size,
dtype=jnp.int32)
width_offset = jax.random.randint(
width_rng,
shape=(batch_size, 1, 1, 1),
minval=0,
maxval=w - size,
dtype=jnp.int32)
h_range = jnp.reshape(jnp.arange(h), [1, h, 1, 1])
w_range = jnp.reshape(jnp.arange(w), [1, 1, w, 1])
return jnp.logical_and(
jnp.logical_and(height_offset <= h_range,
h_range < height_offset + size),
jnp.logical_and(width_offset <= w_range,
w_range < width_offset + size)).astype(dtype)
def random_linf_perturbation(rng, x, size):
rng, perturbation_rng = jax.random.split(rng)
perturbation = jax.random.randint(
perturbation_rng, shape=(batch_size, 1, 1, c), minval=0,
maxval=2) * 2 - 1
return random_window_mask(rng, size, x.dtype) * perturbation
def body_fn(i, loop_inputs):
best_x, best_loss, rng = loop_inputs
p = self._get_p(i)
size = jnp.maximum(
jnp.round(jnp.sqrt(p * h * w / c)).astype(jnp.int32), min_size)
rng, next_rng = jax.random.split(rng)
perturbation = random_linf_perturbation(next_rng, best_x, size)
current_x = best_x + perturbation * self._epsilon
current_x = self._project_fn(current_x, x)
loss = loss_fn(current_x)
cond = loss < best_loss
best_x = jnp.where(jnp.reshape(cond, broadcast_shape), current_x, best_x)
best_loss = jnp.where(cond, loss, best_loss)
return best_x, best_loss, rng
rng, next_rng = jax.random.split(rng)
best_x, best_loss = init_fn(next_rng)
loop_inputs = (best_x, best_loss, rng)
return jax.lax.fori_loop(0, self._num_steps, body_fn, loop_inputs)[0]
def _get_p(self, step):
"""Schedule on `p`."""
step = step / self._num_steps * 10000.
return self._p_schedule(jnp.array(step))
| jax_verify-master | jax_verify/extensions/functional_lagrangian/inner_solvers/pga/square.py |