#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test warns the user when not initialized properly."""
__author__ = '[email protected] (Zhanyong Wan)'
import gtest_test_utils
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_uninitialized_test_')
def Assert(condition):
if not condition:
raise AssertionError
def AssertEq(expected, actual):
if expected != actual:
print 'Expected: %s' % (expected,)
print ' Actual: %s' % (actual,)
raise AssertionError
def TestExitCodeAndOutput(command):
"""Runs the given command and verifies its exit code and output."""
# Verifies that 'command' exits with code 1.
p = gtest_test_utils.Subprocess(command)
Assert(p.exited)
AssertEq(1, p.exit_code)
Assert('InitGoogleTest' in p.output)
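# --- Editorial example (not part of the original file) -----------------------
# A minimal, self-contained sketch of the outcome the checks above expect: the
# uninitialized test binary should exit normally with code 1 and mention
# InitGoogleTest in its output. The _FakeResult class and the sample output
# string are assumptions made for illustration; the real script obtains the
# result object from gtest_test_utils.Subprocess.
class _FakeResult(object):
  def __init__(self, exited, exit_code, output):
    self.exited = exited
    self.exit_code = exit_code
    self.output = output

def _DemoExpectedOutcome():
  p = _FakeResult(exited=True, exit_code=1,
                  output='InitGoogleTest() must be called before running tests')
  Assert(p.exited)
  AssertEq(1, p.exit_code)
  Assert('InitGoogleTest' in p.output)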
class GTestUninitializedTest(gtest_test_utils.TestCase):
def testExitCodeAndOutput(self):
TestExitCodeAndOutput(COMMAND)
if __name__ == '__main__':
gtest_test_utils.Main()
# End of dimmwitted-master/lib/gtest-1.7.0/test/gtest_uninitialized_test.py
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def UsesColor(term, color_env_var, color_flag):
"""Runs gtest_color_test_ and returns its exit code."""
SetEnvVar('TERM', term)
SetEnvVar(COLOR_ENV_VAR, color_env_var)
if color_flag is None:
args = []
else:
args = ['--%s=%s' % (COLOR_FLAG, color_flag)]
p = gtest_test_utils.Subprocess([COMMAND] + args)
return not p.exited or p.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
def testNoEnvVarNoFlag(self):
"""Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
if not IS_WINDOWS:
self.assert_(not UsesColor('dumb', None, None))
self.assert_(not UsesColor('emacs', None, None))
self.assert_(not UsesColor('xterm-mono', None, None))
self.assert_(not UsesColor('unknown', None, None))
self.assert_(not UsesColor(None, None, None))
self.assert_(UsesColor('linux', None, None))
self.assert_(UsesColor('cygwin', None, None))
self.assert_(UsesColor('xterm', None, None))
self.assert_(UsesColor('xterm-color', None, None))
self.assert_(UsesColor('xterm-256color', None, None))
def testFlagOnly(self):
"""Tests the case when there's --gtest_color but not GTEST_COLOR."""
self.assert_(not UsesColor('dumb', None, 'no'))
self.assert_(not UsesColor('xterm-color', None, 'no'))
if not IS_WINDOWS:
self.assert_(not UsesColor('emacs', None, 'auto'))
self.assert_(UsesColor('xterm', None, 'auto'))
self.assert_(UsesColor('dumb', None, 'yes'))
self.assert_(UsesColor('xterm', None, 'yes'))
def testEnvVarOnly(self):
"""Tests the case when there's GTEST_COLOR but not --gtest_color."""
self.assert_(not UsesColor('dumb', 'no', None))
self.assert_(not UsesColor('xterm-color', 'no', None))
if not IS_WINDOWS:
self.assert_(not UsesColor('dumb', 'auto', None))
self.assert_(UsesColor('xterm-color', 'auto', None))
self.assert_(UsesColor('dumb', 'yes', None))
self.assert_(UsesColor('xterm-color', 'yes', None))
def testEnvVarAndFlag(self):
"""Tests the case when there are both GTEST_COLOR and --gtest_color."""
self.assert_(not UsesColor('xterm-color', 'no', 'no'))
self.assert_(UsesColor('dumb', 'no', 'yes'))
self.assert_(UsesColor('xterm-color', 'no', 'auto'))
def testAliasesOfYesAndNo(self):
"""Tests using aliases in specifying --gtest_color."""
self.assert_(UsesColor('dumb', None, 'true'))
self.assert_(UsesColor('dumb', None, 'YES'))
self.assert_(UsesColor('dumb', None, 'T'))
self.assert_(UsesColor('dumb', None, '1'))
self.assert_(not UsesColor('xterm', None, 'f'))
self.assert_(not UsesColor('xterm', None, 'false'))
self.assert_(not UsesColor('xterm', None, '0'))
self.assert_(not UsesColor('xterm', None, 'unknown'))
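# --- Editorial sketch (not part of the original file) -------------------------
# A rough pure-Python restatement of the precedence the tests above encode: an
# explicit --gtest_color value wins over GTEST_COLOR, and only an 'auto' (or
# absent) setting falls back to inspecting TERM. The helper name and the term
# list are editorial assumptions distilled from the expectations in this file,
# not Google Test's actual implementation.
_COLOR_TERMS = ('linux', 'cygwin', 'xterm', 'xterm-color', 'xterm-256color')

def _WouldUseColor(term, color_env_var, color_flag):
  setting = color_flag
  if setting is None:
    setting = color_env_var
  if setting is not None and setting.lower() != 'auto':
    return setting.lower() in ('yes', 'true', 't', '1')
  return term in _COLOR_TERMS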
if __name__ == '__main__':
gtest_test_utils.Main()
# End of dimmwitted-master/lib/gtest-1.7.0/test/gtest_color_test.py
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import gtest_test_utils
import os
import sys
# Constants.
IS_WINDOWS = os.name == 'nt'
# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'
# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'
# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'
# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'
# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_break_on_failure_unittest_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
def Run(command):
"""Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""
p = gtest_test_utils.Subprocess(command, env=environ)
if p.terminated_by_signal:
return 1
else:
return 0
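# --- Editorial note (not part of the original file) ---------------------------
# Background for Run(): with the standard subprocess module, a negative
# returncode means the child was killed by a signal (SIGSEGV shows up as -11
# on Linux, for example); gtest_test_utils.Subprocess surfaces the same fact
# through its terminated_by_signal attribute. A hedged, POSIX-only
# illustration, left commented out so it never runs as part of the test:
#
#   import subprocess
#   rc = subprocess.call(['sh', '-c', 'kill -SEGV $$'])
#   killed_by_signal = rc < 0   # True; rc is the negated signal number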
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable or
the --gtest_break_on_failure flag to turn assertion failures into
segmentation faults.
"""
def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
"""Runs gtest_break_on_failure_unittest_ and verifies that it does
(or does not) have a seg-fault.
Args:
env_var_value: value of the GTEST_BREAK_ON_FAILURE environment
variable; None if the variable should be unset.
flag_value: value of the --gtest_break_on_failure flag;
None if the flag should not be present.
expect_seg_fault: 1 if the program is expected to generate a seg-fault;
0 otherwise.
"""
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
else:
flag = '--%s' % BREAK_ON_FAILURE_FLAG
command = [EXE_PATH]
if flag:
command.append(flag)
if expect_seg_fault:
should_or_not = 'should'
else:
should_or_not = 'should not'
has_seg_fault = Run(command)
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
(BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
should_or_not))
self.assert_(has_seg_fault == expect_seg_fault, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None,
flag_value=None,
expect_seg_fault=0)
def testEnvVar(self):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value=None,
expect_seg_fault=1)
def testFlag(self):
"""Tests using the --gtest_break_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
def testFlagOverridesEnvVar(self):
"""Tests that the flag overrides the environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='0',
flag_value='1',
expect_seg_fault=1)
self.RunAndVerify(env_var_value='1',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
def testBreakOnFailureOverridesThrowOnFailure(self):
"""Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)
if IS_WINDOWS:
def testCatchExceptionsDoesNotInterfere(self):
"""Tests that gtest_catch_exceptions doesn't interfere."""
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
if __name__ == '__main__':
gtest_test_utils.Main()
# End of dimmwitted-master/lib/gtest-1.7.0/test/gtest_break_on_failure_unittest.py
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import gtest_test_utils
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
re.sub('^--', '/', LIST_TESTS_FLAG),
re.sub('_', '-', LIST_TESTS_FLAG)]
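# --- Editorial note (not part of the original file) ---------------------------
# For reference, the three incorrect variants computed above expand to:
#   '-gtest_list_tests'    (single dash instead of '--')
#   '/gtest_list_tests'    (Windows-style slash prefix)
#   '--gtest-list-tests'   (dashes instead of underscores)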
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
[PROGRAM_PATH, LIST_TESTS_FLAG]).output
# The help message must match this regex.
HELP_REGEX = re.compile(
FLAG_PREFIX + r'list_tests.*' +
FLAG_PREFIX + r'filter=.*' +
FLAG_PREFIX + r'also_run_disabled_tests.*' +
FLAG_PREFIX + r'repeat=.*' +
FLAG_PREFIX + r'shuffle.*' +
FLAG_PREFIX + r'random_seed=.*' +
FLAG_PREFIX + r'color=.*' +
FLAG_PREFIX + r'print_time.*' +
FLAG_PREFIX + r'output=.*' +
FLAG_PREFIX + r'break_on_failure.*' +
FLAG_PREFIX + r'throw_on_failure.*' +
FLAG_PREFIX + r'catch_exceptions=0.*',
re.DOTALL)
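# --- Editorial note (not part of the original file) ---------------------------
# Because HELP_REGEX is compiled with re.DOTALL, each '.*' above also matches
# newlines, so the regex only requires the listed flags to appear in the help
# text in this order, with arbitrary text (including line breaks) in between.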
def RunWithFlag(flag):
"""Runs gtest_help_test_ with the given flag.
Returns:
the exit code and the text output as a tuple.
Args:
flag: the command-line flag to pass to gtest_help_test_, or None.
"""
if flag is None:
command = [PROGRAM_PATH]
else:
command = [PROGRAM_PATH, flag]
child = gtest_test_utils.Subprocess(command)
return child.exit_code, child.output
class GTestHelpTest(gtest_test_utils.TestCase):
"""Tests the --help flag and its equivalent forms."""
def TestHelpFlag(self, flag):
"""Verifies correct behavior when help flag is specified.
The right message must be printed and the tests must be
skipped when the given flag is specified.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assertEquals(0, exit_code)
self.assert_(HELP_REGEX.search(output), output)
if IS_LINUX:
self.assert_(STREAM_RESULT_TO_FLAG in output, output)
else:
self.assert_(STREAM_RESULT_TO_FLAG not in output, output)
if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
else:
self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)
def TestNonHelpFlag(self, flag):
"""Verifies correct behavior when no help flag is specified.
Verifies that when no help flag is specified, the tests are run
and the help message is not printed.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assert_(exit_code != 0)
self.assert_(not HELP_REGEX.search(output), output)
def testPrintsHelpWithFullFlag(self):
self.TestHelpFlag('--help')
def testPrintsHelpWithShortFlag(self):
self.TestHelpFlag('-h')
def testPrintsHelpWithQuestionFlag(self):
self.TestHelpFlag('-?')
def testPrintsHelpWithWindowsStyleQuestionFlag(self):
self.TestHelpFlag('/?')
def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
self.TestHelpFlag(UNKNOWN_FLAG)
def testPrintsHelpWithIncorrectFlagStyle(self):
for incorrect_flag in INCORRECT_FLAG_VARIANTS:
self.TestHelpFlag(incorrect_flag)
def testRunsTestsWithoutHelpFlag(self):
"""Verifies that when no help flag is specified, the tests are run
and the help message is not printed."""
self.TestNonHelpFlag(None)
def testRunsTestsWithGtestInternalFlag(self):
"""Verifies that the tests are run and no help message is printed when
a flag starting with Google Test prefix and 'internal_' is supplied."""
self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
if __name__ == '__main__':
gtest_test_utils.Main()
# End of dimmwitted-master/lib/gtest-1.7.0/test/gtest_help_test.py
#!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
TEST_FILTER = 'A*.A:A*.B:C*'
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []
SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
return '--gtest_also_run_disabled_tests'
def FilterFlag(test_filter):
return '--gtest_filter=%s' % (test_filter,)
def RepeatFlag(n):
return '--gtest_repeat=%s' % (n,)
def ShuffleFlag():
return '--gtest_shuffle'
def RandomSeedFlag(n):
return '--gtest_random_seed=%s' % (n,)
def RunAndReturnOutput(extra_env, args):
"""Runs the test program and returns its output."""
environ_copy = os.environ.copy()
environ_copy.update(extra_env)
return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy).output
def GetTestsForAllIterations(extra_env, args):
"""Runs the test program and returns a list of test lists.
Args:
extra_env: a map from environment variables to their values
args: command line flags to pass to gtest_shuffle_test_
Returns:
A list where the i-th element is the list of tests run in the i-th
test iteration.
"""
test_iterations = []
for line in RunAndReturnOutput(extra_env, args).split('\n'):
if line.startswith('----'):
tests = []
test_iterations.append(tests)
elif line.strip():
tests.append(line.strip()) # 'TestCaseName.TestName'
return test_iterations
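# --- Editorial example (not part of the original file) -----------------------
# Based on the parser above: iterations are delimited by lines starting with
# '----', and every other non-blank line is taken as a full test name. Output
# shaped like (illustrative)
#   ----
#   FooTest.A
#   FooTest.B
#   ----
#   FooTest.B
#   FooTest.A
# would be returned as [['FooTest.A', 'FooTest.B'], ['FooTest.B', 'FooTest.A']].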
def GetTestCases(tests):
"""Returns a list of test cases in the given full test names.
Args:
tests: a list of full test names
Returns:
A list of test cases from 'tests', in their original order.
Duplicates are removed (each test case is kept at its first occurrence).
"""
test_cases = []
for test in tests:
test_case = test.split('.')[0]
if not test_case in test_cases:
test_cases.append(test_case)
return test_cases
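# --- Editorial example (not part of the original file) -----------------------
#   GetTestCases(['FooTest.A', 'BarTest.B', 'FooTest.C'])
# returns ['FooTest', 'BarTest']: each test case name is kept once, at its
# first occurrence, preserving the original order.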
def CalculateTestLists():
"""Calculates the list of tests run under different flags."""
if not ALL_TESTS:
ALL_TESTS.extend(
GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])
if not ACTIVE_TESTS:
ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])
if not FILTERED_TESTS:
FILTERED_TESTS.extend(
GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])
if not SHARDED_TESTS:
SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[])[0])
if not SHUFFLED_ALL_TESTS:
SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
{}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_ACTIVE_TESTS:
SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_FILTERED_TESTS:
SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])
if not SHUFFLED_SHARDED_TESTS:
SHUFFLED_SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(1)])[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
"""Tests test shuffling."""
def setUp(self):
CalculateTestLists()
def testShufflePreservesNumberOfTests(self):
self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))
def testShuffleChangesTestOrder(self):
self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
SHUFFLED_FILTERED_TESTS)
self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
SHUFFLED_SHARDED_TESTS)
def testShuffleChangesTestCaseOrder(self):
self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
GetTestCases(SHUFFLED_ALL_TESTS))
self.assert_(
GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
GetTestCases(SHUFFLED_ACTIVE_TESTS))
self.assert_(
GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
GetTestCases(SHUFFLED_FILTERED_TESTS))
self.assert_(
GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
GetTestCases(SHUFFLED_SHARDED_TESTS))
def testShuffleDoesNotRepeatTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
'%s appears more than once' % (test,))
def testShuffleDoesNotCreateNewTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))
def testShuffleIncludesAllTests(self):
for test in ALL_TESTS:
self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
for test in ACTIVE_TESTS:
self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
for test in FILTERED_TESTS:
self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
for test in SHARDED_TESTS:
self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))
def testShuffleLeavesDeathTestsAtFront(self):
non_death_test_found = False
for test in SHUFFLED_ACTIVE_TESTS:
if 'DeathTest.' in test:
self.assert_(not non_death_test_found,
'%s appears after a non-death test' % (test,))
else:
non_death_test_found = True
def _VerifyTestCasesDoNotInterleave(self, tests):
test_cases = []
for test in tests:
[test_case, _] = test.split('.')
if test_cases and test_cases[-1] != test_case:
test_cases.append(test_case)
self.assertEqual(1, test_cases.count(test_case),
'Test case %s is not grouped together in %s' %
(test_case, tests))
def testShuffleDoesNotInterleaveTestCases(self):
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)
def testShuffleRestoresOrderAfterEachIteration(self):
# Get the test lists in all 3 iterations, using random seed 1, 2,
# and 3 respectively. Google Test picks a different seed in each
# iteration, and this test depends on the current implementation
# picking successive numbers. This dependency is not ideal, but
# makes the test much easier to write.
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
# Make sure running the tests with random seed 1 gets the same
# order as in iteration 1 above.
[tests_with_seed1] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])
self.assertEqual(tests_in_iteration1, tests_with_seed1)
# Make sure running the tests with random seed 2 gets the same
# order as in iteration 2 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 2.
[tests_with_seed2] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(2)])
self.assertEqual(tests_in_iteration2, tests_with_seed2)
# Make sure running the tests with random seed 3 gets the same
# order as in iteration 3 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 3.
[tests_with_seed3] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(3)])
self.assertEqual(tests_in_iteration3, tests_with_seed3)
def testShuffleGeneratesNewOrderInEachIteration(self):
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
self.assert_(tests_in_iteration1 != tests_in_iteration2,
tests_in_iteration1)
self.assert_(tests_in_iteration1 != tests_in_iteration3,
tests_in_iteration1)
self.assert_(tests_in_iteration2 != tests_in_iteration3,
tests_in_iteration2)
def testShuffleShardedTestsPreservesPartition(self):
# If we run M tests on N shards, the same M tests should be run in
# total, regardless of the random seeds used by the shards.
[tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '0'},
[ShuffleFlag(), RandomSeedFlag(1)])
[tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(20)])
[tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '2'},
[ShuffleFlag(), RandomSeedFlag(25)])
sorted_sharded_tests = tests1 + tests2 + tests3
sorted_sharded_tests.sort()
sorted_active_tests = []
sorted_active_tests.extend(ACTIVE_TESTS)
sorted_active_tests.sort()
self.assertEqual(sorted_active_tests, sorted_sharded_tests)
if __name__ == '__main__':
gtest_test_utils.Main()
# End of dimmwitted-master/lib/gtest-1.7.0/test/gtest_shuffle_test.py
#!/usr/bin/env python
#
# Copyright 2005 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test test filters.
A user can specify which test(s) in a Google Test program to run via either
the GTEST_FILTER environment variable or the --gtest_filter flag.
This script tests such functionality by invoking
gtest_filter_unittest_ (a program written with Google Test) with different
environments and command line flags.
Note that test sharding may also influence which tests are filtered. Therefore,
we test that here also.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import sets
import sys
import gtest_test_utils
# Constants.
# Checks if this platform can pass empty environment variables to child
# processes. We set an env variable to an empty string and invoke a python
# script in a subprocess to print whether the variable is STILL in
# os.environ. We then use 'eval' to parse the child's output so that an
exception is thrown if the input is anything other than 'True' or 'False'.
os.environ['EMPTY_VAR'] = ''
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print \'EMPTY_VAR\' in os.environ'])
CAN_PASS_EMPTY_ENV = eval(child.output)
# Check if this platform can unset environment variables in child processes.
# We set an env variable to a non-empty string, unset it, and invoke
# a python script in a subprocess to print whether the variable
# is NO LONGER in os.environ.
# We use 'eval' to parse the child's output so that an exception
# is thrown if the input is neither 'True' nor 'False'.
os.environ['UNSET_VAR'] = 'X'
del os.environ['UNSET_VAR']
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print \'UNSET_VAR\' not in os.environ'])
CAN_UNSET_ENV = eval(child.output)
# Checks if we should test with an empty filter. This doesn't
# make sense on platforms that cannot pass empty env variables (Win32)
# and on platforms that cannot unset variables (since we cannot tell
# the difference between "" and NULL -- Borland and Solaris < 5.10)
CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV)
# The environment variable for specifying the test filters.
FILTER_ENV_VAR = 'GTEST_FILTER'
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'
# The command line flag for specifying the test filters.
FILTER_FLAG = 'gtest_filter'
# The command line flag for including disabled tests.
ALSO_RUN_DISABED_TESTS_FLAG = 'gtest_also_run_disabled_tests'
# Command to run the gtest_filter_unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_')
# Regex for determining whether parameterized tests are enabled in the binary.
PARAM_TEST_REGEX = re.compile(r'/ParamTest')
# Regex for parsing test case names from Google Test's output.
TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)')
# Regex for parsing test names from Google Test's output.
TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)')
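# --- Editorial note (not part of the original file) ---------------------------
# For readers unfamiliar with Google Test's console output, the two regexes
# above are intended to match lines shaped like (illustrative):
#   '[----------] 2 tests from FooTest'   -> TEST_CASE_REGEX captures 'FooTest'
#   '[ RUN      ] FooTest.Abc'            -> TEST_REGEX captures 'Abc'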
# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'
# Indicates whether Google Test supports death tests.
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
[COMMAND, LIST_TESTS_FLAG]).output
# Full names of all tests in gtest_filter_unittests_.
PARAM_TESTS = [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestX/1',
'SeqQ/ParamTest.TestY/0',
'SeqQ/ParamTest.TestY/1',
]
DISABLED_TESTS = [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
]
if SUPPORTS_DEATH_TESTS:
DEATH_TESTS = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
]
else:
DEATH_TESTS = []
# All the non-disabled tests.
ACTIVE_TESTS = [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS
param_tests_present = None
# Utilities.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def RunAndReturnOutput(args = None):
"""Runs the test program and returns its output."""
return gtest_test_utils.Subprocess([COMMAND] + (args or []),
env=environ).output
def RunAndExtractTestList(args = None):
"""Runs the test program and returns its exit code and a list of tests run."""
p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
tests_run = []
test_case = ''
test = ''
for line in p.output.split('\n'):
match = TEST_CASE_REGEX.match(line)
if match is not None:
test_case = match.group(1)
else:
match = TEST_REGEX.match(line)
if match is not None:
test = match.group(1)
tests_run.append(test_case + '.' + test)
return (tests_run, p.exit_code)
def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
"""Runs the given function and arguments in a modified environment."""
try:
original_env = environ.copy()
environ.update(extra_env)
return function(*args, **kwargs)
finally:
environ.clear()
environ.update(original_env)
def RunWithSharding(total_shards, shard_index, command):
"""Runs a test program shard and returns exit code and a list of tests run."""
extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index),
TOTAL_SHARDS_ENV_VAR: str(total_shards)}
return InvokeWithModifiedEnv(extra_env, RunAndExtractTestList, command)
# The unit test.
class GTestFilterUnitTest(gtest_test_utils.TestCase):
"""Tests the env variable or the command line flag to filter tests."""
# Utilities.
def AssertSetEqual(self, lhs, rhs):
"""Asserts that two sets are equal."""
for elem in lhs:
self.assert_(elem in rhs, '%s in %s' % (elem, rhs))
for elem in rhs:
self.assert_(elem in lhs, '%s in %s' % (elem, lhs))
def AssertPartitionIsValid(self, set_var, list_of_sets):
"""Asserts that list_of_sets is a valid partition of set_var."""
full_partition = []
for slice_var in list_of_sets:
full_partition.extend(slice_var)
self.assertEqual(len(set_var), len(full_partition))
self.assertEqual(sets.Set(set_var), sets.Set(full_partition))
def AdjustForParameterizedTests(self, tests_to_run):
"""Adjust tests_to_run in case value parameterized tests are disabled."""
global param_tests_present
if not param_tests_present:
return list(sets.Set(tests_to_run) - sets.Set(PARAM_TESTS))
else:
return tests_to_run
def RunAndVerify(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for a given filter."""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# First, tests using the environment variable.
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
tests_run = RunAndExtractTestList()[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, tests_to_run)
# pylint: enable-msg=C6403
# Next, tests using the command line flag.
if gtest_filter is None:
args = []
else:
args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)]
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
args=None, check_exit_0=False):
"""Checks that binary runs correct tests for the given filter and shard.
Runs all shards of gtest_filter_unittest_ with the given filter, and
verifies that the right set of tests were run. The union of tests run
on each shard should be identical to tests_to_run, without duplicates.
Args:
gtest_filter: A filter to apply to the tests.
total_shards: A total number of shards to split test run into.
tests_to_run: A set of tests expected to run.
args: Arguments to pass to the test binary.
check_exit_0: When set to a true value, make sure that all shards
return 0.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
partition = []
for i in range(0, total_shards):
(tests_run, exit_code) = RunWithSharding(total_shards, i, args)
if check_exit_0:
self.assertEqual(0, exit_code)
partition.append(tests_run)
self.AssertPartitionIsValid(tests_to_run, partition)
SetEnvVar(FILTER_ENV_VAR, None)
# pylint: enable-msg=C6403
def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for the given filter.
Runs gtest_filter_unittest_ with the given filter, and enables
disabled tests. Verifies that the right set of tests were run.
Args:
gtest_filter: A filter to apply to the tests.
tests_to_run: A set of tests expected to run.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Construct the command line.
args = ['--%s' % ALSO_RUN_DISABED_TESTS_FLAG]
if gtest_filter is not None:
args.append('--%s=%s' % (FILTER_FLAG, gtest_filter))
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def setUp(self):
"""Sets up test case.
Determines whether value-parameterized tests are enabled in the binary and
sets the flags accordingly.
"""
global param_tests_present
if param_tests_present is None:
param_tests_present = PARAM_TEST_REGEX.search(
RunAndReturnOutput()) is not None
def testDefaultBehavior(self):
"""Tests the behavior of not specifying the filter."""
self.RunAndVerify(None, ACTIVE_TESTS)
def testDefaultBehaviorWithShards(self):
"""Tests the behavior without the filter, with sharding enabled."""
self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS)
def testEmptyFilter(self):
"""Tests an empty filter."""
self.RunAndVerify('', [])
self.RunAndVerifyWithSharding('', 1, [])
self.RunAndVerifyWithSharding('', 2, [])
def testBadFilter(self):
"""Tests a filter that matches nothing."""
self.RunAndVerify('BadFilter', [])
self.RunAndVerifyAllowingDisabled('BadFilter', [])
def testFullName(self):
"""Tests filtering by full name."""
self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz'])
def testUniversalFilters(self):
"""Tests filters that match everything."""
self.RunAndVerify('*', ACTIVE_TESTS)
self.RunAndVerify('*.*', ACTIVE_TESTS)
self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS)
self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS)
self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS)
def testFilterByTestCase(self):
"""Tests filtering by test case name."""
self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz'])
BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
self.RunAndVerify('BazTest.*', BAZ_TESTS)
self.RunAndVerifyAllowingDisabled('BazTest.*',
BAZ_TESTS + ['BazTest.DISABLED_TestC'])
def testFilterByTest(self):
"""Tests filtering by test name."""
self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne'])
def testFilterDisabledTests(self):
"""Select only the disabled tests to run."""
self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
['DISABLED_FoobarTest.Test1'])
self.RunAndVerify('*DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)
self.RunAndVerify('*.DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.DISABLED_Test2',
])
self.RunAndVerify('DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('DISABLED_*', [
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
])
def testWildcardInTestCaseName(self):
"""Tests using wildcard in the test case name."""
self.RunAndVerify('*a*.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS)
def testWildcardInTestName(self):
"""Tests using wildcard in the test name."""
self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testFilterWithoutDot(self):
"""Tests a filter that has no '.' in it."""
self.RunAndVerify('*z*', [
'FooTest.Xyz',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
])
def testTwoPatterns(self):
"""Tests filters that consist of two patterns."""
self.RunAndVerify('Foo*.*:*A*', [
'FooTest.Abc',
'FooTest.Xyz',
'BazTest.TestA',
])
# An empty pattern + a non-empty one
self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testThreePatterns(self):
"""Tests filters that consist of three patterns."""
self.RunAndVerify('*oo*:*A*:*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
'BazTest.TestA',
])
# The 2nd pattern is empty.
self.RunAndVerify('*oo*::*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
])
# The last 2 patterns are empty.
self.RunAndVerify('*oo*::', [
'FooTest.Abc',
'FooTest.Xyz',
])
def testNegativeFilters(self):
self.RunAndVerify('*-BazTest.TestOne', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('*-FooTest.Abc:BazTest.*', [
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('BarTest.*-BarTest.TestOne', [
'BarTest.TestTwo',
'BarTest.TestThree',
])
# Tests without leading '*'.
self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
# Value parameterized tests.
self.RunAndVerify('*/*', PARAM_TESTS)
# Value parameterized tests filtering by the sequence name.
self.RunAndVerify('SeqP/*', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
])
# Value parameterized tests filtering by the test name.
self.RunAndVerify('*/0', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestY/0',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestY/0',
])
def testFlagOverridesEnvVar(self):
"""Tests that the filter flag overrides the filtering env. variable."""
SetEnvVar(FILTER_ENV_VAR, 'Foo*')
args = ['--%s=%s' % (FILTER_FLAG, '*One')]
tests_run = RunAndExtractTestList(args)[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne'])
def testShardStatusFileIsCreated(self):
"""Tests that the shard file is created if specified in the environment."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
InvokeWithModifiedEnv(extra_env, RunAndReturnOutput)
finally:
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
def testShardStatusFileIsCreatedWithListTests(self):
"""Tests that the shard file is created with the "list_tests" flag."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file2')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
output = InvokeWithModifiedEnv(extra_env,
RunAndReturnOutput,
[LIST_TESTS_FLAG])
finally:
# This assertion ensures that Google Test enumerated the tests as
# opposed to running them.
self.assert_('[==========]' not in output,
'Unexpected output during test enumeration.\n'
'Please ensure that LIST_TESTS_FLAG is assigned the\n'
'correct flag value for listing Google Test tests.')
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
if SUPPORTS_DEATH_TESTS:
def testShardingWorksWithDeathTests(self):
"""Tests integration with death tests and sharding."""
gtest_filter = 'HasDeathTest.*:SeqP/*'
expected_tests = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
]
for flag in ['--gtest_death_test_style=threadsafe',
'--gtest_death_test_style=fast']:
self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
check_exit_0=True, args=[flag])
self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
check_exit_0=True, args=[flag])
if __name__ == '__main__':
gtest_test_utils.Main()
# End of dimmwitted-master/lib/gtest-1.7.0/test/gtest_filter_unittest.py
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = '[email protected] (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# The environment variable for specifying the path to the premature-exit file.
PREMATURE_EXIT_FILE_ENV_VAR = 'TEST_PREMATURE_EXIT_FILE'
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets/unsets an environment variable to a given value."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
"""Parses and strips Google Test flags from argv. This is idempotent."""
# Suppresses the lint complaint about a global variable since we need it
# here to maintain module-wide state.
global _gtest_flags_are_parsed # pylint: disable-msg=W0603
if _gtest_flags_are_parsed:
return
_gtest_flags_are_parsed = True
for flag in _flag_map:
# The environment variable overrides the default value.
if flag.upper() in os.environ:
_flag_map[flag] = os.environ[flag.upper()]
# The command line flag overrides the environment variable.
i = 1 # Skips the program name.
while i < len(argv):
prefix = '--' + flag + '='
if argv[i].startswith(prefix):
_flag_map[flag] = argv[i][len(prefix):]
del argv[i]
break
else:
# We don't increment i in case we just found a --gtest_* flag
# and removed it from argv.
i += 1
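# --- Editorial example (not part of the original file) -----------------------
# Hedged illustration of the flag handling above (the path is hypothetical):
# with sys.argv == ['some_test.py', '--build_dir=/tmp/out'], the loop sets
# _flag_map['build_dir'] to '/tmp/out' and strips that argument from argv,
# while a BUILD_DIR environment variable would have supplied the value that
# the command line flag overrides.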
def GetFlag(flag):
"""Returns the value of the given flag."""
# In case GetFlag() is called before Main(), we always call
# _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
# are parsed.
_ParseAndStripGTestFlags(sys.argv)
return _flag_map[flag]
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return os.path.abspath(GetFlag('source_dir'))
def GetBuildDir():
"""Returns the absolute path of the directory where the test binaries are."""
return os.path.abspath(GetFlag('build_dir'))
_temp_dir = None
def _RemoveTempDir():
if _temp_dir:
shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
"""Returns a directory for temporary files."""
global _temp_dir
if not _temp_dir:
_temp_dir = tempfile.mkdtemp()
return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
build_dir: directory where to look for executables, by default
the result of GetBuildDir().
Returns:
The absolute path of the test binary.
"""
path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
executable_name))
if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
path += '.exe'
if not os.path.exists(path):
message = (
'Unable to find the test binary. Please make sure to provide a path\n'
'to the binary via the --build_dir flag or the BUILD_DIR\n'
'environment variable.')
print >> sys.stderr, message
sys.exit(1)
return path
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
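# --- Editorial example (not part of the original file) -----------------------
# Hedged usage sketch for GetExitStatus(), left commented out; the shell
# commands assume a POSIX environment:
#
#   GetExitStatus(os.system('exit 7'))      # 7: WEXITSTATUS of the raw status
#   GetExitStatus(os.system('kill -9 $$'))  # -1: the shell never called exit()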
class Subprocess:
def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
"""Changes into a specified directory, if provided, and executes a command.
Restores the old directory afterwards.
Args:
command: The command to run, in the form of sys.argv.
working_dir: The directory to change into.
capture_stderr: Determines whether to capture stderr in the output member
or to discard it.
env: Dictionary with environment to pass to the subprocess.
Returns:
An object that represents outcome of the executed process. It has the
following attributes:
terminated_by_signal True iff the child process has been terminated
by a signal.
signal Signal that terminated the child process.
exited True iff the child process exited normally.
exit_code The code with which the child process exited.
output Child process's stdout and stderr output
combined in a string.
"""
# The subprocess module is the preferable way of running programs
# since it is available and behaves consistently on all platforms,
# including Windows. But it is only available starting in python 2.4.
# In earlier python versions, we revert to the popen2 module, which is
# available in python 2.0 and later but doesn't provide required
# functionality (Popen4) under Windows. This allows us to support Mac
# OS X 10.4 Tiger, which has python 2.3 installed.
if _SUBPROCESS_MODULE_AVAILABLE:
if capture_stderr:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=stderr,
cwd=working_dir, universal_newlines=True, env=env)
# communicate returns a tuple with the file object for the child's
# output.
self.output = p.communicate()[0]
self._return_code = p.returncode
else:
old_dir = os.getcwd()
def _ReplaceEnvDict(dest, src):
# Changes made by os.environ.clear are not inheritable by child
# processes until Python 2.6. To produce inheritable changes we have
# to delete environment items with the del statement.
for key in dest.keys():
del dest[key]
dest.update(src)
# When 'env' is not None, backup the environment variables and replace
# them with the passed 'env'. When 'env' is None, we simply use the
# current 'os.environ' for compatibility with the subprocess.Popen
# semantics used above.
if env is not None:
old_environ = os.environ.copy()
_ReplaceEnvDict(os.environ, env)
try:
if working_dir is not None:
os.chdir(working_dir)
if capture_stderr:
p = popen2.Popen4(command)
else:
p = popen2.Popen3(command)
p.tochild.close()
self.output = p.fromchild.read()
ret_code = p.wait()
finally:
os.chdir(old_dir)
# Restore the old environment variables
# if they were replaced.
if env is not None:
_ReplaceEnvDict(os.environ, old_environ)
# Converts ret_code to match the semantics of
# subprocess.Popen.returncode.
if os.WIFSIGNALED(ret_code):
self._return_code = -os.WTERMSIG(ret_code)
else: # os.WIFEXITED(ret_code) should return True here.
self._return_code = os.WEXITSTATUS(ret_code)
if self._return_code < 0:
self.terminated_by_signal = True
self.exited = False
self.signal = -self._return_code
else:
self.terminated_by_signal = False
self.exited = True
self.exit_code = self._return_code
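# Usage sketch (hypothetical command; not part of the original file):
#   p = Subprocess(['/bin/echo', 'hello'])
#   if p.exited and p.exit_code == 0:
#     print p.output                # combined stdout/stderr, e.g. 'hello\n'
#   elif p.terminated_by_signal:
#     print 'killed by signal', p.signal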
def Main():
"""Runs the unit test."""
# We must call _ParseAndStripGTestFlags() before calling
# unittest.main(). Otherwise the latter will be confused by the
# --gtest_* flags.
_ParseAndStripGTestFlags(sys.argv)
# The tested binaries should not be writing XML output files unless the
# script explicitly instructs them to.
# TODO([email protected]): Move this into Subprocess when we implement
# passing environment into it as a parameter.
if GTEST_OUTPUT_VAR_NAME in os.environ:
del os.environ[GTEST_OUTPUT_VAR_NAME]
_test_module.main()
| dimmwitted-master | lib/gtest-1.7.0/test/gtest_test_utils.py |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's exception catching behavior.
This script invokes gtest_catch_exceptions_test_ and
gtest_catch_exceptions_ex_test_ (programs written with
Google Test) and verifies their output.
"""
__author__ = '[email protected] (Vlad Losev)'
import os
import gtest_test_utils
# Constants.
FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'
# Path to the gtest_catch_exceptions_ex_test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_ex_test_')
# Path to the gtest_catch_exceptions_test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_no_ex_test_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
TEST_LIST = gtest_test_utils.Subprocess(
[EXE_PATH, LIST_TESTS_FLAG], env=environ).output
SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST
if SUPPORTS_SEH_EXCEPTIONS:
BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH], env=environ).output
EX_BINARY_OUTPUT = gtest_test_utils.Subprocess(
[EX_EXE_PATH], env=environ).output
# The tests.
if SUPPORTS_SEH_EXCEPTIONS:
# pylint:disable-msg=C6302
class CatchSehExceptionsTest(gtest_test_utils.TestCase):
"""Tests exception-catching behavior."""
def TestSehExceptions(self, test_output):
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s constructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s destructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUpTestCase()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDownTestCase()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUp()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDown()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in the test body'
in test_output)
def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
self.TestSehExceptions(EX_BINARY_OUTPUT)
def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
self.TestSehExceptions(BINARY_OUTPUT)
class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
"""Tests C++ exception-catching behavior.
Tests in this test case verify that:
* C++ exceptions are caught and logged as C++ (not SEH) exceptions
    * Exceptions thrown affect the remainder of the test work flow in the
expected manner.
"""
def testCatchesCxxExceptionsInFixtureConstructor(self):
self.assert_('C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s constructor'
in EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInConstructorTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
if ('CxxExceptionInDestructorTest.ThrowsExceptionInDestructor' in
EX_BINARY_OUTPUT):
def testCatchesCxxExceptionsInFixtureDestructor(self):
self.assert_('C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s destructor'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUpTestCase(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in SetUpTestCase()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest constructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest test body '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTearDownTestCase(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in TearDownTestCase()'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUp(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in SetUp()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInSetUpTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
def testCatchesCxxExceptionsInTearDown(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in TearDown()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTearDownTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTestBody(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in the test body'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesNonStdCxxExceptions(self):
self.assert_('Unknown C++ exception thrown in the test body'
in EX_BINARY_OUTPUT)
def testUnhandledCxxExceptionsAbortTheProgram(self):
# Filters out SEH exception tests on Windows. Unhandled SEH exceptions
# cause tests to show pop-up windows there.
    FILTER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
# By default, Google Test doesn't catch the exceptions.
uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
[EX_EXE_PATH,
NO_CATCH_EXCEPTIONS_FLAG,
         FILTER_OUT_SEH_TESTS_FLAG],
env=environ).output
self.assert_('Unhandled C++ exception terminating the program'
in uncaught_exceptions_ex_binary_output)
self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output)
if __name__ == '__main__':
gtest_test_utils.Main()
| dimmwitted-master | lib/gtest-1.7.0/test/gtest_catch_exceptions_test.py |
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module."""
__author__ = "[email protected] (Keith Ray)"
import os
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_OUTPUT_SUBDIR = "xml_outfiles"
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyOne" SetUpProp="1" TestSomeProperty="1" TearDownProp="1" />
</testsuite>
</testsuites>
"""
EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyTwo" SetUpProp="2" TestSomeProperty="2" TearDownProp="2" />
</testsuite>
</testsuites>
"""
class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
"""Unit test for Google Test's XML output functionality."""
def setUp(self):
# We want the trailing '/' that the last "" provides in os.path.join, for
# telling Google Test to create an output directory instead of a single file
# for xml output.
self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_OUTPUT_SUBDIR, "")
self.DeleteFilesAndDir()
def tearDown(self):
self.DeleteFilesAndDir()
def DeleteFilesAndDir(self):
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml"))
except os.error:
pass
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml"))
except os.error:
pass
try:
os.rmdir(self.output_dir_)
except os.error:
pass
def testOutfile1(self):
self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)
def testOutfile2(self):
self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)
def _TestOutFile(self, test_name, expected_xml):
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
p = gtest_test_utils.Subprocess(command,
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
# TODO([email protected]): libtool causes the built test binary to be
# named lt-gtest_xml_outfiles_test_ instead of
    # gtest_xml_outfiles_test_. To account for this possibility, we
# allow both names in the following code. We should remove this
# hack when Chandler Carruth's libtool replacement tool is ready.
output_file_name1 = test_name + ".xml"
output_file1 = os.path.join(self.output_dir_, output_file_name1)
output_file_name2 = 'lt-' + output_file_name1
output_file2 = os.path.join(self.output_dir_, output_file_name2)
self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
output_file1)
expected = minidom.parseString(expected_xml)
if os.path.isfile(output_file1):
actual = minidom.parse(output_file1)
else:
actual = minidom.parse(output_file2)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == "__main__":
os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
gtest_test_utils.Main()
| dimmwitted-master | lib/gtest-1.7.0/test/gtest_xml_outfiles_test.py |
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Testing Framework.
SYNOPSIS
gtest_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gtest_output_test_ file.
gtest_output_test.py --gengolden
gtest_output_test.py
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import sys
import gtest_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'
IS_WINDOWS = os.name == 'nt'
# TODO([email protected]): remove the _lin suffix.
GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')
# At least one command we exercise must not have the
# --gtest_internal_skip_environment_and_ad_hoc_tests flag.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
'--gtest_print_time',
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
{}, [PROGRAM_PATH,
'--gtest_also_run_disabled_tests',
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
{'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
[PROGRAM_PATH,
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=PassingTest.*'])
GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
"""Changes all Windows/Mac line endings in s to UNIX line endings."""
return s.replace('\r\n', '\n').replace('\r', '\n')
def RemoveLocations(test_output):
"""Removes all file location info from a Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with all file location info (in the form of
    'DIRECTORY/FILE_NAME:LINE_NUMBER: ' or
'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
'FILE_NAME:#: '.
"""
return re.sub(r'.*[/\\](.+)(\:\d+|\(\d+\))\: ', r'\1:#: ', test_output)
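# Illustrative example (hypothetical failure lines; not from the original test):
#   RemoveLocations('/home/me/foo_test.cc:42: Failure')
#   RemoveLocations('C:\\src\\foo_test.cc(42): Failure')
# are both expected to yield 'foo_test.cc:#: Failure'.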
def RemoveStackTraceDetails(output):
"""Removes all stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n',
'Stack trace: (omitted)\n\n', output)
def RemoveStackTraces(output):
"""Removes all traces of stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n', '', output)
def RemoveTime(output):
"""Removes all time information from a Google Test program's output."""
return re.sub(r'\(\d+ ms', '(? ms', output)
def RemoveTypeInfoDetails(test_output):
"""Removes compiler-specific type info from Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with type information normalized to canonical form.
"""
# some compilers output the name of type 'unsigned int' as 'unsigned'
return re.sub(r'unsigned int', 'unsigned', test_output)
def NormalizeToCurrentPlatform(test_output):
"""Normalizes platform specific output details for easier comparison."""
if IS_WINDOWS:
# Removes the color information that is not present on Windows.
test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
# Changes failure message headers into the Windows format.
test_output = re.sub(r': Failure\n', r': error: ', test_output)
# Changes file(line_number) to file:line_number.
test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
return test_output
def RemoveTestCounts(output):
"""Removes test counts from a Google Test program's output."""
output = re.sub(r'\d+ tests?, listed below',
'? tests, listed below', output)
output = re.sub(r'\d+ FAILED TESTS',
'? FAILED TESTS', output)
output = re.sub(r'\d+ tests? from \d+ test cases?',
'? tests from ? test cases', output)
output = re.sub(r'\d+ tests? from ([a-zA-Z_])',
r'? tests from \1', output)
return re.sub(r'\d+ tests?\.', '? tests.', output)
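# Illustrative example (hypothetical summary line; not from the original test):
#   RemoveTestCounts('[==========] 5 tests from 2 test cases ran.')
# is expected to yield '[==========] ? tests from ? test cases ran.'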
def RemoveMatchingTests(test_output, pattern):
"""Removes output of specified tests from a Google Test program's output.
This function strips not only the beginning and the end of a test but also
all output in between.
Args:
test_output: A string containing the test output.
pattern: A regex string that matches names of test cases or
tests to remove.
Returns:
Contents of test_output with tests whose names match pattern removed.
"""
test_output = re.sub(
r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n' % (
pattern, pattern),
'',
test_output)
return re.sub(r'.*%s.*\n' % pattern, '', test_output)
def NormalizeOutput(output):
"""Normalizes output (the output of gtest_output_test_.exe)."""
output = ToUnixLineEnding(output)
output = RemoveLocations(output)
output = RemoveStackTraceDetails(output)
output = RemoveTime(output)
return output
def GetShellCommandOutput(env_cmd):
"""Runs a command in a sub-process, and returns its output in a string.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
environment variables to set, and element 1 is a string with
the command and any flags.
Returns:
A string with the command's combined standard and diagnostic output.
"""
# Spawns cmd in a sub-process, and gets its standard I/O file objects.
# Set and save the environment properly.
environ = os.environ.copy()
environ.update(env_cmd[0])
p = gtest_test_utils.Subprocess(env_cmd[1], env=environ)
return p.output
def GetCommandOutput(env_cmd):
"""Runs a command and returns its output with all file location
info stripped off.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
environment variables to set, and element 1 is a string with
the command and any flags.
"""
# Disables exception pop-ups on Windows.
environ, cmdline = env_cmd
environ = dict(environ) # Ensures we are modifying a copy.
environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
return NormalizeOutput(GetShellCommandOutput((environ, cmdline)))
def GetOutputOfAllCommands():
"""Returns concatenated output from several representative commands."""
return (GetCommandOutput(COMMAND_WITH_COLOR) +
GetCommandOutput(COMMAND_WITH_TIME) +
GetCommandOutput(COMMAND_WITH_DISABLED) +
GetCommandOutput(COMMAND_WITH_SHARDING))
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
SUPPORTS_STACK_TRACES = False
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
SUPPORTS_TYPED_TESTS and
SUPPORTS_THREADS)
class GTestOutputTest(gtest_test_utils.TestCase):
def RemoveUnsupportedTests(self, test_output):
if not SUPPORTS_DEATH_TESTS:
test_output = RemoveMatchingTests(test_output, 'DeathTest')
if not SUPPORTS_TYPED_TESTS:
test_output = RemoveMatchingTests(test_output, 'TypedTest')
test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
if not SUPPORTS_THREADS:
test_output = RemoveMatchingTests(test_output,
'ExpectFailureWithThreadsTest')
test_output = RemoveMatchingTests(test_output,
'ScopedFakeTestPartResultReporterTest')
test_output = RemoveMatchingTests(test_output,
'WorksConcurrently')
if not SUPPORTS_STACK_TRACES:
test_output = RemoveStackTraces(test_output)
return test_output
def testOutput(self):
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'rb')
    # A mis-configured source control system can cause \r to appear in EOL
# sequences when we read the golden file irrespective of an operating
# system used. Therefore, we need to strip those \r's from newlines
# unconditionally.
golden = ToUnixLineEnding(golden_file.read())
golden_file.close()
# We want the test to pass regardless of certain features being
# supported or not.
# We still have to remove type name specifics in all cases.
normalized_actual = RemoveTypeInfoDetails(output)
normalized_golden = RemoveTypeInfoDetails(golden)
if CAN_GENERATE_GOLDEN_FILE:
self.assertEqual(normalized_golden, normalized_actual)
else:
normalized_actual = NormalizeToCurrentPlatform(
RemoveTestCounts(normalized_actual))
normalized_golden = NormalizeToCurrentPlatform(
RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))
# This code is very handy when debugging golden file differences:
if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_gtest_output_test_normalized_actual.txt'), 'wb').write(
normalized_actual)
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_gtest_output_test_normalized_golden.txt'), 'wb').write(
normalized_golden)
self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
if sys.argv[1:] == [GENGOLDEN_FLAG]:
if CAN_GENERATE_GOLDEN_FILE:
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'wb')
golden_file.write(output)
golden_file.close()
else:
message = (
"""Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests, typed tests,
and multiple threads). Please generate the golden file using a binary built
with those features enabled.""")
sys.stderr.write(message)
sys.exit(1)
else:
gtest_test_utils.Main()
| dimmwitted-master | lib/gtest-1.7.0/test/gtest_output_test.py |
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for gtest_xml_output"""
__author__ = '[email protected] (Sean Mcafee)'
import re
from xml.dom import minidom, Node
import gtest_test_utils
GTEST_OUTPUT_FLAG = '--gtest_output'
GTEST_DEFAULT_OUTPUT_FILE = 'test_detail.xml'
class GTestXMLTestCase(gtest_test_utils.TestCase):
"""
Base class for tests of Google Test's XML output functionality.
"""
def AssertEquivalentNodes(self, expected_node, actual_node):
"""
Asserts that actual_node (a DOM node object) is equivalent to
expected_node (another DOM node object), in that either both of
them are CDATA nodes and have the same value, or both are DOM
elements and actual_node meets all of the following conditions:
* It has the same tag name as expected_node.
* It has the same set of attributes as expected_node, each with
the same value as the corresponding attribute of expected_node.
      Exceptions are any attribute named "time", which only needs to be
      convertible to a floating-point number, and any attribute named
      "type_param", which only has to be non-empty.
* It has an equivalent set of child nodes (including elements and
CDATA sections) as expected_node. Note that we ignore the
order of the children as they are not guaranteed to be in any
particular order.
"""
if expected_node.nodeType == Node.CDATA_SECTION_NODE:
self.assertEquals(Node.CDATA_SECTION_NODE, actual_node.nodeType)
self.assertEquals(expected_node.nodeValue, actual_node.nodeValue)
return
self.assertEquals(Node.ELEMENT_NODE, actual_node.nodeType)
self.assertEquals(Node.ELEMENT_NODE, expected_node.nodeType)
self.assertEquals(expected_node.tagName, actual_node.tagName)
expected_attributes = expected_node.attributes
actual_attributes = actual_node .attributes
self.assertEquals(
expected_attributes.length, actual_attributes.length,
'attribute numbers differ in element %s:\nExpected: %r\nActual: %r' % (
actual_node.tagName, expected_attributes.keys(),
actual_attributes.keys()))
for i in range(expected_attributes.length):
expected_attr = expected_attributes.item(i)
actual_attr = actual_attributes.get(expected_attr.name)
self.assert_(
actual_attr is not None,
'expected attribute %s not found in element %s' %
(expected_attr.name, actual_node.tagName))
self.assertEquals(
expected_attr.value, actual_attr.value,
' values of attribute %s in element %s differ: %s vs %s' %
(expected_attr.name, actual_node.tagName,
expected_attr.value, actual_attr.value))
expected_children = self._GetChildren(expected_node)
actual_children = self._GetChildren(actual_node)
self.assertEquals(
len(expected_children), len(actual_children),
'number of child elements differ in element ' + actual_node.tagName)
for child_id, child in expected_children.iteritems():
self.assert_(child_id in actual_children,
'<%s> is not in <%s> (in element %s)' %
(child_id, actual_children, actual_node.tagName))
self.AssertEquivalentNodes(child, actual_children[child_id])
identifying_attribute = {
'testsuites': 'name',
'testsuite': 'name',
'testcase': 'name',
'failure': 'message',
}
def _GetChildren(self, element):
"""
Fetches all of the child nodes of element, a DOM Element object.
Returns them as the values of a dictionary keyed by the IDs of the
children. For <testsuites>, <testsuite> and <testcase> elements, the ID
is the value of their "name" attribute; for <failure> elements, it is
the value of the "message" attribute; CDATA sections and non-whitespace
text nodes are concatenated into a single CDATA section with ID
"detail". An exception is raised if any element other than the above
four is encountered, if two child elements with the same identifying
attributes are encountered, or if any other type of node is encountered.
"""
children = {}
for child in element.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
self.assert_(child.tagName in self.identifying_attribute,
'Encountered unknown element <%s>' % child.tagName)
childID = child.getAttribute(self.identifying_attribute[child.tagName])
self.assert_(childID not in children)
children[childID] = child
elif child.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]:
if 'detail' not in children:
if (child.nodeType == Node.CDATA_SECTION_NODE or
not child.nodeValue.isspace()):
children['detail'] = child.ownerDocument.createCDATASection(
child.nodeValue)
else:
children['detail'].nodeValue += child.nodeValue
else:
self.fail('Encountered unexpected node type %d' % child.nodeType)
return children
def NormalizeXml(self, element):
"""
Normalizes Google Test's XML output to eliminate references to transient
information that may change from run to run.
* The "time" attribute of <testsuites>, <testsuite> and <testcase>
elements is replaced with a single asterisk, if it contains
only digit characters.
* The "timestamp" attribute of <testsuites> elements is replaced with a
single asterisk, if it contains a valid ISO8601 datetime value.
* The "type_param" attribute of <testcase> elements is replaced with a
       single asterisk (if it is non-empty) as it is the type name returned
by the compiler and is platform dependent.
* The line info reported in the first line of the "message"
attribute and CDATA section of <failure> elements is replaced with the
file's basename and a single asterisk for the line number.
* The directory names in file paths are removed.
* The stack traces are removed.
"""
if element.tagName == 'testsuites':
timestamp = element.getAttributeNode('timestamp')
timestamp.value = re.sub(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d$',
'*', timestamp.value)
if element.tagName in ('testsuites', 'testsuite', 'testcase'):
time = element.getAttributeNode('time')
time.value = re.sub(r'^\d+(\.\d+)?$', '*', time.value)
type_param = element.getAttributeNode('type_param')
if type_param and type_param.value:
type_param.value = '*'
elif element.tagName == 'failure':
source_line_pat = r'^.*[/\\](.*:)\d+\n'
# Replaces the source line information with a normalized form.
message = element.getAttributeNode('message')
message.value = re.sub(source_line_pat, '\\1*\n', message.value)
for child in element.childNodes:
if child.nodeType == Node.CDATA_SECTION_NODE:
# Replaces the source line information with a normalized form.
cdata = re.sub(source_line_pat, '\\1*\n', child.nodeValue)
# Removes the actual stack trace.
child.nodeValue = re.sub(r'\nStack trace:\n(.|\n)*',
'', cdata)
for child in element.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
self.NormalizeXml(child)
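  # Illustrative sketch (hypothetical values; not from the original file):
  # NormalizeXml() is expected to turn time="0.035" into time="*" and a
  # failure message such as '/home/me/foo.cc:12\nExpected: 1' into
  # 'foo.cc:*\nExpected: 1'.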
| dimmwitted-master | lib/gtest-1.7.0/test/gtest_xml_test_utils.py |
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's throw-on-failure mode with exceptions disabled.
This script invokes gtest_throw_on_failure_test_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE = 'gtest_throw_on_failure'
# Path to the gtest_throw_on_failure_test_ program, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_throw_on_failure_test_')
# Utilities.
def SetEnvVar(env_var, value):
"""Sets an environment variable to a given value; unsets it when the
given value is None.
"""
env_var = env_var.upper()
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def Run(command):
"""Runs a command; returns True/False if its exit code is/isn't 0."""
print 'Running "%s". . .' % ' '.join(command)
p = gtest_test_utils.Subprocess(command)
return p.exited and p.exit_code == 0
# The tests. TODO([email protected]): refactor the class to share common
# logic with code in gtest_break_on_failure_unittest.py.
class ThrowOnFailureTest(gtest_test_utils.TestCase):
"""Tests the throw-on-failure mode."""
def RunAndVerify(self, env_var_value, flag_value, should_fail):
"""Runs gtest_throw_on_failure_test_ and verifies that it does
(or does not) exit with a non-zero code.
Args:
      env_var_value: value of the GTEST_THROW_ON_FAILURE environment
variable; None if the variable should be unset.
      flag_value: value of the --gtest_throw_on_failure flag;
None if the flag should not be present.
should_fail: True iff the program is expected to fail.
"""
SetEnvVar(THROW_ON_FAILURE, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = '--%s=0' % THROW_ON_FAILURE
else:
flag = '--%s' % THROW_ON_FAILURE
command = [EXE_PATH]
if flag:
command.append(flag)
if should_fail:
should_or_not = 'should'
else:
should_or_not = 'should not'
failed = not Run(command)
SetEnvVar(THROW_ON_FAILURE, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
'exit code.' %
(THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
should_or_not))
self.assert_(failed == should_fail, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False)
def testThrowOnFailureEnvVar(self):
"""Tests using the GTEST_THROW_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
should_fail=False)
self.RunAndVerify(env_var_value='1',
flag_value=None,
should_fail=True)
def testThrowOnFailureFlag(self):
"""Tests using the --gtest_throw_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value=None,
flag_value='1',
should_fail=True)
def testThrowOnFailureFlagOverridesEnvVar(self):
"""Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value='0',
flag_value='1',
should_fail=True)
self.RunAndVerify(env_var_value='1',
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value='1',
flag_value='1',
should_fail=True)
if __name__ == '__main__':
gtest_test_utils.Main()
| dimmwitted-master | lib/gtest-1.7.0/test/gtest_throw_on_failure_test.py |
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. This script tests such functionality
by invoking gtest_list_tests_unittest_ (a program written with
Google Test) with the command line flags.
"""
__author__ = '[email protected] (Patrick Hanna)'
import gtest_test_utils
import re
# Constants.
# The command line flag for enabling/disabling listing all tests.
LIST_TESTS_FLAG = 'gtest_list_tests'
# Path to the gtest_list_tests_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_list_tests_unittest_')
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
Abc\.
Xyz
Def
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
TypedTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
TypedTest/1\. # TypeParam = int\s*\*
TestA
TestB
TypedTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
My/TypeParamTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
My/TypeParamTest/1\. # TypeParam = int\s*\*
TestA
TestB
My/TypeParamTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
MyInstantiation/ValueParamTest\.
TestA/0 # GetParam\(\) = one line
TestA/1 # GetParam\(\) = two\\nlines
TestA/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
TestB/0 # GetParam\(\) = one line
TestB/1 # GetParam\(\) = two\\nlines
TestB/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
""")
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
""")
# Utilities.
def Run(args):
"""Runs gtest_list_tests_unittest_ and returns the list of tests printed."""
return gtest_test_utils.Subprocess([EXE_PATH] + args,
capture_stderr=False).output
# The unit test.
class GTestListTestsUnitTest(gtest_test_utils.TestCase):
"""Tests using the --gtest_list_tests flag to list all tests."""
def RunAndVerify(self, flag_value, expected_output_re, other_flag):
"""Runs gtest_list_tests_unittest_ and verifies that it prints
the correct tests.
Args:
flag_value: value of the --gtest_list_tests flag;
None if the flag should not be present.
expected_output_re: regular expression that matches the expected
output after running command;
other_flag: a different flag to be passed to command
along with gtest_list_tests;
None if the flag should not be present.
"""
if flag_value is None:
flag = ''
flag_expression = 'not set'
elif flag_value == '0':
flag = '--%s=0' % LIST_TESTS_FLAG
flag_expression = '0'
else:
flag = '--%s' % LIST_TESTS_FLAG
flag_expression = '1'
args = [flag]
if other_flag is not None:
args += [other_flag]
output = Run(args)
if expected_output_re:
self.assert_(
expected_output_re.match(output),
('when %s is %s, the output of "%s" is "%s",\n'
'which does not match regex "%s"' %
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output,
expected_output_re.pattern)))
else:
self.assert_(
not EXPECTED_OUTPUT_NO_FILTER_RE.match(output),
('when %s is %s, the output of "%s" is "%s"'%
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output)))
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(flag_value=None,
expected_output_re=None,
other_flag=None)
def testFlag(self):
"""Tests using the --gtest_list_tests flag."""
self.RunAndVerify(flag_value='0',
expected_output_re=None,
other_flag=None)
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
other_flag=None)
def testOverrideNonFilterFlags(self):
"""Tests that --gtest_list_tests overrides the non-filter flags."""
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
other_flag='--gtest_break_on_failure')
def testWithFilterFlags(self):
"""Tests that --gtest_list_tests takes into account the
--gtest_filter flag."""
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_FILTER_FOO_RE,
other_flag='--gtest_filter=Foo*')
if __name__ == '__main__':
gtest_test_utils.Main()
| dimmwitted-master | lib/gtest-1.7.0/test/gtest_list_tests_unittest.py |
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')
environ = os.environ.copy()
def AssertEq(expected, actual):
if expected != actual:
print 'Expected: %s' % (expected,)
print ' Actual: %s' % (actual,)
raise AssertionError
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def GetFlag(flag):
"""Runs gtest_env_var_test_ and returns its output."""
args = [COMMAND]
if flag is not None:
args += [flag]
return gtest_test_utils.Subprocess(args, env=environ).output
def TestFlag(flag, test_val, default_val):
"""Verifies that the given flag is affected by the corresponding env var."""
env_var = 'GTEST_' + flag.upper()
SetEnvVar(env_var, test_val)
AssertEq(test_val, GetFlag(flag))
SetEnvVar(env_var, None)
AssertEq(default_val, GetFlag(flag))
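# Illustrative sketch (hypothetical values; not from the original test):
#   TestFlag('color', 'yes', 'auto')
# sets GTEST_COLOR=yes and expects the binary to report 'yes' for the color
# flag, then unsets GTEST_COLOR and expects the default 'auto'.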
class GTestEnvVarTest(gtest_test_utils.TestCase):
def testEnvVarAffectsFlag(self):
"""Tests that environment variable should affect the corresponding flag."""
TestFlag('break_on_failure', '1', '0')
TestFlag('color', 'yes', 'auto')
TestFlag('filter', 'FooTest.Bar', '*')
TestFlag('output', 'xml:tmp/foo.xml', '')
TestFlag('print_time', '0', '1')
TestFlag('repeat', '999', '1')
TestFlag('throw_on_failure', '1', '0')
TestFlag('death_test_style', 'threadsafe', 'fast')
TestFlag('catch_exceptions', '0', '1')
if IS_LINUX:
TestFlag('death_test_use_fork', '1', '0')
TestFlag('stack_trace_depth', '0', '100')
if __name__ == '__main__':
gtest_test_utils.Main()
| dimmwitted-master | lib/gtest-1.7.0/test/gtest_env_var_test.py |
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pump v0.2.0 - Pretty Useful for Meta Programming.
A tool for preprocessor meta programming. Useful for generating
repetitive boilerplate code. Especially useful for writing C++
classes, functions, macros, and templates that need to work with
various number of arguments.
USAGE:
pump.py SOURCE_FILE
EXAMPLES:
pump.py foo.cc.pump
Converts foo.cc.pump to foo.cc.
GRAMMAR:
CODE ::= ATOMIC_CODE*
ATOMIC_CODE ::= $var ID = EXPRESSION
| $var ID = [[ CODE ]]
| $range ID EXPRESSION..EXPRESSION
| $for ID SEPARATOR [[ CODE ]]
| $($)
| $ID
| $(EXPRESSION)
| $if EXPRESSION [[ CODE ]] ELSE_BRANCH
| [[ CODE ]]
| RAW_CODE
SEPARATOR ::= RAW_CODE | EMPTY
ELSE_BRANCH ::= $else [[ CODE ]]
| $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
| EMPTY
EXPRESSION has Python syntax.
"""
__author__ = '[email protected] (Zhanyong Wan)'
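# Illustrative sketch of the grammar above (assumed expansion, not from the
# original file): the Pump source
#
#   $var n = 3
#   $range i 1..n
#   $for i, [[T$i]]
#
# is expected to expand to
#
#   T1, T2, T3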
import os
import re
import sys
TOKEN_TABLE = [
(re.compile(r'\$var\s+'), '$var'),
(re.compile(r'\$elif\s+'), '$elif'),
(re.compile(r'\$else\s+'), '$else'),
(re.compile(r'\$for\s+'), '$for'),
(re.compile(r'\$if\s+'), '$if'),
(re.compile(r'\$range\s+'), '$range'),
(re.compile(r'\$[_A-Za-z]\w*'), '$id'),
(re.compile(r'\$\(\$\)'), '$($)'),
(re.compile(r'\$'), '$'),
(re.compile(r'\[\[\n?'), '[['),
(re.compile(r'\]\]\n?'), ']]'),
]
class Cursor:
"""Represents a position (line and column) in a text file."""
def __init__(self, line=-1, column=-1):
self.line = line
self.column = column
def __eq__(self, rhs):
return self.line == rhs.line and self.column == rhs.column
def __ne__(self, rhs):
return not self == rhs
def __lt__(self, rhs):
return self.line < rhs.line or (
self.line == rhs.line and self.column < rhs.column)
def __le__(self, rhs):
return self < rhs or self == rhs
def __gt__(self, rhs):
return rhs < self
def __ge__(self, rhs):
return rhs <= self
def __str__(self):
if self == Eof():
return 'EOF'
else:
return '%s(%s)' % (self.line + 1, self.column)
def __add__(self, offset):
return Cursor(self.line, self.column + offset)
def __sub__(self, offset):
return Cursor(self.line, self.column - offset)
def Clone(self):
"""Returns a copy of self."""
return Cursor(self.line, self.column)
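  # Illustrative example (not from the original file):
  #   Cursor(2, 5) + 3 == Cursor(2, 8)   # expected to be True
  #   Cursor(1, 0) < Cursor(2, 0)        # expected to be True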
# Special cursor to indicate the end-of-file.
def Eof():
"""Returns the special cursor to denote the end-of-file."""
return Cursor(-1, -1)
class Token:
"""Represents a token in a Pump source file."""
def __init__(self, start=None, end=None, value=None, token_type=None):
if start is None:
self.start = Eof()
else:
self.start = start
if end is None:
self.end = Eof()
else:
self.end = end
self.value = value
self.token_type = token_type
def __str__(self):
return 'Token @%s: \'%s\' type=%s' % (
self.start, self.value, self.token_type)
def Clone(self):
"""Returns a copy of self."""
return Token(self.start.Clone(), self.end.Clone(), self.value,
self.token_type)
def StartsWith(lines, pos, string):
"""Returns True iff the given position in lines starts with 'string'."""
return lines[pos.line][pos.column:].startswith(string)
def FindFirstInLine(line, token_table):
best_match_start = -1
for (regex, token_type) in token_table:
m = regex.search(line)
if m:
      # We found a match for this regex in the line.
if best_match_start < 0 or m.start() < best_match_start:
best_match_start = m.start()
best_match_length = m.end() - m.start()
best_match_token_type = token_type
if best_match_start < 0:
return None
return (best_match_start, best_match_length, best_match_token_type)
def FindFirst(lines, token_table, cursor):
"""Finds the first occurrence of any string in strings in lines."""
start = cursor.Clone()
cur_line_number = cursor.line
for line in lines[start.line:]:
if cur_line_number == start.line:
line = line[start.column:]
m = FindFirstInLine(line, token_table)
if m:
# We found a regex in line.
(start_column, length, token_type) = m
if cur_line_number == start.line:
start_column += start.column
found_start = Cursor(cur_line_number, start_column)
found_end = found_start + length
return MakeToken(lines, found_start, found_end, token_type)
cur_line_number += 1
  # We failed to find any token in lines.
return None
def SubString(lines, start, end):
"""Returns a substring in lines."""
if end == Eof():
end = Cursor(len(lines) - 1, len(lines[-1]))
if start >= end:
return ''
if start.line == end.line:
return lines[start.line][start.column:end.column]
result_lines = ([lines[start.line][start.column:]] +
lines[start.line + 1:end.line] +
[lines[end.line][:end.column]])
return ''.join(result_lines)
def StripMetaComments(str):
"""Strip meta comments from each line in the given string."""
# First, completely remove lines containing nothing but a meta
# comment, including the trailing \n.
str = re.sub(r'^\s*\$\$.*\n', '', str)
# Then, remove meta comments from contentful lines.
return re.sub(r'\s*\$\$.*', '', str)
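# Illustrative example (not from the original file):
#   StripMetaComments('$$ header comment\nint x;  $$ trailing note\n')
# is expected to yield 'int x;\n'.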
def MakeToken(lines, start, end, token_type):
"""Creates a new instance of Token."""
return Token(start, end, SubString(lines, start, end), token_type)
def ParseToken(lines, pos, regex, token_type):
line = lines[pos.line][pos.column:]
m = regex.search(line)
if m and not m.start():
return MakeToken(lines, pos, pos + m.end(), token_type)
else:
print 'ERROR: %s expected at %s.' % (token_type, pos)
sys.exit(1)
ID_REGEX = re.compile(r'[_A-Za-z]\w*')
EQ_REGEX = re.compile(r'=')
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
WHITE_SPACE_REGEX = re.compile(r'\s')
DOT_DOT_REGEX = re.compile(r'\.\.')
def Skip(lines, pos, regex):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m and not m.start():
return pos + m.end()
else:
return pos
def SkipUntil(lines, pos, regex, token_type):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m:
return pos + m.start()
else:
print ('ERROR: %s expected on line %s after column %s.' %
(token_type, pos.line + 1, pos.column))
sys.exit(1)
def ParseExpTokenInParens(lines, pos):
def ParseInParens(pos):
pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
pos = Skip(lines, pos, r'\(')
pos = Parse(pos)
pos = Skip(lines, pos, r'\)')
return pos
def Parse(pos):
pos = SkipUntil(lines, pos, r'\(|\)', ')')
if SubString(lines, pos, pos + 1) == '(':
pos = Parse(pos + 1)
pos = Skip(lines, pos, r'\)')
return Parse(pos)
else:
return pos
start = pos.Clone()
pos = ParseInParens(pos)
return MakeToken(lines, start, pos, 'exp')
def RStripNewLineFromToken(token):
if token.value.endswith('\n'):
return Token(token.start, token.end, token.value[:-1], token.token_type)
else:
return token
def TokenizeLines(lines, pos):
while True:
found = FindFirst(lines, TOKEN_TABLE, pos)
if not found:
yield MakeToken(lines, pos, Eof(), 'code')
return
if found.start == pos:
prev_token = None
prev_token_rstripped = None
else:
prev_token = MakeToken(lines, pos, found.start, 'code')
prev_token_rstripped = RStripNewLineFromToken(prev_token)
if found.token_type == '$var':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
yield eq_token
pos = Skip(lines, eq_token.end, r'\s*')
if SubString(lines, pos, pos + 2) != '[[':
exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
yield exp_token
pos = Cursor(exp_token.end.line + 1, 0)
elif found.token_type == '$for':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
elif found.token_type == '$range':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
yield MakeToken(lines, pos, dots_pos, 'exp')
yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
pos = dots_pos + 2
new_pos = Cursor(pos.line + 1, 0)
yield MakeToken(lines, pos, new_pos, 'exp')
pos = new_pos
elif found.token_type == '$':
if prev_token:
yield prev_token
yield found
exp_token = ParseExpTokenInParens(lines, found.end)
yield exp_token
pos = exp_token.end
elif (found.token_type == ']]' or found.token_type == '$if' or
found.token_type == '$elif' or found.token_type == '$else'):
if prev_token_rstripped:
yield prev_token_rstripped
yield found
pos = found.end
else:
if prev_token:
yield prev_token
yield found
pos = found.end
def Tokenize(s):
"""A generator that yields the tokens in the given string."""
if s != '':
lines = s.splitlines(True)
for token in TokenizeLines(lines, Cursor(0, 0)):
yield token
class CodeNode:
def __init__(self, atomic_code_list=None):
self.atomic_code = atomic_code_list
class VarNode:
def __init__(self, identifier=None, atomic_code=None):
self.identifier = identifier
self.atomic_code = atomic_code
class RangeNode:
def __init__(self, identifier=None, exp1=None, exp2=None):
self.identifier = identifier
self.exp1 = exp1
self.exp2 = exp2
class ForNode:
def __init__(self, identifier=None, sep=None, code=None):
self.identifier = identifier
self.sep = sep
self.code = code
class ElseNode:
def __init__(self, else_branch=None):
self.else_branch = else_branch
class IfNode:
def __init__(self, exp=None, then_branch=None, else_branch=None):
self.exp = exp
self.then_branch = then_branch
self.else_branch = else_branch
class RawCodeNode:
def __init__(self, token=None):
self.raw_code = token
class LiteralDollarNode:
def __init__(self, token):
self.token = token
class ExpNode:
def __init__(self, token, python_exp):
self.token = token
self.python_exp = python_exp
def PopFront(a_list):
head = a_list[0]
a_list[:1] = []
return head
def PushFront(a_list, elem):
a_list[:0] = [elem]
def PopToken(a_list, token_type=None):
token = PopFront(a_list)
if token_type is not None and token.token_type != token_type:
print 'ERROR: %s expected at %s' % (token_type, token.start)
print 'ERROR: %s found instead' % (token,)
sys.exit(1)
return token
def PeekToken(a_list):
if not a_list:
return None
return a_list[0]
def ParseExpNode(token):
python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value)
return ExpNode(token, python_exp)
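# Illustrative example (not part of the original script): ParseExpNode rewrites
# every identifier in a meta expression into an Env lookup, so a token whose
# value is 'i + 1 <= n' yields the Python expression
# 'self.GetValue("i") + 1 <= self.GetValue("n")', which Env.EvalExp later eval()s.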
def ParseElseNode(tokens):
def Pop(token_type=None):
return PopToken(tokens, token_type)
next = PeekToken(tokens)
if not next:
return None
if next.token_type == '$else':
Pop('$else')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return code_node
elif next.token_type == '$elif':
Pop('$elif')
exp = Pop('code')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
inner_else_node = ParseElseNode(tokens)
return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
elif not next.value.strip():
Pop('code')
return ParseElseNode(tokens)
else:
return None
def ParseAtomicCodeNode(tokens):
def Pop(token_type=None):
return PopToken(tokens, token_type)
head = PopFront(tokens)
t = head.token_type
if t == 'code':
return RawCodeNode(head)
elif t == '$var':
id_token = Pop('id')
Pop('=')
next = PeekToken(tokens)
if next.token_type == 'exp':
exp_token = Pop()
return VarNode(id_token, ParseExpNode(exp_token))
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return VarNode(id_token, code_node)
elif t == '$for':
id_token = Pop('id')
next_token = PeekToken(tokens)
if next_token.token_type == 'code':
sep_token = next_token
Pop('code')
else:
sep_token = None
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return ForNode(id_token, sep_token, code_node)
elif t == '$if':
exp_token = Pop('code')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
else_node = ParseElseNode(tokens)
return IfNode(ParseExpNode(exp_token), code_node, else_node)
elif t == '$range':
id_token = Pop('id')
exp1_token = Pop('exp')
Pop('..')
exp2_token = Pop('exp')
return RangeNode(id_token, ParseExpNode(exp1_token),
ParseExpNode(exp2_token))
elif t == '$id':
return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
elif t == '$($)':
return LiteralDollarNode(head)
elif t == '$':
exp_token = Pop('exp')
return ParseExpNode(exp_token)
elif t == '[[':
code_node = ParseCodeNode(tokens)
Pop(']]')
return code_node
else:
PushFront(tokens, head)
return None
def ParseCodeNode(tokens):
atomic_code_list = []
while True:
if not tokens:
break
atomic_code_node = ParseAtomicCodeNode(tokens)
if atomic_code_node:
atomic_code_list.append(atomic_code_node)
else:
break
return CodeNode(atomic_code_list)
def ParseToAST(pump_src_text):
"""Convert the given Pump source text into an AST."""
tokens = list(Tokenize(pump_src_text))
code_node = ParseCodeNode(tokens)
return code_node
class Env:
def __init__(self):
self.variables = []
self.ranges = []
def Clone(self):
clone = Env()
clone.variables = self.variables[:]
clone.ranges = self.ranges[:]
return clone
def PushVariable(self, var, value):
# If value looks like an int, store it as an int.
try:
int_value = int(value)
if ('%s' % int_value) == value:
value = int_value
except Exception:
pass
self.variables[:0] = [(var, value)]
def PopVariable(self):
self.variables[:1] = []
def PushRange(self, var, lower, upper):
self.ranges[:0] = [(var, lower, upper)]
def PopRange(self):
self.ranges[:1] = []
def GetValue(self, identifier):
for (var, value) in self.variables:
if identifier == var:
return value
print 'ERROR: meta variable %s is undefined.' % (identifier,)
sys.exit(1)
def EvalExp(self, exp):
try:
result = eval(exp.python_exp)
except Exception, e:
print 'ERROR: caught exception %s: %s' % (e.__class__.__name__, e)
print ('ERROR: failed to evaluate meta expression %s at %s' %
(exp.python_exp, exp.token.start))
sys.exit(1)
return result
def GetRange(self, identifier):
for (var, lower, upper) in self.ranges:
if identifier == var:
return (lower, upper)
print 'ERROR: range %s is undefined.' % (identifier,)
sys.exit(1)
class Output:
def __init__(self):
self.string = ''
def GetLastLine(self):
index = self.string.rfind('\n')
if index < 0:
return ''
return self.string[index + 1:]
def Append(self, s):
self.string += s
def RunAtomicCode(env, node, output):
if isinstance(node, VarNode):
identifier = node.identifier.value.strip()
result = Output()
RunAtomicCode(env.Clone(), node.atomic_code, result)
value = result.string
env.PushVariable(identifier, value)
elif isinstance(node, RangeNode):
identifier = node.identifier.value.strip()
lower = int(env.EvalExp(node.exp1))
upper = int(env.EvalExp(node.exp2))
env.PushRange(identifier, lower, upper)
elif isinstance(node, ForNode):
identifier = node.identifier.value.strip()
if node.sep is None:
sep = ''
else:
sep = node.sep.value
(lower, upper) = env.GetRange(identifier)
for i in range(lower, upper + 1):
new_env = env.Clone()
new_env.PushVariable(identifier, i)
RunCode(new_env, node.code, output)
if i != upper:
output.Append(sep)
elif isinstance(node, RawCodeNode):
output.Append(node.raw_code.value)
elif isinstance(node, IfNode):
cond = env.EvalExp(node.exp)
if cond:
RunCode(env.Clone(), node.then_branch, output)
elif node.else_branch is not None:
RunCode(env.Clone(), node.else_branch, output)
elif isinstance(node, ExpNode):
value = env.EvalExp(node)
output.Append('%s' % (value,))
elif isinstance(node, LiteralDollarNode):
output.Append('$')
elif isinstance(node, CodeNode):
RunCode(env.Clone(), node, output)
else:
print 'BAD'
print node
sys.exit(1)
def RunCode(env, code_node, output):
for atomic_code in code_node.atomic_code:
RunAtomicCode(env, atomic_code, output)
def IsSingleLineComment(cur_line):
return '//' in cur_line
def IsInPreprocessorDirective(prev_lines, cur_line):
if cur_line.lstrip().startswith('#'):
return True
return prev_lines and prev_lines[-1].endswith('\\')
def WrapComment(line, output):
loc = line.find('//')
before_comment = line[:loc].rstrip()
if before_comment == '':
indent = loc
else:
output.append(before_comment)
indent = len(before_comment) - len(before_comment.lstrip())
prefix = indent*' ' + '// '
max_len = 80 - len(prefix)
comment = line[loc + 2:].strip()
segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != '']
cur_line = ''
for seg in segs:
if len((cur_line + seg).rstrip()) < max_len:
cur_line += seg
else:
if cur_line.strip() != '':
output.append(prefix + cur_line.rstrip())
cur_line = seg.lstrip()
if cur_line.strip() != '':
output.append(prefix + cur_line.strip())
def WrapCode(line, line_concat, output):
indent = len(line) - len(line.lstrip())
prefix = indent*' ' # Prefix of the current line
max_len = 80 - indent - len(line_concat) # Maximum length of the current line
new_prefix = prefix + 4*' ' # Prefix of a continuation line
new_max_len = max_len - 4 # Maximum length of a continuation line
# Prefers to wrap a line after a ',' or ';'.
segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
cur_line = '' # The current line without leading spaces.
for seg in segs:
# If the line is still too long, wrap at a space.
while cur_line == '' and len(seg.strip()) > max_len:
seg = seg.lstrip()
split_at = seg.rfind(' ', 0, max_len)
output.append(prefix + seg[:split_at].strip() + line_concat)
seg = seg[split_at + 1:]
prefix = new_prefix
max_len = new_max_len
if len((cur_line + seg).rstrip()) < max_len:
cur_line = (cur_line + seg).lstrip()
else:
output.append(prefix + cur_line.rstrip() + line_concat)
prefix = new_prefix
max_len = new_max_len
cur_line = seg.lstrip()
if cur_line.strip() != '':
output.append(prefix + cur_line.strip())
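# Note (added commentary, not in the original script): WrapCode prefers to break
# after a ',' or ';', falls back to breaking at a space when a single segment is
# still longer than the line budget, and indents every continuation line by four
# extra spaces. WrapPreprocessorDirective and WrapPlainCode below differ only in
# the trailing continuation marker (a backslash for preprocessor lines, nothing
# for plain code).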
def WrapPreprocessorDirective(line, output):
WrapCode(line, ' \\', output)
def WrapPlainCode(line, output):
WrapCode(line, '', output)
def IsMultiLineIWYUPragma(line):
return re.search(r'/\* IWYU pragma: ', line)
def IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
return (re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line) or
re.match(r'^#include\s', line) or
# Don't break IWYU pragmas, either; that causes iwyu.py problems.
re.search(r'// IWYU pragma: ', line))
def WrapLongLine(line, output):
line = line.rstrip()
if len(line) <= 80:
output.append(line)
elif IsSingleLineComment(line):
if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
# The style guide made an exception to allow long header guard lines,
# includes and IWYU pragmas.
output.append(line)
else:
WrapComment(line, output)
elif IsInPreprocessorDirective(output, line):
if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
# The style guide made an exception to allow long header guard lines,
# includes and IWYU pragmas.
output.append(line)
else:
WrapPreprocessorDirective(line, output)
elif IsMultiLineIWYUPragma(line):
output.append(line)
else:
WrapPlainCode(line, output)
def BeautifyCode(string):
lines = string.splitlines()
output = []
for line in lines:
WrapLongLine(line, output)
output2 = [line.rstrip() for line in output]
return '\n'.join(output2) + '\n'
def ConvertFromPumpSource(src_text):
"""Return the text generated from the given Pump source text."""
ast = ParseToAST(StripMetaComments(src_text))
output = Output()
RunCode(Env(), ast, output)
return BeautifyCode(output.string)
def main(argv):
if len(argv) == 1:
print __doc__
sys.exit(1)
file_path = argv[-1]
output_str = ConvertFromPumpSource(file(file_path, 'r').read())
if file_path.endswith('.pump'):
output_file_path = file_path[:-5]
else:
output_file_path = '-'
if output_file_path == '-':
print output_str,
else:
output_file = file(output_file_path, 'w')
output_file.write('// This file was GENERATED by command:\n')
output_file.write('// %s %s\n' %
(os.path.basename(__file__), os.path.basename(file_path)))
output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
output_file.write(output_str)
output_file.close()
if __name__ == '__main__':
main(sys.argv)
| dimmwitted-master | lib/gtest-1.7.0/scripts/pump.py |
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gtest_files.py v0.2.0
Fuses Google Test source code into a .h file and a .cc file.
SYNOPSIS
fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR
Scans GTEST_ROOT_DIR for Google Test source code, and generates
two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc.
Then you can build your tests by adding OUTPUT_DIR to the include
search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These
two files contain everything you need to use Google Test. Hence
you can "install" Google Test by copying them to wherever you want.
GTEST_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gtest_files.py fused_gtest
./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Test headers. Please report any
problems to [email protected]. You can read
http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide for
more information.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import sets
import sys
# We assume that this file is in the scripts/ directory in the Google
# Test root directory.
DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
# Regex for matching '#include "gtest/..."'.
INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"')
# Regex for matching '#include "src/..."'.
INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"')
# Where to find the source seed files.
GTEST_H_SEED = 'include/gtest/gtest.h'
GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h'
GTEST_ALL_CC_SEED = 'src/gtest-all.cc'
# Where to put the generated files.
GTEST_H_OUTPUT = 'gtest/gtest.h'
GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc'
def VerifyFileExists(directory, relative_path):
"""Verifies that the given file exists; aborts on failure.
relative_path is the file path relative to the given directory.
"""
if not os.path.isfile(os.path.join(directory, relative_path)):
print 'ERROR: Cannot find %s in directory %s.' % (relative_path,
directory)
print ('Please either specify a valid project root directory '
'or omit it on the command line.')
sys.exit(1)
def ValidateGTestRootDir(gtest_root):
"""Makes sure gtest_root points to a valid gtest root directory.
The function aborts the program on failure.
"""
VerifyFileExists(gtest_root, GTEST_H_SEED)
VerifyFileExists(gtest_root, GTEST_ALL_CC_SEED)
def VerifyOutputFile(output_dir, relative_path):
"""Verifies that the given output file path is valid.
relative_path is relative to the output_dir directory.
"""
# Makes sure the output file either doesn't exist or can be overwritten.
output_file = os.path.join(output_dir, relative_path)
if os.path.exists(output_file):
# TODO([email protected]): The following user-interaction doesn't
# work with automated processes. We should provide a way for the
# Makefile to force overwriting the files.
print ('%s already exists in directory %s - overwrite it? (y/N) ' %
(relative_path, output_dir))
answer = sys.stdin.readline().strip()
if answer not in ['y', 'Y']:
print 'ABORTED.'
sys.exit(1)
# Makes sure the directory holding the output file exists; creates
# it and all its ancestors if necessary.
parent_directory = os.path.dirname(output_file)
if not os.path.isdir(parent_directory):
os.makedirs(parent_directory)
def ValidateOutputDir(output_dir):
"""Makes sure output_dir points to a valid output directory.
The function aborts the program on failure.
"""
VerifyOutputFile(output_dir, GTEST_H_OUTPUT)
VerifyOutputFile(output_dir, GTEST_ALL_CC_OUTPUT)
def FuseGTestH(gtest_root, output_dir):
"""Scans folder gtest_root to generate gtest/gtest.h in output_dir."""
output_file = file(os.path.join(output_dir, GTEST_H_OUTPUT), 'w')
processed_files = sets.Set() # Holds all gtest headers we've processed.
def ProcessFile(gtest_header_path):
"""Processes the given gtest header file."""
# We don't process the same header twice.
if gtest_header_path in processed_files:
return
processed_files.add(gtest_header_path)
# Reads each line in the given gtest header.
for line in file(os.path.join(gtest_root, gtest_header_path), 'r'):
m = INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
# It's '#include "gtest/..."' - let's process it recursively.
ProcessFile('include/' + m.group(1))
else:
# Otherwise we copy the line unchanged to the output file.
output_file.write(line)
ProcessFile(GTEST_H_SEED)
output_file.close()
def FuseGTestAllCcToFile(gtest_root, output_file):
"""Scans folder gtest_root to generate gtest/gtest-all.cc in output_file."""
processed_files = sets.Set()
def ProcessFile(gtest_source_file):
"""Processes the given gtest source file."""
# We don't process the same #included file twice.
if gtest_source_file in processed_files:
return
processed_files.add(gtest_source_file)
# Reads each line in the given gtest source file.
for line in file(os.path.join(gtest_root, gtest_source_file), 'r'):
m = INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
if 'include/' + m.group(1) == GTEST_SPI_H_SEED:
# It's '#include "gtest/gtest-spi.h"'. This file is not
# #included by "gtest/gtest.h", so we need to process it.
ProcessFile(GTEST_SPI_H_SEED)
else:
# It's '#include "gtest/foo.h"' where foo is not gtest-spi.
# We treat it as '#include "gtest/gtest.h"', as all other
# gtest headers are being fused into gtest.h and cannot be
# #included directly.
# There is no need to #include "gtest/gtest.h" more than once.
if not GTEST_H_SEED in processed_files:
processed_files.add(GTEST_H_SEED)
output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,))
else:
m = INCLUDE_SRC_FILE_REGEX.match(line)
if m:
# It's '#include "src/foo"' - let's process it recursively.
ProcessFile(m.group(1))
else:
output_file.write(line)
ProcessFile(GTEST_ALL_CC_SEED)
def FuseGTestAllCc(gtest_root, output_dir):
"""Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir."""
output_file = file(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w')
FuseGTestAllCcToFile(gtest_root, output_file)
output_file.close()
def FuseGTest(gtest_root, output_dir):
"""Fuses gtest.h and gtest-all.cc."""
ValidateGTestRootDir(gtest_root)
ValidateOutputDir(output_dir)
FuseGTestH(gtest_root, output_dir)
FuseGTestAllCc(gtest_root, output_dir)
def main():
argc = len(sys.argv)
if argc == 2:
# fuse_gtest_files.py OUTPUT_DIR
FuseGTest(DEFAULT_GTEST_ROOT_DIR, sys.argv[1])
elif argc == 3:
# fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR
FuseGTest(sys.argv[1], sys.argv[2])
else:
print __doc__
sys.exit(1)
if __name__ == '__main__':
main()
| dimmwitted-master | lib/gtest-1.7.0/scripts/fuse_gtest_files.py |
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""gen_gtest_pred_impl.py v0.1
Generates the implementation of Google Test predicate assertions and
accompanying tests.
Usage:
gen_gtest_pred_impl.py MAX_ARITY
where MAX_ARITY is a positive integer.
The command generates the implementation of up-to MAX_ARITY-ary
predicate assertions, and writes it to file gtest_pred_impl.h in the
directory where the script is. It also generates the accompanying
unit test in file gtest_pred_impl_unittest.cc.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import sys
import time
# Where this script is.
SCRIPT_DIR = os.path.dirname(sys.argv[0])
# Where to store the generated header.
HEADER = os.path.join(SCRIPT_DIR, '../include/gtest/gtest_pred_impl.h')
# Where to store the generated unit test.
UNIT_TEST = os.path.join(SCRIPT_DIR, '../test/gtest_pred_impl_unittest.cc')
def HeaderPreamble(n):
"""Returns the preamble for the header file.
Args:
n: the maximum arity of the predicate macros to be generated.
"""
# A map that defines the values used in the preamble template.
DEFS = {
'today' : time.strftime('%m/%d/%Y'),
'year' : time.strftime('%Y'),
'command' : '%s %s' % (os.path.basename(sys.argv[0]), n),
'n' : n
}
return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
//
// Implements a family of generic predicate assertion macros.
#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
// Makes sure this header is not included before gtest.h.
#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
# error Do not include gtest_pred_impl.h directly. Include gtest.h instead.
#endif // GTEST_INCLUDE_GTEST_GTEST_H_
// This header implements a family of generic predicate assertion
// macros:
//
// ASSERT_PRED_FORMAT1(pred_format, v1)
// ASSERT_PRED_FORMAT2(pred_format, v1, v2)
// ...
//
// where pred_format is a function or functor that takes n (in the
// case of ASSERT_PRED_FORMATn) values and their source expression
// text, and returns a testing::AssertionResult. See the definition
// of ASSERT_EQ in gtest.h for an example.
//
// If you don't care about formatting, you can use the more
// restrictive version:
//
// ASSERT_PRED1(pred, v1)
// ASSERT_PRED2(pred, v1, v2)
// ...
//
// where pred is an n-ary function or functor that returns bool,
// and the values v1, v2, ..., must support the << operator for
// streaming to std::ostream.
//
// We also define the EXPECT_* variations.
//
// For now we only support predicates whose arity is at most %(n)s.
// Please email [email protected] if you need
// support for higher arities.
// GTEST_ASSERT_ is the basic statement to which all of the assertions
// in this file reduce. Don't use this in your code.
#define GTEST_ASSERT_(expression, on_failure) \\
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\
if (const ::testing::AssertionResult gtest_ar = (expression)) \\
; \\
else \\
on_failure(gtest_ar.failure_message())
""" % DEFS)
def Arity(n):
"""Returns the English name of the given arity."""
if n < 0:
return None
elif n <= 3:
return ['nullary', 'unary', 'binary', 'ternary'][n]
else:
return '%s-ary' % n
def Title(word):
"""Returns the given word in title case. The difference between
this and string's title() method is that Title('4-ary') is '4-ary'
while '4-ary'.title() is '4-Ary'."""
return word[0].upper() + word[1:]
def OneTo(n):
"""Returns the list [1, 2, 3, ..., n]."""
return range(1, n + 1)
def Iter(n, format, sep=''):
"""Given a positive integer n, a format string that contains 0 or
more '%s' format specs, and optionally a separator string, returns
the join of n strings, each formatted with the format string on an
iterator ranged from 1 to n.
Example:
Iter(3, 'v%s', sep=', ') returns 'v1, v2, v3'.
"""
# How many '%s' specs are in format?
spec_count = len(format.split('%s')) - 1
return sep.join([format % (spec_count * (i,)) for i in OneTo(n)])
def ImplementationForArity(n):
"""Returns the implementation of n-ary predicate assertions."""
# A map the defines the values used in the implementation template.
DEFS = {
'n' : str(n),
'vs' : Iter(n, 'v%s', sep=', '),
'vts' : Iter(n, '#v%s', sep=', '),
'arity' : Arity(n),
'Arity' : Title(Arity(n))
}
impl = """
// Helper function for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
template <typename Pred""" % DEFS
impl += Iter(n, """,
typename T%s""")
impl += """>
AssertionResult AssertPred%(n)sHelper(const char* pred_text""" % DEFS
impl += Iter(n, """,
const char* e%s""")
impl += """,
Pred pred"""
impl += Iter(n, """,
const T%s& v%s""")
impl += """) {
if (pred(%(vs)s)) return AssertionSuccess();
""" % DEFS
impl += ' return AssertionFailure() << pred_text << "("'
impl += Iter(n, """
<< e%s""", sep=' << ", "')
impl += ' << ") evaluates to false, where"'
impl += Iter(n, """
<< "\\n" << e%s << " evaluates to " << v%s""")
impl += """;
}
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
// Don't use this in your code.
#define GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, on_failure)\\
GTEST_ASSERT_(pred_format(%(vts)s, %(vs)s), \\
on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
#define GTEST_PRED%(n)s_(pred, %(vs)s, on_failure)\\
GTEST_ASSERT_(::testing::AssertPred%(n)sHelper(#pred""" % DEFS
impl += Iter(n, """, \\
#v%s""")
impl += """, \\
pred"""
impl += Iter(n, """, \\
v%s""")
impl += """), on_failure)
// %(Arity)s predicate assertion macros.
#define EXPECT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_FATAL_FAILURE_)
""" % DEFS
return impl
def HeaderPostamble():
"""Returns the postamble for the header file."""
return """
#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
"""
def GenerateFile(path, content):
"""Given a file path and a content string, overwrites it with the
given content."""
print 'Updating file %s . . .' % path
f = file(path, 'w+')
print >>f, content,
f.close()
print 'File %s has been updated.' % path
def GenerateHeader(n):
"""Given the maximum arity n, updates the header file that implements
the predicate assertions."""
GenerateFile(HEADER,
HeaderPreamble(n)
+ ''.join([ImplementationForArity(i) for i in OneTo(n)])
+ HeaderPostamble())
def UnitTestPreamble():
"""Returns the preamble for the unit test file."""
# A map that defines the values used in the preamble template.
DEFS = {
'today' : time.strftime('%m/%d/%Y'),
'year' : time.strftime('%Y'),
'command' : '%s %s' % (os.path.basename(sys.argv[0]), sys.argv[1]),
}
return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
// Regression test for gtest_pred_impl.h
//
// This file is generated by a script and quite long. If you intend to
// learn how Google Test works by reading its unit tests, read
// gtest_unittest.cc instead.
//
// This is intended as a regression test for the Google Test predicate
// assertions. We compile it as part of the gtest_unittest target
// only to keep the implementation tidy and compact, as it is quite
// involved to set up the stage for testing Google Test using Google
// Test itself.
//
// Currently, gtest_unittest takes ~11 seconds to run in the testing
// daemon. In the future, if it grows too large and needs much more
// time to finish, we should consider separating this file into a
// stand-alone regression test.
#include <iostream>
#include "gtest/gtest.h"
#include "gtest/gtest-spi.h"
// A user-defined data type.
struct Bool {
explicit Bool(int val) : value(val != 0) {}
bool operator>(int n) const { return value > Bool(n).value; }
Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); }
bool operator==(const Bool& rhs) const { return value == rhs.value; }
bool value;
};
// Enables Bool to be used in assertions.
std::ostream& operator<<(std::ostream& os, const Bool& x) {
return os << (x.value ? "true" : "false");
}
""" % DEFS)
def TestsForArity(n):
"""Returns the tests for n-ary predicate assertions."""
# A map that defines the values used in the template for the tests.
DEFS = {
'n' : n,
'es' : Iter(n, 'e%s', sep=', '),
'vs' : Iter(n, 'v%s', sep=', '),
'vts' : Iter(n, '#v%s', sep=', '),
'tvs' : Iter(n, 'T%s v%s', sep=', '),
'int_vs' : Iter(n, 'int v%s', sep=', '),
'Bool_vs' : Iter(n, 'Bool v%s', sep=', '),
'types' : Iter(n, 'typename T%s', sep=', '),
'v_sum' : Iter(n, 'v%s', sep=' + '),
'arity' : Arity(n),
'Arity' : Title(Arity(n)),
}
tests = (
"""// Sample functions/functors for testing %(arity)s predicate assertions.
// A %(arity)s predicate function.
template <%(types)s>
bool PredFunction%(n)s(%(tvs)s) {
return %(v_sum)s > 0;
}
// The following two functions are needed to circumvent a bug in
// gcc 2.95.3, which sometimes has problem with the above template
// function.
bool PredFunction%(n)sInt(%(int_vs)s) {
return %(v_sum)s > 0;
}
bool PredFunction%(n)sBool(%(Bool_vs)s) {
return %(v_sum)s > 0;
}
""" % DEFS)
tests += """
// A %(arity)s predicate functor.
struct PredFunctor%(n)s {
template <%(types)s>
bool operator()(""" % DEFS
tests += Iter(n, 'const T%s& v%s', sep=""",
""")
tests += """) {
return %(v_sum)s > 0;
}
};
""" % DEFS
tests += """
// A %(arity)s predicate-formatter function.
template <%(types)s>
testing::AssertionResult PredFormatFunction%(n)s(""" % DEFS
tests += Iter(n, 'const char* e%s', sep=""",
""")
tests += Iter(n, """,
const T%s& v%s""")
tests += """) {
if (PredFunction%(n)s(%(vs)s))
return testing::AssertionSuccess();
return testing::AssertionFailure()
<< """ % DEFS
tests += Iter(n, 'e%s', sep=' << " + " << ')
tests += """
<< " is expected to be positive, but evaluates to "
<< %(v_sum)s << ".";
}
""" % DEFS
tests += """
// A %(arity)s predicate-formatter functor.
struct PredFormatFunctor%(n)s {
template <%(types)s>
testing::AssertionResult operator()(""" % DEFS
tests += Iter(n, 'const char* e%s', sep=""",
""")
tests += Iter(n, """,
const T%s& v%s""")
tests += """) const {
return PredFormatFunction%(n)s(%(es)s, %(vs)s);
}
};
""" % DEFS
tests += """
// Tests for {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
class Predicate%(n)sTest : public testing::Test {
protected:
virtual void SetUp() {
expected_to_finish_ = true;
finished_ = false;""" % DEFS
tests += """
""" + Iter(n, 'n%s_ = ') + """0;
}
"""
tests += """
virtual void TearDown() {
// Verifies that each of the predicate's arguments was evaluated
// exactly once."""
tests += ''.join(["""
EXPECT_EQ(1, n%s_) <<
"The predicate assertion didn't evaluate argument %s "
"exactly once.";""" % (i, i + 1) for i in OneTo(n)])
tests += """
// Verifies that the control flow in the test function is expected.
if (expected_to_finish_ && !finished_) {
FAIL() << "The predicate assertion unexpactedly aborted the test.";
} else if (!expected_to_finish_ && finished_) {
FAIL() << "The failed predicate assertion didn't abort the test "
"as expected.";
}
}
// true iff the test function is expected to run to finish.
static bool expected_to_finish_;
// true iff the test function did run to finish.
static bool finished_;
""" % DEFS
tests += Iter(n, """
static int n%s_;""")
tests += """
};
bool Predicate%(n)sTest::expected_to_finish_;
bool Predicate%(n)sTest::finished_;
""" % DEFS
tests += Iter(n, """int Predicate%%(n)sTest::n%s_;
""") % DEFS
tests += """
typedef Predicate%(n)sTest EXPECT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest EXPECT_PRED%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED%(n)sTest;
""" % DEFS
def GenTest(use_format, use_assert, expect_failure,
use_functor, use_user_type):
"""Returns the test for a predicate assertion macro.
Args:
use_format: true iff the assertion is a *_PRED_FORMAT*.
use_assert: true iff the assertion is a ASSERT_*.
expect_failure: true iff the assertion is expected to fail.
use_functor: true iff the first argument of the assertion is
a functor (as opposed to a function)
use_user_type: true iff the predicate functor/function takes
argument(s) of a user-defined type.
Example:
GenTest(1, 0, 0, 1, 0) returns a test that tests the behavior
of a successful EXPECT_PRED_FORMATn() that takes a functor
whose arguments have built-in types."""
if use_assert:
assrt = 'ASSERT' # 'assert' is reserved, so we cannot use
# that identifier here.
else:
assrt = 'EXPECT'
assertion = assrt + '_PRED'
if use_format:
pred_format = 'PredFormat'
assertion += '_FORMAT'
else:
pred_format = 'Pred'
assertion += '%(n)s' % DEFS
if use_functor:
pred_format_type = 'functor'
pred_format += 'Functor%(n)s()'
else:
pred_format_type = 'function'
pred_format += 'Function%(n)s'
if not use_format:
if use_user_type:
pred_format += 'Bool'
else:
pred_format += 'Int'
test_name = pred_format_type.title()
if use_user_type:
arg_type = 'user-defined type (Bool)'
test_name += 'OnUserType'
if expect_failure:
arg = 'Bool(n%s_++)'
else:
arg = 'Bool(++n%s_)'
else:
arg_type = 'built-in type (int)'
test_name += 'OnBuiltInType'
if expect_failure:
arg = 'n%s_++'
else:
arg = '++n%s_'
if expect_failure:
successful_or_failed = 'failed'
expected_or_not = 'expected.'
test_name += 'Failure'
else:
successful_or_failed = 'successful'
expected_or_not = 'UNEXPECTED!'
test_name += 'Success'
# A map that defines the values used in the test template.
defs = DEFS.copy()
defs.update({
'assert' : assrt,
'assertion' : assertion,
'test_name' : test_name,
'pf_type' : pred_format_type,
'pf' : pred_format,
'arg_type' : arg_type,
'arg' : arg,
'successful' : successful_or_failed,
'expected' : expected_or_not,
})
test = """
// Tests a %(successful)s %(assertion)s where the
// predicate-formatter is a %(pf_type)s on a %(arg_type)s.
TEST_F(%(assertion)sTest, %(test_name)s) {""" % defs
indent = (len(assertion) + 3)*' '
extra_indent = ''
if expect_failure:
extra_indent = ' '
if use_assert:
test += """
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE({ // NOLINT"""
else:
test += """
EXPECT_NONFATAL_FAILURE({ // NOLINT"""
test += '\n' + extra_indent + """ %(assertion)s(%(pf)s""" % defs
test = test % defs
test += Iter(n, ',\n' + indent + extra_indent + '%(arg)s' % defs)
test += ');\n' + extra_indent + ' finished_ = true;\n'
if expect_failure:
test += ' }, "");\n'
test += '}\n'
return test
# Generates tests for all 2**6 = 64 combinations.
tests += ''.join([GenTest(use_format, use_assert, expect_failure,
use_functor, use_user_type)
for use_format in [0, 1]
for use_assert in [0, 1]
for expect_failure in [0, 1]
for use_functor in [0, 1]
for use_user_type in [0, 1]
])
return tests
def UnitTestPostamble():
"""Returns the postamble for the tests."""
return ''
def GenerateUnitTest(n):
"""Returns the tests for up-to n-ary predicate assertions."""
GenerateFile(UNIT_TEST,
UnitTestPreamble()
+ ''.join([TestsForArity(i) for i in OneTo(n)])
+ UnitTestPostamble())
def _Main():
"""The entry point of the script. Generates the header file and its
unit test."""
if len(sys.argv) != 2:
print __doc__
print 'Author: ' + __author__
sys.exit(1)
n = int(sys.argv[1])
GenerateHeader(n)
GenerateUnitTest(n)
if __name__ == '__main__':
_Main()
| dimmwitted-master | lib/gtest-1.7.0/scripts/gen_gtest_pred_impl.py |
import argparse
import yaml
import itertools
import os
import importlib
import time
import tqdm
import numpy as np
import subprocess
from copy import deepcopy
GPUS = {'v100': 'nvidia-tesla-v100', 'p100': 'nvidia-tesla-p100', 'a100': 'nvidia-tesla-a100', 't4': 'nvidia-tesla-t4'}
def load_base_job_config():
return yaml.load(open('gcp/job.yaml'), Loader=yaml.FullLoader)
def startup_commands(use_gdb):
cmds = ['cd /home',
'export HOME="/home"',
'source /venv/bin/activate',
# insert pip install or apt-get commands here if any
'pip install git+https://github.com/krandiash/quinine.git --upgrade',
'eval `ssh-agent -s`',
'ssh-add /home/.ssh/id_rsa_stanford',
# 'cp /home/.ssh/id_rsa ~/.ssh/',
# 'ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts',
# "eval `ssh-agent -s`",
# 'ssh-add /home/.ssh/id_rsa',
'bash /home/.wandb/auth',
'cd code/mistral',
# 'git pull origin main',
]
if not use_gdb:
# run_cmd = f'python {method_to_run}.py --config {config_path}'
run_cmd = f'bash scripts/mistral-gcp-gpt2-small.sh MODEL=downpour RESUME=true'
else:
# run_cmd = f'gdb -ex -r -ex backtrace full --args python {method_to_run}.py --config {config_path}'
run_cmd = f'gdb -ex -r -ex backtrace full --args python -m train' \
f' --config conf/gpt2-gcp.yaml'
cmds.append(run_cmd)
return cmds
def create_job_config(job_key, job_id, store_path, use_gdb, gpu):
# Load up a base config
base_config = load_base_job_config()
# Modify it
base_config['metadata']['name'] += f'-{job_key}-{job_id}'
base_config['spec']['template']['spec']['containers'][0]['name'] += f'-{job_key}-{job_id}'
# Add in the startup commands
base_config['spec']['template']['spec']['containers'][0]['command'] = ['/bin/zsh', '-c']
startup_cmds = startup_commands(use_gdb)
base_config['spec']['template']['spec']['containers'][0]['args'] = [' && '.join(startup_cmds)]
# Modify the GPU
base_config['spec']['template']['spec']['nodeSelector']['cloud.google.com/gke-accelerator'] = GPUS[gpu]
base_config['spec']['template']['spec']['nodeSelector']['cloud.google.com/gke-nodepool'] = f'sharky'
# Store this configuration
yaml.dump(base_config, open(store_path, 'w'))
return base_config
def launch_kubernetes_job(path):
# Execute a job
subprocess.run(['kubectl', 'create', '-f', f'{path}'])
def git_push(path):
cmds = [['git', 'add', f'{path}/*'],
['git', 'commit', '-m', 'cfgupdates'],
['git', 'push']]
for cmd in cmds:
subprocess.run(cmd)
def determine_param_combinations(config):
# Take as input a dictionary configuration and identify the parameters and the values we're sweeping over
grid_parameters_to_sweep = {}
zip_parameters_to_sweep = {}
for parameter in config:
if type(config[parameter]) == dict and 'values' in config[parameter]:
assert 'type' in config[parameter], 'Make sure _type_ is present if _values_ is present in the config.'
if config[parameter]['type'] == 'grid':
grid_parameters_to_sweep[parameter] = config[parameter]['values']
elif config[parameter]['type'] == 'zip':
zip_parameters_to_sweep[parameter] = config[parameter]['values']
return grid_parameters_to_sweep, zip_parameters_to_sweep
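# Illustrative example (not part of the original script): for a sweep config like
#   {'lr':   {'type': 'grid', 'values': [0.1, 0.01]},
#    'seed': {'type': 'zip',  'values': [1, 2]},
#    'bs':   {'type': 'zip',  'values': [32, 64]}}
# this returns ({'lr': [0.1, 0.01]}, {'seed': [1, 2], 'bs': [32, 64]}); grid
# parameters are later expanded into a full cross product while zip parameters
# advance together, index by index.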
def laurel_test_launch(args):
store_path = "gcp/"
i = 0
job_key = int(time.time())
job_yaml_path = f'{store_path}/job_downpour.yaml'
create_job_config(job_key=job_key,
job_id=i + 1,
store_path=job_yaml_path,
use_gdb=args.gdb,
gpu=args.gpu)
# Append to the queue of jobs we're running
# launch_kubernetes_job(job_yaml_path)
def launch(args):
# Ensure that the sweep config file is a .yaml
assert args.sweep_config.endswith('.yaml'), 'You need a .yaml file for the sweep configuration.'
# Load up the sweep configuration file and create a storage path
config = yaml.load(open(args.sweep_config), Loader=yaml.FullLoader)
    store_path = args.sweep_config[:-len(".yaml")] + '_param_combos'  # endswith('.yaml') asserted above
os.makedirs(store_path, exist_ok=True)
# Figure out the module this sweep is targeting: what's the .py file being run
try:
module_to_run = config['general']['module']
except:
print("Please ensure that the config contains a general.module parameter indicating the module to be run.")
raise
# Load up the schema for the module that needs to be executed
try:
schema = getattr(importlib.import_module(module_to_run), 'get_schema')()
except:
print('Unable to find a get_schema() fn inside the module being executed. '
'Please define a get_schema() fn and continue.')
raise
# Recursively resolve this config to remove dependencies on parent configs
# Create a unique key for all these jobs
job_key = int(time.time())
# Determine the parameters we'll be sweeping over
grid_parameters_to_sweep, zip_parameters_to_sweep = determine_param_combinations(config)
# Zip up all the parameter combinations
sweep_lengths = np.array([len(e) for e in zip_parameters_to_sweep.values()])
assert np.all(sweep_lengths == sweep_lengths[0]), 'Length of values should be equal.'
parameter_combinations = list(zip(*zip_parameters_to_sweep.values()))
# Grid combination of the rest of the parameter combinations
parameter_combinations = list(itertools.product(*grid_parameters_to_sweep.values(), parameter_combinations))
parameter_combinations = [list(e[:-1]) + list(e[-1]) for e in parameter_combinations]
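    # Worked example (added for clarity, not in the original script): with grid
    # {'lr': [0.1, 0.01]} and zip {'seed': [1, 2], 'bs': [32, 64]}, the zip step
    # yields [(1, 32), (2, 64)] and the product step expands this into
    # [[0.1, 1, 32], [0.1, 2, 64], [0.01, 1, 32], [0.01, 2, 64]].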
# Keep track of whatever job manifests (.yaml) we're generating
# A single job will run a single configuration to completion
job_yaml_paths = []
# Go over each parameter configuration
for i, choice in enumerate(parameter_combinations):
# Create a new configuration that uses this combination of parameters
parameter_choices = dict(
zip(list(grid_parameters_to_sweep.keys()) + list(zip_parameters_to_sweep.keys()), choice))
new_config = deepcopy(config)
new_config.update(parameter_choices)
# Dump this new configuration
config_path = f'{store_path}/config_{i + 1}.yaml'
yaml.dump(new_config, open(config_path, 'w'))
# Create a job configuration to run this
job_yaml_path = f'{store_path}/job_{i + 1}.yaml'
        # create_job_config() in this script only accepts (job_key, job_id, store_path,
        # use_gdb, gpu); the per-run config path is not consumed because the startup
        # command is fixed above.
        create_job_config(job_key=job_key,
                          job_id=i + 1,
                          store_path=job_yaml_path,
                          use_gdb=args.gdb,
                          gpu=args.gpu)
# Append to the queue of jobs we're running
job_yaml_paths.append(job_yaml_path)
# Git push
print("####################################################################################")
print("Pushing to Git!")
print("####################################################################################")
git_push(store_path)
# Launch all the Kubernetes jobs
if args.run or args.tentative:
print("####################################################################################")
print(f"Launching {'all the' if args.run else 'one of the'} Kubernetes jobs!")
print("####################################################################################")
for path in tqdm.tqdm(job_yaml_paths):
launch_kubernetes_job(path)
if args.tentative:
break
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--sweep_config', '-c', help='Path to the sweep configuration file.')
parser.add_argument('--tentative', '-t', action='store_true', help="Launch just a single job.")
parser.add_argument('--run', '-r', action='store_true', help="Launch the jobs as well.")
parser.add_argument('--gpu', '-g', type=str, choices=['v100', 'p100', 't4', 'a100'], required=True)
parser.add_argument('--pool', '-p', type=int, choices=[1, 2, 3, 4, 5], default=None)
parser.add_argument('--gdb', action='store_true', help='Execute with gdb.')
args = parser.parse_args()
# Launch the parameter sweep
laurel_test_launch(args)
| gcp-cloud-nine-master | gcp/run_jobs.py |
import argparse
import yaml
import subprocess
import os
import sys
GPUS = {'v100': 'nvidia-tesla-v100',
'p100': 'nvidia-tesla-p100',
't4': 'nvidia-tesla-t4'}
def startup_commands():
cmds = ['cd /home/workspace/projects/muscaria/',
# insert pip install or apt-get commands here if any
'pip install --upgrade git+https://github.com/tensorpack/dataflow.git',
'pip install tensorflow-datasets',
'pip install git+https://github.com/PetrochukM/PyTorch-NLP.git',
'pip install transformers',
'pip install --upgrade wandb gin-config cytoolz funcy munch cerberus pytorch-ignite',
'pip install --upgrade git+https://github.com/aleju/imgaug.git',
'apt-get -y install fish',
# 'apt-get -y update',
'apt-get -y install libxext6 libx11-6 libxrender1 libxtst6 libxi6 libxml2 libglib2.0-0 gdb',
'bash /home/.wandb/auth',
'mkdir ~/.ssh',
'cp /home/.ssh/noaug_id_rsa ~/.ssh/',
'git config --global user.email "[email protected]"',
'git config --global user.name "hazymturk"',
'sleep infinity']
return cmds
def launch_pod(name, gpu, cpu, pool, image):
# Load the base manifest for launching Pods
    config = yaml.load(open('aws/pod.yaml'), Loader=yaml.FullLoader)
# Modify it
if gpu != 'none':
config['spec']['nodeSelector']['cloud.google.com/gke-accelerator'] = GPUS[gpu]
if pool:
config['spec']['nodeSelector']['cloud.google.com/gke-nodepool'] = f'gpu-pool-{pool}'
else:
assert pool is not None, "Specify the pool if you're not using a GPU."
# Wipe out the GPU node selector
config['spec']['nodeSelector'] = {}
# Specify the pool
config['spec']['nodeSelector']['cloud.google.com/gke-nodepool'] = f'pool-{pool}'
# Wipe out the GPU request
config['spec']['containers'][0]['resources'] = {'limits': {}, 'requests': {}}
if cpu:
# Put in a CPU request
config['spec']['containers'][0]['resources']['limits']['cpu'] = cpu
config['spec']['containers'][0]['resources']['requests']['cpu'] = cpu
# Set the name of the Pod
config['metadata']['name'] = config['spec']['containers'][0]['name'] = name
# Set the name of the image we want the Pod to run
config['spec']['containers'][0]['image'] = image
# Put in a bunch of startup commands
config['spec']['containers'][0]['command'] = ['/bin/sh', '-c']
config['spec']['containers'][0]['args'] = [' && '.join(startup_commands())]
# Store it
yaml.dump(config, open('temp.yaml', 'w'))
# Log
print("Launching a pod...")
print(f"Name: {name}\tGPU: {gpu}\tImage: {image}")
print("###################################################################################")
# Launch the Pod
subprocess.call('kubectl apply -f temp.yaml', shell=True)
# Clean up
os.remove('temp.yaml')
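# Example call (illustrative; the pod name is a placeholder):
#   launch_pod(name='dev-pod', gpu='v100', cpu=None, pool=1,
#              image='gcr.io/data-aug/img_tf2_torch')
# renders the base manifest with a V100 node selector on gpu-pool-1 and applies
# it via `kubectl apply -f temp.yaml`.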
def main(args):
if args.resource == 'pod':
launch_pod(args.name, args.gpu, args.cpu, args.pool, args.image)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--resource', '-r', type=str, choices=['pod', 'job'], required=True)
parser.add_argument('--gpu', '-g', type=str, choices=['v100', 'p100', 't4', 'none'], required=True)
parser.add_argument('--cpu', '-c', type=int, default=None)
parser.add_argument('--pool', '-p', type=int, choices=[1, 2, 3, 4], default=None)
parser.add_argument('--name', '-n', type=str, required=True)
parser.add_argument('--image', '-im', type=str, default='gcr.io/data-aug/img_tf2_torch')
args = parser.parse_args()
main(args)
| gcp-cloud-nine-master | gcp/launch.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import torchvision
from transformers import BertForSequenceClassification, AdamW, get_scheduler
class ToyNet(torch.nn.Module):
def __init__(self, dim, gammas):
super(ToyNet, self).__init__()
# gammas is a list of three the first dimension determines how fast the
# spurious feature is learned the second dimension determines how fast
# the core feature is learned and the third dimension determines how
# fast the noise features are learned
self.register_buffer(
"gammas", torch.tensor([gammas[:2] + gammas[2:] * (dim - 2)])
)
self.fc = torch.nn.Linear(dim, 1, bias=False)
self.fc.weight.data = 0.01 / self.gammas * self.fc.weight.data
def forward(self, x):
return self.fc((x * self.gammas).float()).squeeze()
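# Note on ToyNet above (added commentary, not in the original code): with dim=4
# and gammas=(4.0, 1.0, 0.5) the registered buffer becomes [[4.0, 1.0, 0.5, 0.5]],
# i.e. one scale for the spurious feature, one for the core feature, and one
# shared by every noise dimension; per the comment in __init__, larger scales let
# the corresponding features be learned faster.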
class BertWrapper(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.model = model
def forward(self, x):
return self.model(
input_ids=x[:, :, 0],
attention_mask=x[:, :, 1],
token_type_ids=x[:, :, 2]).logits
def get_bert_optim(network, lr, weight_decay):
no_decay = ["bias", "LayerNorm.weight"]
decay_params = []
nodecay_params = []
    # Parameters whose names match the no_decay list (biases, LayerNorm weights)
    # should be excluded from weight decay; everything else is decayed.
    for n, p in network.named_parameters():
        if any(nd in n for nd in no_decay):
            nodecay_params.append(p)
        else:
            decay_params.append(p)
optimizer_grouped_parameters = [
{
"params": decay_params,
"weight_decay": weight_decay,
},
{
"params": nodecay_params,
"weight_decay": 0.0,
},
]
optimizer = AdamW(
optimizer_grouped_parameters,
lr=lr,
eps=1e-8)
return optimizer
def get_sgd_optim(network, lr, weight_decay):
return torch.optim.SGD(
network.parameters(),
lr=lr,
weight_decay=weight_decay,
momentum=0.9)
class ERM(torch.nn.Module):
def __init__(self, hparams, dataloader):
super().__init__()
self.hparams = dict(hparams)
dataset = dataloader.dataset
self.n_batches = len(dataloader)
self.data_type = dataset.data_type
self.n_classes = len(set(dataset.y))
self.n_groups = len(set(dataset.g))
self.n_examples = len(dataset)
self.last_epoch = 0
self.best_selec_val = 0
self.init_model_(self.data_type)
def init_model_(self, data_type, text_optim="sgd"):
self.clip_grad = text_optim == "adamw"
optimizers = {
"adamw": get_bert_optim,
"sgd": get_sgd_optim
}
if data_type == "images":
self.network = torchvision.models.resnet.resnet50(pretrained=True)
self.network.fc = torch.nn.Linear(
self.network.fc.in_features, self.n_classes)
self.optimizer = optimizers['sgd'](
self.network,
self.hparams['lr'],
self.hparams['weight_decay'])
self.lr_scheduler = None
self.loss = torch.nn.CrossEntropyLoss(reduction="none")
elif data_type == "text":
self.network = BertWrapper(
BertForSequenceClassification.from_pretrained(
'bert-base-uncased', num_labels=self.n_classes))
self.network.zero_grad()
self.optimizer = optimizers[text_optim](
self.network,
self.hparams['lr'],
self.hparams['weight_decay'])
num_training_steps = self.hparams["num_epochs"] * self.n_batches
self.lr_scheduler = get_scheduler(
"linear",
optimizer=self.optimizer,
num_warmup_steps=0,
num_training_steps=num_training_steps)
self.loss = torch.nn.CrossEntropyLoss(reduction="none")
elif data_type == "toy":
gammas = (
self.hparams['gamma_spu'],
self.hparams['gamma_core'],
self.hparams['gamma_noise'])
self.network = ToyNet(self.hparams['dim_noise'] + 2, gammas)
self.optimizer = optimizers['sgd'](
self.network,
self.hparams['lr'],
self.hparams['weight_decay'])
self.lr_scheduler = None
self.loss = lambda x, y:\
torch.nn.BCEWithLogitsLoss(reduction="none")(x.squeeze(),
y.float())
self.cuda()
def compute_loss_value_(self, i, x, y, g, epoch):
return self.loss(self.network(x), y).mean()
def update(self, i, x, y, g, epoch):
x, y, g = x.cuda(), y.cuda(), g.cuda()
loss_value = self.compute_loss_value_(i, x, y, g, epoch)
if loss_value is not None:
self.optimizer.zero_grad()
loss_value.backward()
if self.clip_grad:
torch.nn.utils.clip_grad_norm_(self.network.parameters(), 1.0)
self.optimizer.step()
if self.lr_scheduler is not None:
self.lr_scheduler.step()
if self.data_type == "text":
self.network.zero_grad()
loss_value = loss_value.item()
self.last_epoch = epoch
return loss_value
def predict(self, x):
return self.network(x)
def accuracy(self, loader):
nb_groups = loader.dataset.nb_groups
nb_labels = loader.dataset.nb_labels
corrects = torch.zeros(nb_groups * nb_labels)
totals = torch.zeros(nb_groups * nb_labels)
self.eval()
with torch.no_grad():
for i, x, y, g in loader:
predictions = self.predict(x.cuda())
if predictions.squeeze().ndim == 1:
predictions = (predictions > 0).cpu().eq(y).float()
else:
predictions = predictions.argmax(1).cpu().eq(y).float()
groups = (nb_groups * y + g)
for gi in groups.unique():
corrects[gi] += predictions[groups == gi].sum()
totals[gi] += (groups == gi).sum()
corrects, totals = corrects.tolist(), totals.tolist()
self.train()
return sum(corrects) / sum(totals),\
[c/t for c, t in zip(corrects, totals)]
def load(self, fname):
dicts = torch.load(fname)
self.last_epoch = dicts["epoch"]
self.load_state_dict(dicts["model"])
self.optimizer.load_state_dict(dicts["optimizer"])
if self.lr_scheduler is not None:
self.lr_scheduler.load_state_dict(dicts["scheduler"])
def save(self, fname):
lr_dict = None
if self.lr_scheduler is not None:
lr_dict = self.lr_scheduler.state_dict()
torch.save(
{
"model": self.state_dict(),
"optimizer": self.optimizer.state_dict(),
"scheduler": lr_dict,
"epoch": self.last_epoch,
"best_selec_val": self.best_selec_val,
},
fname,
)
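# GroupDRO (group distributionally robust optimization) keeps a buffer q with
# one weight per (label, group) pair. Each step multiplies q by
# exp(eta * mean group loss), renormalizes it, and minimizes the q-weighted sum
# of per-group losses, so persistently hard groups receive more weight.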
class GroupDRO(ERM):
def __init__(self, hparams, dataset):
super(GroupDRO, self).__init__(hparams, dataset)
self.register_buffer(
"q", torch.ones(self.n_classes * self.n_groups).cuda())
def groups_(self, y, g):
idx_g, idx_b = [], []
all_g = y * self.n_groups + g
for g in all_g.unique():
idx_g.append(g)
idx_b.append(all_g == g)
return zip(idx_g, idx_b)
def compute_loss_value_(self, i, x, y, g, epoch):
losses = self.loss(self.network(x), y)
for idx_g, idx_b in self.groups_(y, g):
self.q[idx_g] *= (
self.hparams["eta"] * losses[idx_b].mean()).exp().item()
self.q /= self.q.sum()
loss_value = 0
for idx_g, idx_b in self.groups_(y, g):
loss_value += self.q[idx_g] * losses[idx_b].mean()
return loss_value
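# JTT (Just Train Twice) first trains a plain ERM identification model for
# hparams["T"] epochs. At epoch T it flags the training points that model still
# misclassifies and raises their weight by a factor hparams["up"]; train.py then
# rebuilds the loaders with these weights as duplicates. At epoch T + 1 the
# network is re-initialized (AdamW for text) and trained on the reweighted data.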
class JTT(ERM):
def __init__(self, hparams, dataset):
super(JTT, self).__init__(hparams, dataset)
self.register_buffer(
"weights", torch.ones(self.n_examples, dtype=torch.long).cuda())
def compute_loss_value_(self, i, x, y, g, epoch):
if epoch == self.hparams["T"] + 1 and\
self.last_epoch == self.hparams["T"]:
self.init_model_(self.data_type, text_optim="adamw")
predictions = self.network(x)
if epoch != self.hparams["T"]:
loss_value = self.loss(predictions, y).mean()
else:
self.eval()
if predictions.squeeze().ndim == 1:
wrong_predictions = (predictions > 0).cpu().ne(y).float()
else:
wrong_predictions = predictions.argmax(1).cpu().ne(y).float()
self.weights[i] += wrong_predictions.detach() * (self.hparams["up"] - 1)
self.train()
loss_value = None
return loss_value
def load(self, fname):
dicts = torch.load(fname)
self.last_epoch = dicts["epoch"]
if self.last_epoch > self.hparams["T"]:
self.init_model_(self.data_type, text_optim="adamw")
self.load_state_dict(dicts["model"])
self.optimizer.load_state_dict(dicts["optimizer"])
if self.lr_scheduler is not None:
self.lr_scheduler.load_state_dict(dicts["scheduler"])
| BalancingGroups-main | models.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import os
import re
import tarfile
from zipfile import ZipFile
import logging
logging.basicConfig(level=logging.INFO)
import gdown
import pandas as pd
def download_and_extract(url, dst, remove=True):
gdown.download(url, dst, quiet=False)
if dst.endswith(".tar.gz"):
tar = tarfile.open(dst, "r:gz")
tar.extractall(os.path.dirname(dst))
tar.close()
if dst.endswith(".tar"):
tar = tarfile.open(dst, "r:")
tar.extractall(os.path.dirname(dst))
tar.close()
if dst.endswith(".zip"):
zf = ZipFile(dst, "r")
zf.extractall(os.path.dirname(dst))
zf.close()
if remove:
os.remove(dst)
def download_datasets(data_path, datasets=['celeba', 'waterbirds', 'civilcomments', 'multinli']):
os.makedirs(data_path, exist_ok=True)
dataset_downloaders = {
'celeba': download_celeba,
'waterbirds': download_waterbirds,
'civilcomments': download_civilcomments,
'multinli': download_multinli,
}
for dataset in datasets:
dataset_downloaders[dataset](data_path)
def download_civilcomments(data_path):
logging.info("Downloading CivilComments")
civilcomments_dir = os.path.join(data_path, "civilcomments")
os.makedirs(civilcomments_dir, exist_ok=True)
download_and_extract(
"https://worksheets.codalab.org/rest/bundles/0x8cd3de0634154aeaad2ee6eb96723c6e/contents/blob/",
os.path.join(civilcomments_dir, "civilcomments.tar.gz"),
)
def download_multinli(data_path):
logging.info("Downloading MultiNLI")
multinli_dir = os.path.join(data_path, "multinli")
glue_dir = os.path.join(multinli_dir, "glue_data/MNLI/")
os.makedirs(glue_dir, exist_ok=True)
multinli_tar = os.path.join(glue_dir, "multinli_bert_features.tar.gz")
download_and_extract(
"https://nlp.stanford.edu/data/dro/multinli_bert_features.tar.gz",
multinli_tar,
)
os.makedirs(os.path.join(multinli_dir, "data"), exist_ok=True)
download_and_extract(
"https://raw.githubusercontent.com/kohpangwei/group_DRO/master/dataset_metadata/multinli/metadata_random.csv",
os.path.join(multinli_dir, "data", "metadata_random.csv"),
remove=False
)
def download_waterbirds(data_path):
logging.info("Downloading Waterbirds")
water_birds_dir = os.path.join(data_path, "waterbirds")
os.makedirs(water_birds_dir, exist_ok=True)
water_birds_dir_tar = os.path.join(water_birds_dir, "waterbirds.tar.gz")
download_and_extract(
"https://nlp.stanford.edu/data/dro/waterbird_complete95_forest2water2.tar.gz",
water_birds_dir_tar,
)
def download_celeba(data_path):
logging.info("Downloading CelebA")
celeba_dir = os.path.join(data_path, "celeba")
os.makedirs(celeba_dir, exist_ok=True)
download_and_extract(
"https://drive.google.com/uc?id=1mb1R6dXfWbvk3DnlWOBO8pDeoBKOcLE6",
os.path.join(celeba_dir, "img_align_celeba.zip"),
)
download_and_extract(
"https://drive.google.com/uc?id=1acn0-nE4W7Wa17sIkKB0GtfW4Z41CMFB",
os.path.join(celeba_dir, "list_eval_partition.txt"),
remove=False
)
download_and_extract(
"https://drive.google.com/uc?id=11um21kRUuaUNoMl59TCe2fb01FNjqNms",
os.path.join(celeba_dir, "list_attr_celeba.txt"),
remove=False
)
def generate_metadata(data_path, datasets=['celeba', 'waterbirds', 'civilcomments', 'multinli']):
dataset_metadata_generators = {
'celeba': generate_metadata_celeba,
'waterbirds': generate_metadata_waterbirds,
'civilcomments': generate_metadata_civilcomments,
'multinli': generate_metadata_multinli,
}
for dataset in datasets:
dataset_metadata_generators[dataset](data_path)
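# Each generate_metadata_* helper below writes a CSV with the columns
# id, filename, split (0=train, 1=val, 2=test), y (label) and a (group
# attribute); this is the format consumed by GroupDataset in datasets.py.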
def generate_metadata_celeba(data_path):
logging.info("Generating metadata for CelebA")
with open(os.path.join(data_path, "celeba/list_eval_partition.txt"), "r") as f:
splits = f.readlines()
with open(os.path.join(data_path, "celeba/list_attr_celeba.txt"), "r") as f:
attrs = f.readlines()[2:]
f = open(os.path.join(data_path, "metadata_celeba.csv"), "w")
f.write("id,filename,split,y,a\n")
for i, (split, attr) in enumerate(zip(splits, attrs)):
fi, si = split.strip().split()
ai = attr.strip().split()[1:]
yi = 1 if ai[9] == "1" else 0
gi = 1 if ai[20] == "1" else 0
f.write("{},{},{},{},{}\n".format(i + 1, fi, si, yi, gi))
f.close()
def generate_metadata_waterbirds(data_path):
logging.info("Generating metadata for waterbirds")
df = pd.read_csv(os.path.join(data_path, "waterbirds/waterbird_complete95_forest2water2/metadata.csv"))
df = df.rename(columns={"img_id": "id", "img_filename": "filename", "place": "a"})
df[["id", "filename", "split", "y", "a"]].to_csv(
os.path.join(data_path, "metadata_waterbirds.csv"), index=False
)
def generate_metadata_civilcomments(data_path):
logging.info("Generating metadata for civilcomments")
df = pd.read_csv(
os.path.join(data_path, "civilcomments", "all_data_with_identities.csv"),
index_col=0,
)
group_attrs = [
"male",
"female",
"LGBTQ",
"christian",
"muslim",
"other_religions",
"black",
"white",
]
cols_to_keep = ["comment_text", "split", "toxicity"]
df = df[cols_to_keep + group_attrs]
df = df.rename(columns={"toxicity": "y"})
df["y"] = (df["y"] >= 0.5).astype(int)
df[group_attrs] = (df[group_attrs] >= 0.5).astype(int)
df["no active attributes"] = 0
df.loc[(df[group_attrs].sum(axis=1)) == 0, "no active attributes"] = 1
few_groups, all_groups = [], []
train_df = df.groupby("split").get_group("train")
split_df = train_df.rename(columns={"no active attributes": "a"})
few_groups.append(split_df[["y", "split", "comment_text", "a"]])
for split, split_df in df.groupby("split"):
for i, attr in enumerate(group_attrs):
test_df = split_df.loc[
split_df[attr] == 1, ["y", "split", "comment_text"]
].copy()
test_df["a"] = i
all_groups.append(test_df)
if split != "train":
few_groups.append(test_df)
few_groups = pd.concat(few_groups).reset_index(drop=True)
all_groups = pd.concat(all_groups).reset_index(drop=True)
for name, df in {"coarse": few_groups, "fine": all_groups}.items():
df.index.name = "filename"
df = df.reset_index()
df["id"] = df["filename"]
df["split"] = df["split"].replace({"train": 0, "val": 1, "test": 2})
text = df.pop("comment_text")
df[["id", "filename", "split", "y", "a"]].to_csv(
os.path.join(data_path, f"metadata_civilcomments_{name}.csv"), index=False
)
text.to_csv(
os.path.join(data_path, "civilcomments", f"civilcomments_{name}.csv"),
index=False,
)
def generate_metadata_multinli(data_path):
logging.info("Generating metadata for multinli")
df = pd.read_csv(
os.path.join(data_path, "multinli", "data", "metadata_random.csv"), index_col=0
)
df = df.rename(columns={"gold_label": "y", "sentence2_has_negation": "a"})
df = df.reset_index(drop=True)
df.index.name = "id"
df = df.reset_index()
df["filename"] = df["id"]
df = df.reset_index()[["id", "filename", "split", "y", "a"]]
df.to_csv(os.path.join(data_path, "metadata_multinli.csv"), index=False)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Initialize repo with datasets")
parser.add_argument(
"datasets",
nargs="+",
default=['celeba', 'waterbirds', 'civilcomments', 'multinli'],
type=str,
help="Which datasets to download and/or generate metadata for",
)
parser.add_argument(
"--data_path",
default="data",
type=str,
help="Root directory to store datasets",
)
parser.add_argument(
"--download",
action="store_true",
default=False,
)
args = parser.parse_args()
if args.download:
download_datasets(args.data_path, args.datasets)
generate_metadata(args.data_path, args.datasets)
| BalancingGroups-main | setup_datasets.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import torch
import submitit
from itertools import product
from train import run_experiment, parse_args
def product_dict(**kwargs):
keys = kwargs.keys()
vals = kwargs.values()
for instance in product(*vals):
yield dict(zip(keys, instance))
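# Example: product_dict(lr=[1e-6, 1e-5], up=[1]) yields the two configurations
# {'lr': 1e-06, 'up': 1} and {'lr': 1e-05, 'up': 1}; the sweep below enumerates
# every combination of its hyperparameter lists this way.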
if __name__ == "__main__":
args = parse_args()
executor = submitit.SlurmExecutor(folder=args['slurm_output_dir'])
executor.update_parameters(
time=args["max_time"],
gpus_per_node=1,
array_parallelism=16,
cpus_per_task=1,
partition=args["partition"])
commands = []
sweep = {
'dataset': ['toy'],
'dim_noise': [1200],
'selector': ['min_acc_va'],
'num_epochs': [500],
'gamma_spu': [4.0],
'gamma_core': [1.0],
'gamma_noise': [2.0, 4.0],
'method': ["erm", "subg", "rwg"],
'lr': [1e-6, 1e-5],
'weight_decay': [0, 0.1, 1, 10],
'batch_size': [250],
'init_seed': list(range(int(args["num_init_seeds"]))),
'T': [1],
'up': [1],
'eta': [0.1],
}
sweep.update({k: [v] for k, v in args.items()})
commands = list(product_dict(**sweep))
print('Launching {} runs'.format(len(commands)))
for i, command in enumerate(commands):
command['hparams_seed'] = i
os.makedirs(args["output_dir"], exist_ok=True)
torch.manual_seed(0)
commands = [commands[int(p)] for p in torch.randperm(len(commands))]
executor.map_array(run_experiment, commands)
| BalancingGroups-main | train_toy.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import torch
import pandas as pd
import numpy as np
from PIL import Image
from torchvision import transforms
from transformers import BertTokenizer
from torch.utils.data import DataLoader
from sklearn.datasets import make_blobs
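# GroupDataset is the common base class: it reads a metadata CSV, keeps the
# rows of the requested split, and serves (index, x, y, g) tuples. count_groups
# stores inverse-frequency weights per group (wg) and per class (wy),
# subsample_ balances groups or classes down to the smallest one, and
# duplicate_ repeats examples (used for JTT upweighting).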
class GroupDataset:
def __init__(
self, split, root, metadata, transform, subsample_what=None, duplicates=None
):
self.transform_ = transform
df = pd.read_csv(metadata)
df = df[df["split"] == ({"tr": 0, "va": 1, "te": 2}[split])]
self.i = list(range(len(df)))
self.x = df["filename"].astype(str).map(lambda x: os.path.join(root, x)).tolist()
self.y = df["y"].tolist()
self.g = df["a"].tolist()
self.count_groups()
if subsample_what is not None:
self.subsample_(subsample_what)
if duplicates is not None:
self.duplicate_(duplicates)
def count_groups(self):
self.wg, self.wy = [], []
self.nb_groups = len(set(self.g))
self.nb_labels = len(set(self.y))
self.group_sizes = [0] * self.nb_groups * self.nb_labels
self.class_sizes = [0] * self.nb_labels
for i in self.i:
self.group_sizes[self.nb_groups * self.y[i] + self.g[i]] += 1
self.class_sizes[self.y[i]] += 1
for i in self.i:
self.wg.append(
len(self) / self.group_sizes[self.nb_groups * self.y[i] + self.g[i]]
)
self.wy.append(len(self) / self.class_sizes[self.y[i]])
def subsample_(self, subsample_what):
perm = torch.randperm(len(self)).tolist()
if subsample_what == "groups":
min_size = min(list(self.group_sizes))
else:
min_size = min(list(self.class_sizes))
counts_g = [0] * self.nb_groups * self.nb_labels
counts_y = [0] * self.nb_labels
new_i = []
for p in perm:
y, g = self.y[self.i[p]], self.g[self.i[p]]
if (
subsample_what == "groups"
and counts_g[self.nb_groups * int(y) + int(g)] < min_size
) or (subsample_what == "classes" and counts_y[int(y)] < min_size):
counts_g[self.nb_groups * int(y) + int(g)] += 1
counts_y[int(y)] += 1
new_i.append(self.i[p])
self.i = new_i
self.count_groups()
def duplicate_(self, duplicates):
new_i = []
for i, duplicate in zip(self.i, duplicates):
new_i += [i] * duplicate
self.i = new_i
self.count_groups()
def __getitem__(self, i):
j = self.i[i]
x = self.transform(self.x[j])
y = torch.tensor(self.y[j], dtype=torch.long)
g = torch.tensor(self.g[j], dtype=torch.long)
return torch.tensor(i, dtype=torch.long), x, y, g
def __len__(self):
return len(self.i)
class Waterbirds(GroupDataset):
def __init__(self, data_path, split, subsample_what=None, duplicates=None):
root = os.path.join(data_path, "waterbirds/waterbird_complete95_forest2water2/")
metadata = os.path.join(data_path,"metadata_waterbirds.csv")
transform = transforms.Compose(
[
transforms.Resize(
(
int(224 * (256 / 224)),
int(224 * (256 / 224)),
)
),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
super().__init__(split, root, metadata, transform, subsample_what, duplicates)
self.data_type = "images"
def transform(self, x):
return self.transform_(Image.open(x).convert("RGB"))
class CelebA(GroupDataset):
def __init__(self, data_path, split, subsample_what=None, duplicates=None):
root = os.path.join(data_path, "celeba/img_align_celeba/")
metadata = os.path.join(data_path,"metadata_celeba.csv")
transform = transforms.Compose(
[
transforms.CenterCrop(178),
transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
super().__init__(split, root, metadata, transform, subsample_what, duplicates)
self.data_type = "images"
def transform(self, x):
return self.transform_(Image.open(x).convert("RGB"))
class MultiNLI(GroupDataset):
def __init__(self, data_path, split, subsample_what=None, duplicates=None):
root = os.path.join(data_path, "multinli", "glue_data", "MNLI")
metadata = os.path.join(data_path, "metadata_multinli.csv")
self.features_array = []
for feature_file in [
"cached_train_bert-base-uncased_128_mnli",
"cached_dev_bert-base-uncased_128_mnli",
"cached_dev_bert-base-uncased_128_mnli-mm",
]:
features = torch.load(os.path.join(root, feature_file))
self.features_array += features
self.all_input_ids = torch.tensor(
[f.input_ids for f in self.features_array], dtype=torch.long
)
self.all_input_masks = torch.tensor(
[f.input_mask for f in self.features_array], dtype=torch.long
)
self.all_segment_ids = torch.tensor(
[f.segment_ids for f in self.features_array], dtype=torch.long
)
self.all_label_ids = torch.tensor(
[f.label_id for f in self.features_array], dtype=torch.long
)
self.x_array = torch.stack(
(self.all_input_ids, self.all_input_masks, self.all_segment_ids), dim=2
)
self.data_type = "text"
super().__init__(
split, "", metadata, self.transform, subsample_what, duplicates
)
def transform(self, i):
return self.x_array[int(i)]
class CivilComments(GroupDataset):
def __init__(
self,
data_path,
split,
subsample_what=None,
duplicates=None,
granularity="coarse",
):
metadata = os.path.join(data_path,"metadata_civilcomments_{}.csv".format(granularity))
text = pd.read_csv(
os.path.join(
data_path, "civilcomments/civilcomments_{}.csv".format(granularity)
)
)
self.text_array = list(text["comment_text"])
self.tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
self.data_type = "text"
super().__init__(
split, "", metadata, self.transform, subsample_what, duplicates
)
def transform(self, idx):
text = self.text_array[int(idx)]
tokens = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=220,
return_tensors="pt",
)
return torch.squeeze(
torch.stack(
(
tokens["input_ids"],
tokens["attention_mask"],
tokens["token_type_ids"],
),
dim=2,
),
dim=0,
)
class CivilCommentsFine(CivilComments):
def __init__(self, data_path, split, subsample_what=None, duplicates=None):
super().__init__(data_path, split, subsample_what, duplicates, "fine")
class Toy(GroupDataset):
def __init__(self, data_path, split, subsample_what=None, duplicates=None):
self.data_type = "toy"
n_samples = 1000
dim_noise = 1200
self.i, self.x, self.y, self.g = self.make_dataset(
n_samples=n_samples,
dim_noise=dim_noise,
core_cor=1.0,
spu_cor=0.8,
train=(split == "tr"),
)
self.count_groups()
if subsample_what is not None:
self.subsample_(subsample_what)
if duplicates is not None:
self.duplicate_(duplicates)
def transform(self, x):
return torch.tensor(x)
def make_dataset(
self,
n_samples=1000,
dim_noise=1200,
blob_std=0.15,
core_cor=1.0,
spu_cor=0.8,
train=True,
):
X = make_blobs(n_samples=n_samples, centers=1, cluster_std=[blob_std])[0]
X -= X.mean(0, keepdims=True) + np.array([[1.0, 1.0]])
y = np.array([-1] * (n_samples // 2) + [1] * (n_samples // 2))
g = np.ones((n_samples))
# making of the core feature
core_features = X[:, 0] * y
# random without replacement
random_indices_for_core = np.random.permutation(np.arange(n_samples))[
: int((1 - core_cor) * n_samples)
]
core_features[random_indices_for_core] *= -1
g[random_indices_for_core] *= -1
# making of the spurious feature
spu_features = X[:, 1] * y
random_indices_for_spu = np.random.permutation(np.arange(n_samples))[
: int((1 - spu_cor) * n_samples)
]
spu_features[random_indices_for_spu] *= -1
g[random_indices_for_spu] *= -1
X = np.vstack([spu_features, core_features]).T
# noise = np.random.randn(n_samples, dim_noise) / np.sqrt(dim_noise)
noise = np.random.randn(n_samples, dim_noise)
if not train:
# The average of noise is zero for both training and the test sets.
# However, for the test set, we compute the "Expected loss" instead
# of the "Empirical loss". For that reason, we can simply set the
# noise to be zero for the test set.
noise *= 0.0
X = np.concatenate([X, noise], 1)
i = np.arange(len(y))
# y denotes the label
# g denotes the group (minority or majority)
# i denotes the index
y = ((y + 1) / 2).astype(int) # 0 or 1
g = ((g + 1) / 2).astype(int) # 0 or 1
return i, X, y, g
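# get_loaders builds train/val/test loaders and encodes each method's data
# handling: "subg"/"suby" subsample the training set per group/class,
# "rwg"/"dro" draw samples with the group weights wg, "rwy" with the class
# weights wy, and everything else shuffles uniformly. CivilComments switches
# to the fine-grained group split for subg/rwg.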
def get_loaders(data_path, dataset_name, batch_size, method="erm", duplicates=None):
Dataset = {
"waterbirds": Waterbirds,
"celeba": CelebA,
"multinli": MultiNLI,
"civilcomments": CivilCommentsFine
if method in ("subg", "rwg")
else CivilComments,
"toy": Toy,
}[dataset_name]
def dl(dataset, bs, shuffle, weights):
if weights is not None:
sampler = torch.utils.data.WeightedRandomSampler(weights, len(weights))
else:
sampler = None
return DataLoader(
dataset,
batch_size=bs,
shuffle=shuffle,
sampler=sampler,
num_workers=4,
pin_memory=True,
)
if method == "subg":
subsample_what = "groups"
elif method == "suby":
subsample_what = "classes"
else:
subsample_what = None
dataset_tr = Dataset(data_path, "tr", subsample_what, duplicates)
if method == "rwg" or method == "dro":
weights_tr = dataset_tr.wg
elif method == "rwy":
weights_tr = dataset_tr.wy
else:
weights_tr = None
return {
"tr": dl(dataset_tr, batch_size, weights_tr is None, weights_tr),
"va": dl(Dataset(data_path, "va", None), 128, False, None),
"te": dl(Dataset(data_path, "te", None), 128, False, None),
}
| BalancingGroups-main | datasets.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import matplotlib
from matplotlib.colors import ListedColormap
import numpy as np
import torch
import torch.utils.data
from models import ToyNet
from parse import parse_json_to_df
from datasets import Toy
import matplotlib.pyplot as plt
from torch import FloatTensor as FT
import seaborn as sns
from tqdm import tqdm
import itertools
def generate_heatmap_plane(X):
xlim = np.array([-2, 2])
ylim = np.array([-2, 2])
n = 200
d1, d2 = torch.meshgrid(
[torch.linspace(xlim[0], xlim[1], n), torch.linspace(ylim[0], ylim[1], n)]
)
heatmap_plane = torch.stack((d1.flatten(), d2.flatten()), dim=1)
# below, we compute the distance of each point to the training datapoints.
# if the distance is less than 1e-3, that point uses the noise dimensions
# of the closest training point.
# 10000 x 300
dists = (heatmap_plane[:, 0:1] - FT(X[:, 0:1].T)) ** 2 + (
heatmap_plane[:, 1:2] - FT(X[:, 1:2].T)
) ** 2
noise_dims = FT(X)[torch.argmin(dists, 1)][:, 2:] * (
dists.min(1)[0] < 0.001
).unsqueeze(1)
return torch.cat([heatmap_plane, noise_dims], 1)
def load_model(path):
state_dict = torch.load(path)
gammas = [
state_dict["model"]["network.gammas"].squeeze()[i].item() for i in range(3)
]
model = torch.nn.ModuleDict({"network": ToyNet(1202, gammas)})
model.load_state_dict(state_dict["model"])
model = model.network
model.to(DEVICE)
return model
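# plot() draws, for each method, the learned decision surface over the
# (spurious, core) plane with the training points overlaid (top row) and the
# worst-group accuracy of train/valid/test across epochs (bottom row).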
def plot(
exps,
all_train_envs,
all_hm,
gammas,
heatmap_plane,
error_df,
filename="toy_exp",
):
heatmap = all_hm.mean(1)
matplotlib.rcParams["contour.negative_linestyle"] = "solid"
cm = ListedColormap(["#C82506", "#0365C0"])
plt.rc("font", size=18, family="Times New Roman")
# plt.figure(figsize=(16, 4.5))
fig, axs = plt.subplots(2, len(exps), figsize=(4 * len(exps), 8))
n = int(np.sqrt(heatmap_plane.shape[0]))
hmp_x = heatmap_plane[:, 0].detach().cpu().numpy().reshape(n, n)
hmp_y = heatmap_plane[:, 1].detach().cpu().numpy().reshape(n, n)
hma = heatmap.reshape(-1, n, n).sigmoid()
for i in range(len(exps)):
ax = axs[0, i] if len(exps) > 1 else axs[0]
vmin, vmax = hma[i, -1, -1], hma[i, 1,1]
delta = vmax-vmin
vmin, vmax = vmin-0.25*delta, vmax+0.25*delta
cm = plt.cm.RdBu.copy()
cm.set_under("#C82506")
cm.set_over("#0365C0")
p = ax.contourf(
hmp_x,
hmp_y,
hma[i],
np.linspace(vmin, vmax, 20),
cmap=cm,
alpha=0.8,
vmin=vmin,
vmax=vmax,
extend="both"
)
ax.contour(
hmp_x, hmp_y, hma[i], [0.5], antialiased=True, linewidths=1.0, colors="k"
)
ax.set_title(exps[i].upper())
ax.set_xlabel("x spu * gamma spu")
ax.set_ylabel("x core * gamma core")
ax.text(-1.7, 1.7, "I", horizontalalignment='center', verticalalignment='center', fontsize=18, color="k")
ax.text(1.7, 1.7, "II", horizontalalignment='center', verticalalignment='center', fontsize=18, color="k")
ax.text(-1.7, -1.7, "III", horizontalalignment='center', verticalalignment='center', fontsize=18, color="k")
ax.text(1.7, -1.7, "IV", horizontalalignment='center', verticalalignment='center', fontsize=18, color="k")
ax.axhline(y=0, ls="--", lw=0.7, color="k", alpha=0.5)
ax.axvline(x=0, ls="--", lw=0.7, color="k", alpha=0.5)
# ax.xaxis.set_major_locator(plt.NullLocator())
# ax.yaxis.set_major_locator(plt.NullLocator())
ax.set_xlim(np.array([-2, 2]))
ax.set_ylim(np.array([-2, 2]))
ticks = [-2, -1, 0, 1, 2]
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels([int(t * gammas[0]) for t in ticks])
ax.set_yticklabels([int(t * gammas[1]) for t in ticks])
for X, y in all_train_envs:
ax.scatter(X[:, 0], X[:, 1], c=y, cmap=cm, edgecolors='none', s=5, alpha=0.3)
ax_ = axs[1, i] if len(exps) > 1 else axs[1]
l = sns.lineplot(
data=error_df.groupby("method").get_group(exps[i]),
x="epoch",
y="error",
hue="phase",
ax=ax_,
ci=90
)
handles, labels = l.get_legend_handles_labels()
l.get_legend().remove()
ax_.grid(color="k", linestyle="--", linewidth=0.5, alpha=0.3)
ax_.set_title(exps[i].upper())
# ax_.set_xscale("log")
ax_.set_xlabel("Iterations")
ax_.set_ylabel("worst-group-accuracy")
ax_.set_ylim([-0.005, 1.005])
lg = fig.legend(handles, labels, loc='lower center', ncol=3, bbox_to_anchor=(0.5, -0.05))
fig.tight_layout()
plt.savefig(f"figures/{filename}.pdf",bbox_extra_artists=(lg,), bbox_inches='tight')
plt.savefig(f"figures/{filename}.png",bbox_extra_artists=(lg,), bbox_inches='tight')
if __name__ == "__main__":
seeds = 1
n_samples = 1000
dim_noise = 1200
DEVICE = 0
gammas = [4, 1.0, 20.0]
exps = ["erm", "subg", "rwg"]
df = parse_json_to_df(["toy_sweep"])
idx = [
"method",
"lr",
"weight_decay",
"batch_size",
"init_seed",
"epoch",
"file_path",
]
# df.set_index(idx)
def get_ploting_params(df):
models = {
(exp, seed): load_model(path.replace(".pt", ".best.pt"))
for exp, seed, path in (
df.groupby(["method", "init_seed", "file_path"]).groups.keys()
)
}
df = (
df.melt(
id_vars=idx,
value_vars=["min_acc_va", "min_acc_te", "min_acc_tr"],
var_name="phase",
value_name="error",
)
.replace({"min_acc_va": "valid", "min_acc_te": "test", "min_acc_tr": "train"})
.reset_index()
)
datasets = []
for i in range(seeds):
torch.manual_seed(i)
np.random.seed(i)
d = Toy("tr")
datasets.append((d.x, d.y))
all_hm = torch.zeros(len(exps), seeds, 200 * 200)
for exp_i, exp in enumerate(exps):
for i in range(seeds):
heatmap_plane = generate_heatmap_plane(datasets[i][0]).to(DEVICE)
all_hm[exp_i, i] = models[(exp, i)](heatmap_plane).detach().cpu()
return exps, datasets, all_hm, gammas, heatmap_plane, df
groups = df.groupby(
["lr", "weight_decay", "batch_size", "gamma_spu", "gamma_core", "gamma_noise"]
)
for (lr, wd, bs, gms, gmc, gmn), g_df in groups:
plot(
*get_ploting_params(g_df),
filename=f"toy_sweep_lr_{lr}_wd_{wd}_bs_{bs}_gms_{gms}_gmc_{gmc}_gmn_{gmn}",
)
| BalancingGroups-main | plot_toy_scatter.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#!/usr/bin/env python
import os
import sys
import json
import time
import torch
import submitit
import argparse
import numpy as np
import models
from datasets import get_loaders
class Tee:
def __init__(self, fname, stream, mode="a+"):
self.stream = stream
self.file = open(fname, mode)
def write(self, message):
self.stream.write(message)
self.file.write(message)
self.flush()
def flush(self):
self.stream.flush()
self.file.flush()
def randl(l_):
return l_[torch.randperm(len(l_))[0]]
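# randl picks one element of l_ uniformly at random with torch's RNG, so the
# hyperparameter draws in __main__ are reproducible under torch.manual_seed.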
def parse_args():
parser = argparse.ArgumentParser(description='Balancing baselines')
parser.add_argument('--output_dir', type=str, default='outputs')
parser.add_argument('--slurm_output_dir', type=str, default='slurm_outputs')
parser.add_argument('--data_path', type=str, default='data')
parser.add_argument('--slurm_partition', type=str, default=None)
parser.add_argument('--max_time', type=int, default=3*24*60)
parser.add_argument('--num_hparams_seeds', type=int, default=20)
parser.add_argument('--num_init_seeds', type=int, default=5)
parser.add_argument('--selector', type=str, default='min_acc_va')
return vars(parser.parse_args())
def run_experiment(args):
start_time = time.time()
torch.manual_seed(args["init_seed"])
np.random.seed(args["init_seed"])
loaders = get_loaders(args["data_path"], args["dataset"], args["batch_size"], args["method"])
sys.stdout = Tee(os.path.join(
args["output_dir"], 'seed_{}_{}.out'.format(
args["hparams_seed"], args["init_seed"])), sys.stdout)
sys.stderr = Tee(os.path.join(
args["output_dir"], 'seed_{}_{}.err'.format(
args["hparams_seed"], args["init_seed"])), sys.stderr)
checkpoint_file = os.path.join(
args["output_dir"], 'seed_{}_{}.pt'.format(
args["hparams_seed"], args["init_seed"]))
best_checkpoint_file = os.path.join(
args["output_dir"],
"seed_{}_{}.best.pt".format(args["hparams_seed"], args["init_seed"]),
)
model = {
"erm": models.ERM,
"suby": models.ERM,
"subg": models.ERM,
"rwy": models.ERM,
"rwg": models.ERM,
"dro": models.GroupDRO,
"jtt": models.JTT
}[args["method"]](args, loaders["tr"])
last_epoch = 0
best_selec_val = float('-inf')
if os.path.exists(checkpoint_file):
model.load(checkpoint_file)
last_epoch = model.last_epoch
best_selec_val = model.best_selec_val
for epoch in range(last_epoch, args["num_epochs"]):
if epoch == args["T"] + 1 and args["method"] == "jtt":
loaders = get_loaders(
args["data_path"],
args["dataset"],
args["batch_size"],
args["method"],
model.weights.tolist())
for i, x, y, g in loaders["tr"]:
model.update(i, x, y, g, epoch)
result = {
"args": args, "epoch": epoch, "time": time.time() - start_time}
for loader_name, loader in loaders.items():
avg_acc, group_accs = model.accuracy(loader)
result["acc_" + loader_name] = group_accs
result["avg_acc_" + loader_name] = avg_acc
selec_value = {
"min_acc_va": min(result["acc_va"]),
"avg_acc_va": result["avg_acc_va"],
}[args["selector"]]
if selec_value >= best_selec_val:
model.best_selec_val = selec_value
best_selec_val = selec_value
model.save(best_checkpoint_file)
model.save(checkpoint_file)
print(json.dumps(result))
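# The block below runs a random hyperparameter search: for every hparams_seed
# it samples a dataset, method and hyperparameters, replicates the job over
# num_init_seeds initialization seeds, shuffles the resulting commands, and
# either submits them to SLURM via submitit or runs them sequentially.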
if __name__ == "__main__":
args = parse_args()
commands = []
for hparams_seed in range(args["num_hparams_seeds"]):
torch.manual_seed(hparams_seed)
args["hparams_seed"] = hparams_seed
args["dataset"] = randl(
["waterbirds", "celeba", "multinli", "civilcomments"])
args["method"] = randl(
["erm", "suby", "subg", "rwy", "rwg", "dro", "jtt"])
args["num_epochs"] = {
"waterbirds": 300 + 60,
"celeba": 50 + 10,
"multinli": 5 + 2,
"civilcomments": 5 + 2
}[args["dataset"]]
args["eta"] = 0.1
args["lr"] = randl([1e-5, 1e-4, 1e-3])
args["weight_decay"] = randl([1e-4, 1e-3, 1e-2, 1e-1, 1])
if args["dataset"] in ["waterbirds", "celeba"]:
args["batch_size"] = randl([2, 4, 8, 16, 32, 64, 128])
else:
args["batch_size"] = randl([2, 4, 8, 16, 32])
args["up"] = randl([4, 5, 6, 20, 50, 100])
args["T"] = {
"waterbirds": randl([40, 50, 60]),
"celeba": randl([1, 5, 10]),
"multinli": randl([1, 2]),
"civilcomments": randl([1, 2])
}[args["dataset"]]
for init_seed in range(args["num_init_seeds"]):
args["init_seed"] = init_seed
commands.append(dict(args))
os.makedirs(args["output_dir"], exist_ok=True)
torch.manual_seed(0)
commands = [commands[int(p)] for p in torch.randperm(len(commands))]
if args['slurm_partition'] is not None:
executor = submitit.SlurmExecutor(folder=args['slurm_output_dir'])
executor.update_parameters(
time=args["max_time"],
gpus_per_node=1,
array_parallelism=512,
cpus_per_task=4,
partition=args["slurm_partition"])
executor.map_array(run_experiment, commands)
else:
for command in commands:
run_experiment(command)
| BalancingGroups-main | train.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#!/usr/bin/env python
import os
import glob
import json
import argparse
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
from scipy.stats import alexandergovern
from matplotlib import cm
from pandas.api.types import is_numeric_dtype
def remove(lis, val):
return [value for value in lis if value != val]
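# e.g. remove(["erm", "jtt", "erm"], "erm") -> ["jtt"]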
def anova_test(best_df, df, metric):
for (dataset, groups), dataset_df in best_df.groupby(level=["dataset", "Groups"]):
metric_values = [
df.get_group(idx)[metric].values for idx, _ in dataset_df.iterrows()
]
best_df.loc[(dataset, slice(None), slice(None), groups), "Signif Diff"] = (
alexandergovern(*metric_values).pvalue < 0.05
)
return best_df
def convert_df_to_readable_format(reduced, bold=None, latex=None):
# Formatting table contents with mean (std)
summary = pd.DataFrame()
pm_sign = "$\\pm$" if latex else "+/-"
for c in reduced.columns.get_level_values(0):
if "mean" in reduced[c] and "std" in reduced[c]:
if "acc" in c.lower():
summary[c] = (
(100 * reduced[c]["mean"]).map("{:.1f}".format)
+ pm_sign
+ (100 * reduced[c]["std"]).map("{:.1f}".format)
)
else:
summary[c] = (
reduced[c]["mean"].map("{:.1f}".format)
+ pm_sign
+ reduced[c]["std"].map("{:.1f}".format)
)
elif "min" in reduced[c]:
summary[c + " range"] = (
"["
+ reduced[c]["min"].map("{:.1f}".format)
+ ", "
+ reduced[c]["max"].map("{:.1f}".format)
+ "]"
)
else:
if is_numeric_dtype(reduced[c]) and reduced[c].dtype == "float":
summary[c] = reduced[c].map("{:.1f}".format)
else:
summary[c] = reduced[c]
if bold:
if latex:
bold_l, bold_r = r"\textbf{", "}"
else:
bold_l, bold_r = "*", ""
best_algos = (
reduced.sort_values((bold["best_metric"], "mean"), ascending=bold["order"])
.groupby(bold["best_metric_group"])
.head(1)
.index
)
summary.loc[best_algos, bold["best_metric"]] = summary.loc[
best_algos, bold["best_metric"]
].map(lambda x: bold_l + x + bold_r)
return summary
def final_latex_table(final_df, df, do_anova, col_to_show):
template_begining = (
r"""
\begin{tabular}{lllccccc}
\toprule
\textbf{Method} & \textbf{\#HP} & \textbf{Groups} & \multicolumn{4}{c}{\textbf{Worst Acc}} & \textbf{Average} \\
\cmidrule(lr){4-7}
& & & CelebA & Waterbirds & MultiNLI & CivilComments & \\
\midrule
"""
)
middle = r""
last_group = None
df = df.set_index(["dataset", "Method"])
for _, row in final_df.iterrows():
for dataset in ["CelebA", "Waterbirds", "MultiNLI", "CivilComments"]:
if do_anova:
if df.loc[(dataset, row["Method"])]["Signif Diff"].item():
row[dataset] = "\cellcolor{blue!7}" + str(
row[dataset]
)
if row["Groups"] != last_group and last_group is not None:
middle += "\\midrule \n"
middle += r" & ".join(row.astype(str).values)
middle += "\\\\ \n"
last_group = row["Groups"]
template_ending = r"""
\bottomrule \\
\end{tabular}
"""
return template_begining + middle + template_ending
def parse_json_to_df(dirs):
records = []
groups = {
"erm": "No",
"jtt": "No",
"suby": "No",
"rwy": "No",
"dro": "Yes",
"rwg": "Yes",
"subg": "Yes",
}
nb_hps = {"erm": 4, "jtt": 6, "suby": 4, "rwy": 4, "dro": 5, "rwg": 4, "subg": 4}
for dname in dirs:
for fname in glob.glob(os.path.join(dname, "*.out")):
with open(fname, "r") as f:
lines = f.readlines()
for line in lines:
if not line.startswith("{"):
continue
record = json.loads(line)
this_row = dict(record["args"])
this_row["epoch"] = record["epoch"]
this_row["time"] = record["time"] / 3600
this_row["min_acc_va"] = min(record["acc_va"])
this_row["min_acc_tr"] = min(record["acc_tr"])
this_row["avg_acc_va"] = record["avg_acc_va"]
this_row["min_acc_te"] = min(record["acc_te"])
this_row["avg_acc_te"] = record["avg_acc_te"]
this_row["Groups"] = groups[this_row["method"]]
this_row["#HP"] = nb_hps[this_row["method"]]
this_row["file_path"] = os.path.splitext(fname)[0] + ".pt"
records.append(this_row)
if not len(records):
quit()
pd.set_option(
"display.max_rows", None, "display.max_columns", None, "display.width", None
)
return pd.DataFrame(records)
def reorganize_df(df, col_to_show=None):
df = (
df.set_index(["dataset", "Method", "#HP", "Groups"])[col_to_show]
.unstack(level=0)
.sort_index(axis=0, level=2)
)
df.columns = df.columns.set_names(None)
df = df.sort_index(axis=1)
# df = df.reindex(['Worst Acc', 'Time (h)', 'Signif Diff'], level=1, axis=1)
df = df.reindex(["CelebA", "Waterbirds", "MultiNLI", "CivilComments"], axis=1)
df = df.reset_index()
return df
def model_paths(df, run_groups):
models_to_save = []
for idx, row in df.iterrows():
models_to_save.append(run_groups.get_group(idx)["file_path"])
return pd.concat(models_to_save)
def print_hp_table(df, aggregate=True):
hps = ["lr", "weight_decay", "epoch", "batch_size"]
hparams = df[[(hp, "mean") for hp in hps]].droplevel(1, axis=1)
hparams = hparams.apply(
{
"lr": np.log10,
"weight_decay": np.log10,
"epoch": lambda x: x,
"batch_size": lambda x: x,
}
)
if aggregate:
hparams = hparams.groupby(["dataset", "Groups", "method"]).agg(["mean", "std"])
metric = ("min_acc_te", "mean")
hparams[("min_acc_te", "min")] = (
df.groupby(["dataset", "Groups", "method"]).min()[metric] * 100
)
hparams[("min_acc_te", "max")] = (
df.groupby(["dataset", "Groups", "method"]).max()[metric] * 100
)
hparams[("min_acc_te_delta", "")] = (
hparams[("min_acc_te", "max")] - hparams[("min_acc_te", "min")]
)
else:
hparams = pd.concat([hparams, df[["min_acc_te"]]], axis=1)
hparams.columns = pd.MultiIndex.from_tuples(
[(hp, "") for hp in hps] + df[["min_acc_te"]].columns.tolist()
)
hparams = hparams.droplevel(["hparams_seed", "#HP"], axis=0)
hparams = hparams.reorder_levels(["dataset", "Groups", "method"])
# print(hparams)
hparams = hparams.sort_index()
print(convert_df_to_readable_format(hparams))
df = convert_df_to_readable_format(hparams, latex=True)
cmaps = {
"lr": "bone",
"weight_decay": "pink",
"epoch": "bone",
"batch_size": "pink",
}
groups = hparams.groupby(["dataset"])
for idx, row in hparams.iterrows():
for hp in ["lr", "weight_decay", "batch_size", "epoch"]:
cmap = cm.get_cmap(cmaps[hp])
hp_tup = (hp, "mean") if aggregate else hp
scale = {
"min": groups.get_group(idx[0])[hp_tup].min().item(),
"max": groups.get_group(idx[0])[hp_tup].max().item(),
}
max_level = {
"lr": 1 / 6,
"weight_decay": 1 / 6,
"batch_size": 1 / 6,
"epoch": 1 / 6,
}[hp]
if hp in ["weight_decay", "batch_size"]:
level = 1 - (
max_level
* (row[hp_tup].item() - scale["min"])
/ (scale["max"] - scale["min"])
)
else:
level = 1 + (
max_level
* (row[hp_tup].item() - scale["max"])
/ (scale["max"] - scale["min"])
)
color = ["{:.3f}".format(c) for c in cmap(level)[:3]]
df.loc[idx, hp] = (
"\cellcolor[rgb]{" + ",".join(color) + "}" + str(df.loc[idx, hp])
)
filename = "hp_table_mean" if aggregate else "hp_table"
df.to_latex(f"tables/{filename}.tex", multicolumn=True, multirow=True, escape=False)
def plot_min_acc_evol(best_df, all_runs, filename):
df = []
all_runs_groups = all_runs.groupby(best_df.index.names)
for idx, _ in best_df.iterrows():
df.append(all_runs_groups.get_group(idx))
df = (
pd.concat(df)
.sort_index()
.reindex(["CelebA", "Waterbirds", "MultiNLI", "CivilComments"], level="dataset")
)
groups = df.groupby(
["dataset", "method", "hparams_seed", "init_seed", "Groups", "#HP"]
)
windows = {
"CelebA": 5,
"Waterbirds": 10,
}
dfs = []
for group, df_group in groups:
if group[0] in windows:
dfs.append(df_group.rolling(window=windows[group[0]]).mean())
else:
dfs.append(df_group)
df = pd.concat(dfs)
plt.rc("font", size=11)
df = (
df.melt(
value_vars=["min_acc_te", "min_acc_tr"],
var_name="phase",
value_name="worst-group-acc",
ignore_index=False,
)
.replace({"min_acc_te": "test", "min_acc_tr": "train"})
.reset_index()
)
sns.set_theme(context="talk", style="white", font="Times New Roman")
scale = 1
# plt.figure(figsize=(scale * 8, scale * 11))
g = sns.relplot(
data=df,
x="epoch",
y="worst-group-acc",
hue="method",
style="phase",
kind="line",
row="Groups",
col="dataset",
height=scale * 3.5,
aspect=1,
facet_kws=dict(sharex=False, sharey=False, margin_titles=True),
alpha=0.7,
)
g.set_axis_labels("epoch", "worst-group-acc")
g.set_titles(row_template="Groups = {row_name}", col_template="{col_name}")
# g.add_legend(loc="lower center", ncol=4)
g.tight_layout()
plt.savefig(f"figures/{filename}.pdf", dpi=300)
plt.savefig(f"figures/{filename}.png", dpi=300)
def format_result_tables(df, run_groups, do_anova=False):
if do_anova:
df = anova_test(df, run_groups, "min_acc_te")
df = df.reset_index()
if not args.full:
df = df[["dataset", "method", "Groups", "#HP", "min_acc_te", "time"]]
df = df.rename(
columns={"min_acc_te": "Worst Acc", "time": "Time (h)", "method": "Method"}
)
multip = 100 if args.col_to_show == "Worst Acc" else 1
avg_accs_per_method = (
(multip * df.groupby("Method").mean()[(args.col_to_show, "mean")])
.map("{:.1f}".format)
.reset_index(name="Average")
)
if args.bold:
bold = {
"best_metric": args.col_to_show,
"order": False if "acc" in args.col_to_show.lower() else True,
"best_metric_group": ["dataset", "Groups"],
}
else:
bold = False
term_df = convert_df_to_readable_format(df, bold, latex=False)
term_df = reorganize_df(term_df, col_to_show=args.col_to_show)
term_df = term_df.merge(avg_accs_per_method, on="Method", how="left")
print(term_df)
latex_df = convert_df_to_readable_format(df, bold, latex=True)
latex_df = reorganize_df(latex_df, col_to_show=args.col_to_show)
latex_df = latex_df.merge(avg_accs_per_method, on="Method", how="left")
os.makedirs("tables", exist_ok=True)
open(
f'tables/result_{args.col_to_show.replace(" ", "_").replace("(","").replace(")","").lower()}_1.tex',
"w",
).write(final_latex_table(latex_df, df, do_anova, args.col_to_show))
def format_time_results(df_all_epochs, unique_run_id):
time_delta = df_all_epochs.groupby(unique_run_id)["time"].diff() * 60
time_delta = time_delta[
time_delta > 0
] # Remove negative values coming from preemption
total_time = time_delta.sum().item()
print("Total compute time : " + str(total_time))
time_result = time_delta.groupby(["dataset", "method", "#HP", "Groups"]).median()
average = (
time_result.groupby(["method", "#HP", "Groups"]).mean().to_frame("Average")
)
time_result = time_result.unstack("dataset").sort_index(level="Groups")
time_result = time_result.join(average).apply(lambda x: x.map("{:.2f}".format))
print(time_result)
time_result.to_latex(
"tables/result_time_h.tex", escape=False, multirow=True, multicolumn=True
)
sns.set(style="whitegrid", context="talk")
g = sns.catplot(
data=time_delta.to_frame("time").reset_index(),
x="method",
y="time",
col="dataset",
kind="box",
sharex=True,
sharey=False,
height=6,
col_wrap=2,
)
for ax in g.fig.axes:
ax.set_yscale("log")
ax.tick_params(axis="x", labelrotation=45)
g.set_axis_labels("Method", "Time per epoch in minutes")
g.set_titles(col_template="{col_name}")
g.tight_layout()
plt.savefig(f"figures/time_per_epoch.pdf", dpi=300)
plt.savefig(f"figures/time_per_epoch.png", dpi=300)
def plot_min_acc_dist(df, run_groups, n):
dfs = []
for idx, _ in df.iterrows():
dfs.append(run_groups.get_group(idx)["min_acc_te"])
df = pd.concat(dfs).sort_index(level="Groups")
df = df.reindex(
["CelebA", "Waterbirds", "MultiNLI", "CivilComments"], level="dataset"
).reset_index()
sns.set(style="whitegrid", context="talk", font="Times New Roman")
g = sns.catplot(
data=df,
x="method",
y="min_acc_te",
col="dataset",
kind="box",
sharex=True,
sharey=False,
height=4.5,
)
for ax in g.fig.axes:
ax.tick_params(axis="x", labelrotation=45)
g.set_axis_labels("Method", "worst-group-acc")
g.set_titles(col_template="{col_name}")
g.tight_layout()
plt.savefig(f"figures/worst_group_acc_dist_dataset_{n}.pdf", dpi=300)
plt.savefig(f"figures/worst_group_acc_dist_dataset_{n}.png", dpi=300)
plt.figure()
g = sns.catplot(data=df, x="method", y="min_acc_te", kind="box", height=5.5)
for ax in g.fig.axes:
ax.tick_params(axis="x", labelrotation=45)
g.set_axis_labels("Method", "worst-group-acc")
g.tight_layout()
plt.savefig(f"figures/worst_group_acc_dist_{n}.pdf", dpi=300)
plt.savefig(f"figures/worst_group_acc_dist_{n}.png", dpi=300)
def print_unfinished_runs(dir):
errored_runs = []
for d in dir:
l = os.popen(f"grep -il error {d}/*.err").read()
l = [o for o in l.split("\n") if o]
errored_runs.extend(l)
# unfinished_runs = []
for run in errored_runs:
run_json = os.path.splitext(run)[0] + ".out"
with open(run_json) as f:
last_epoch = f.readlines()[-1]
last_epoch = json.loads(last_epoch)
if last_epoch["epoch"] + 1 != last_epoch["args"]["num_epochs"]:
print(run_json)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Parse sweep folder")
parser.add_argument("dir", type=str, nargs="+")
parser.add_argument("--selector", type=str, default="min_acc_va")
parser.add_argument("--metric", type=str, default="min_acc_te")
parser.add_argument("--col_to_show", type=str, default="Worst Acc")
parser.add_argument("--n", type=int, default=1)
parser.add_argument("--full", action="store_true")
parser.add_argument("--last_epoch", action="store_true")
parser.add_argument("--do_anova", action="store_true")
parser.add_argument("--bold", action="store_true")
parser.add_argument("--small_weight_decay", action="store_true")
parser.add_argument("--small_lr", action="store_true")
parser.add_argument(
"--mode",
type=str,
choices=[
"format_results",
"format_time_results",
"best_model_paths",
"best_mean_model_paths",
"print_hp_table",
"unfinished_runs",
"plot_min_acc_evol",
"plot_min_acc_dist",
],
default="format_results",
)
args = parser.parse_args()
if args.mode == "unfinished_runs":
print_unfinished_runs(args.dir)
exit()
df = parse_json_to_df(args.dir)
if args.small_weight_decay:
df = df[df["weight_decay"] == 1e-4]
if args.small_lr:
df = df[df["lr"] == 1e-5]
unique_run_id = ["dataset", "method", "hparams_seed", "init_seed", "Groups", "#HP"]
# Renaming datasets
df = df.replace(
{
"celeba": "CelebA",
"waterbirds": "Waterbirds",
"multinli": "MultiNLI",
"civilcomments": "CivilComments",
}
)
df["method"] = df["method"].str.upper()
df = df.replace({"DRO": "gDRO"})
df_all_epochs = df.set_index(unique_run_id + ["epoch"])
df = (
df.sort_values(by="epoch")
if args.last_epoch
else df.sort_values(by=args.selector)
)
df = df.groupby(unique_run_id).tail(1).set_index(unique_run_id)
df_all = df
# Averaging over init seeds
run_groups = df.groupby(remove(unique_run_id, "init_seed"))
df = run_groups.agg(["mean", "std"])
# Selecting best hyperparmeters in average
df = df.sort_values(by=["dataset", "method", (args.selector, "mean")])
df = df.groupby(["dataset", "method"]).tail(args.n)
if args.mode == "best_model_paths":
best_models = (
df_all.sort_values(by=["dataset", "method", args.selector])
.groupby(["dataset", "method"])
.tail(args.n)
)
# print(best_models)
for path in best_models["file_path"].values:
print(path)
elif args.mode == "best_mean_model_paths":
best_model_paths = model_paths(df, run_groups)
for path in best_model_paths.values:
print(path)
elif args.mode == "print_hp_table":
print_hp_table(df, aggregate=(args.n > 1))
elif args.mode == "format_results":
format_result_tables(df, run_groups, args.do_anova)
elif args.mode == "format_time_results":
format_time_results(df_all_epochs, unique_run_id)
elif args.mode == "plot_min_acc_evol":
plot_min_acc_evol(
df,
df_all_epochs,
"worst_acc_evol" if args.n == 1 else f"worst_acc_evol_mean{args.n}",
)
elif args.mode == "plot_min_acc_dist":
plot_min_acc_dist(df, run_groups, args.n)
| BalancingGroups-main | parse.py |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
from utils.masks import generate_masks, evaluate_masks
import torch
def train(*args, **kwargs):
return {}
params = {}
n_data = 400
# Specify dataset configuration
split_config = {
"public": 0.5, # 50% of the data will be in the public bucket
"private": {
"train": 0.25,
"heldout": 0.25
}
}
# Randomly split the data according to the configuration
known_masks, hidden_masks = generate_masks(n_data, split_config)
print(known_masks, hidden_masks)
# Typical output
typical_known_masks = {
# Data sample number 0 is in the private set, data sample 1 is in the public set, and so on
"public": [0, 1, 1, 0],
"private": [1, 0, 0, 1]
}
typical_hidden_masks = {
"public": [0, 1, 1, 0],
"private": {
"train": [1, 0, 0, 0],
"heldout": [0, 0, 0, 1]
}
}
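# Both structures are binary membership indicators over the n_data samples:
# known_masks only tells the attacker which samples are public vs. private,
# while hidden_masks additionally splits the private samples into
# train/heldout and is only revealed at evaluation time.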
# Private model is trained once
model_private = train(params, hidden_masks["private"]["train"])
# The attacker can then use the "public masks" they know about to mount their privacy attack
# Note that the attacker does not have access to hidden_masks
model_public = train(params, known_masks["public"])
def privacy_attack(model_private, private_masks):
"""
Random attack model
"""
return torch.rand(len(private_masks))
guessed_membership = privacy_attack(model_private, known_masks["private"])
# guessed_membership is typically something like [0.5, 0.7]
# At evaluation time, the guessed membership are compared to the true ones
# Only then can hidden_masks be checked
print(evaluate_masks(guessed_membership, hidden_masks["private"], threshold=0.5))
# Computes precision, recall, accuracy, etc. | calibration_membership-main | api.py |
# -*- coding: utf-8 -*-
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import json
import os
import sys
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
import torch
from models import build_model
from datasets import get_dataset
from utils.evaluator import Evaluator
from utils.logger import create_logger
from utils.misc import bool_flag
from utils.trainer import Trainer
from utils.masks import generate_masks
import socket
import signal
import subprocess
import torch.nn as nn
def init_distributed_mode(params):
"""
Handle single and multi-GPU / multi-node / SLURM jobs.
Initialize the following variables:
- n_nodes
- node_id
- local_rank
- global_rank
- world_size
"""
params.is_slurm_job = 'SLURM_JOB_ID' in os.environ and not params.debug_slurm
# logger.info("SLURM job: %s" % str(params.is_slurm_job))
# SLURM job
print('slurm job', params.is_slurm_job)
if params.is_slurm_job:
assert params.local_rank == -1 # on the cluster, this is handled by SLURM
SLURM_VARIABLES = [
'SLURM_JOB_ID',
'SLURM_JOB_NODELIST', 'SLURM_JOB_NUM_NODES', 'SLURM_NTASKS', 'SLURM_TASKS_PER_NODE',
'SLURM_MEM_PER_NODE', 'SLURM_MEM_PER_CPU',
'SLURM_NODEID', 'SLURM_PROCID', 'SLURM_LOCALID', 'SLURM_TASK_PID'
]
PREFIX = "%i - " % int(os.environ['SLURM_PROCID'])
for name in SLURM_VARIABLES:
value = os.environ.get(name, None)
# logger.info(PREFIX + "%s: %s" % (name, str(value)))
# # job ID
params.job_id = os.environ['SLURM_JOB_ID']
# number of nodes / node ID
params.n_nodes = int(os.environ['SLURM_JOB_NUM_NODES'])
params.node_id = int(os.environ['SLURM_NODEID'])
# local rank on the current node / global rank
params.local_rank = int(os.environ['SLURM_LOCALID'])
params.global_rank = int(os.environ['SLURM_PROCID'])
# number of processes / GPUs per node
params.world_size = int(os.environ['SLURM_NTASKS'])
params.n_gpu_per_node = params.world_size // params.n_nodes
# define master address and master port
hostnames = subprocess.check_output(['scontrol', 'show', 'hostnames', os.environ['SLURM_JOB_NODELIST']])
params.master_addr = hostnames.split()[0].decode('utf-8')
assert 10001 <= params.master_port <= 20000 or params.world_size == 1
# logger.info(PREFIX + "Master address: %s" % params.master_addr)
# logger.info(PREFIX + "Master port : %i" % params.master_port)
# set environment variables for 'env://'
os.environ['MASTER_ADDR'] = params.master_addr
os.environ['MASTER_PORT'] = str(params.master_port)
os.environ['WORLD_SIZE'] = str(params.world_size)
os.environ['RANK'] = str(params.global_rank)
# multi-GPU job (local or multi-node) - jobs started with torch.distributed.launch
elif params.local_rank != -1:
assert params.master_port == -1
# read environment variables
params.global_rank = int(os.environ['RANK'])
params.world_size = int(os.environ['WORLD_SIZE'])
params.n_gpu_per_node = int(os.environ['NGPU'])
# number of nodes / node ID
params.n_nodes = params.world_size // params.n_gpu_per_node
params.node_id = params.global_rank // params.n_gpu_per_node
# local job (single GPU)
else:
assert params.local_rank == -1
assert params.master_port == -1
params.n_nodes = 1
params.node_id = 0
params.local_rank = 0
params.global_rank = 0
params.world_size = 1
params.n_gpu_per_node = 1
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in distributed mode
params.is_master = params.node_id == 0 and params.local_rank == 0
params.multi_node = params.n_nodes > 1
params.multi_gpu = params.world_size > 1
print('n_nodes', params.n_nodes)
print('multi gpu', params.multi_gpu)
print('world size', params.world_size)
# summary
PREFIX = "%i - " % params.global_rank
# logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
# logger.info(PREFIX + "Node ID : %i" % params.node_id)
# logger.info(PREFIX + "Local rank : %i" % params.local_rank)
# logger.info(PREFIX + "Global rank : %i" % params.global_rank)
# logger.info(PREFIX + "World size : %i" % params.world_size)
# logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node)
# logger.info(PREFIX + "Master : %s" % str(params.is_master))
# logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node))
# logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu))
# logger.info(PREFIX + "Hostname : %s" % socket.gethostname())
# set GPU device
torch.cuda.set_device(params.local_rank)
# initialize multi-GPU
if params.multi_gpu:
# http://pytorch.apachecn.org/en/0.3.0/distributed.html#environment-variable-initialization
# 'env://' will read these environment variables:
# MASTER_PORT - required; has to be a free port on machine with rank 0
# MASTER_ADDR - required (except for rank 0); address of rank 0 node
# WORLD_SIZE - required; can be set either here, or in a call to init function
# RANK - required; can be set either here, or in a call to init function
# logger.info("Initializing PyTorch distributed ...")
torch.distributed.init_process_group(
init_method='env://',
backend='nccl',
)
def sig_handler(signum, frame):
# logger.warning("Signal handler called with signal " + str(signum))
prod_id = int(os.environ['SLURM_PROCID'])
# logger.warning("Host: %s - Global rank: %i" % (socket.gethostname(), prod_id))
if prod_id == 0:
# logger.warning("Requeuing job " + os.environ['SLURM_JOB_ID'])
os.system('scontrol requeue ' + os.environ['SLURM_JOB_ID'])
else:
'nothing'
# logger.warning("Not the master process, no need to requeue.")
sys.exit(-1)
def term_handler(signum, frame):
'nothing'
# logger.warning("Signal handler called with signal " + str(signum))
# logger.warning("Bypassing SIGTERM.")
def init_signal_handler():
"""
Handle signals sent by SLURM for time limit / pre-emption.
"""
signal.signal(signal.SIGUSR1, sig_handler)
signal.signal(signal.SIGTERM, term_handler)
# logger.warning("Signal handler installed.")
def check_parameters(params):
assert params.dump_path is not None
os.makedirs(params.dump_path, exist_ok=True)
def get_parser():
"""
Generate a parameters parser.
"""
parser = argparse.ArgumentParser(description='Train/evaluate image classification models')
# config parameters
parser.add_argument("--dump_path", type=str, default=None)
parser.add_argument('--print_freq', type=int, default=5)
parser.add_argument("--save_periodic", type=int, default=0)
# Data parameters
parser.add_argument("--data_root", type=str, default="data")
parser.add_argument("--dataset", type=str, choices=["cifar10", "cifar100","imagenet", "gaussian","credit", "hep", "adult", "mnist", "lfw"], default="cifar10")
parser.add_argument("--mask_path", type=str, required=True)
parser.add_argument('--n_data', type=int, default=500)
parser.add_argument('--num_classes', type=int, default=10)
parser.add_argument('--data_num_dimensions', type=int, default=75)
parser.add_argument('--random_seed', type=int, default=10)
parser.add_argument("--scale", type=float, default=1.0)
# Model parameters
parser.add_argument("--architecture", choices=["lenet", "smallnet", "alexnet", "kllenet", "linear", "mlp", "resnet18", "leaks"], default="lenet")
# training parameters
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--epochs", type=int, default=2)
parser.add_argument("--optimizer", default="sgd,lr=0.001,momentum=0.9")
parser.add_argument("--num_workers", type=int, default=2)
parser.add_argument("--aug", type=bool_flag, default=False)
parser.add_argument("--in_channels", type=int, default=3)
parser.add_argument("--private_train_split", type=float, default=0.25)
parser.add_argument("--private_heldout_split", type=float, default=0.25)
# privacy parameters
parser.add_argument("--private", type=bool_flag, default=False)
parser.add_argument("--noise_multiplier", type=float, default=None)
parser.add_argument("--privacy_epsilon", type=float, default=None)
parser.add_argument("--privacy_delta", type=float, default=None)
parser.add_argument("--log_gradients", type=bool_flag, default=False)
parser.add_argument("--log_batch_models", type=bool_flag, default=False)
parser.add_argument("--log_epoch_models", type=bool_flag, default=False)
parser.add_argument("--max_grad_norm", type=float, default=1.0)
#multi gpu paramaeters
parser.add_argument("--local_rank", type=int, default=-1)
parser.add_argument("--master_port", type=int, default=-1)
parser.add_argument("--debug_slurm", type=bool_flag, default=False)
return parser
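# train() creates data loaders restricted to the given membership mask, builds
# the model (wrapping it in DistributedDataParallel when several GPUs are
# used), and then alternates Trainer.classif_step updates with Evaluator
# accuracy checks, logging JSON scores and checkpointing via end_epoch.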
def train(params, mask):
# Create logger and print params
logger = create_logger(params)
# initialize the multi-GPU / multi-node training
init_distributed_mode(params)
if params.is_slurm_job:
init_signal_handler()
trainloader, n_data = get_dataset(params=params, is_train=True, mask=mask)
validloader, _ = get_dataset(params=params, is_train=False)
model = build_model(params)
model.cuda()
if params.multi_gpu:
if params.private:
raise NotImplementedError('Distributed training not implemented with privacy')
else:
print('Using multi gpu')
model = nn.parallel.DistributedDataParallel(model, device_ids=[params.local_rank], output_device=params.local_rank, broadcast_buffers=True)
trainer = Trainer(model, params, n_data=n_data)
trainer.reload_checkpoint()
evaluator = Evaluator(model, params)
# evaluation
# if params.eval_only:
# scores = evaluator.run_all_evals(trainer, evals=['classif'], data_loader=validloader)
# for k, v in scores.items():
# logger.info('%s -> %.6f' % (k, v))
# logger.info("__log__:%s" % json.dumps(scores))
# exit()
# training
for epoch in range(trainer.epoch, params.epochs):
# update epoch / sampler / learning rate
trainer.epoch = epoch
logger.info("============ Starting epoch %i ... ============" % trainer.epoch)
# train
for (idx, images, targets) in trainloader:
trainer.classif_step(idx, images, targets)
trainer.end_step()
logger.info("============ End of epoch %i ============" % trainer.epoch)
# evaluate classification accuracy
scores = evaluator.run_all_evals(evals=['classif'], data_loader=validloader)
for name, val in trainer.get_scores().items():
scores[name] = val
# print / JSON log
for k, v in scores.items():
logger.info('%s -> %.6f' % (k, v))
logger.info("__log__:%s" % json.dumps(scores))
# end of epoch
trainer.end_epoch(scores)
return model
if __name__ == '__main__':
parser = get_parser()
params = parser.parse_args()
check_parameters(params)
if params.dataset=='imagenet':
n_data=1281167
elif params.dataset=='credit':
n_data=800
elif params.dataset=='hep':
n_data=124
elif params.dataset=='adult':
n_data=32561
elif params.dataset=='mnist':
n_data=60000
elif params.dataset=='lfw':
#need to do the split here and fill this in
n_data=912
else:
n_data=50000
if params.mask_path=="none":
split_config = {"public": {"train": 0.25,"heldout": 0.25}, "private": {"train": params.private_train_split,"heldout": params.private_heldout_split}}
# Randomly split the data according to the configuration
known_masks, hidden_masks = generate_masks(n_data, split_config)
path = "data/"
torch.save(known_masks['public'], path + "public.pth")
torch.save(known_masks['private'], path + "private.pth")
torch.save(hidden_masks['private']['train'], path + "hidden/train.pth")
torch.save(hidden_masks['private']['heldout'], path + "hidden/heldout.pth")
torch.save(hidden_masks['public']['train'], path + "hidden/public_train.pth")
torch.save(hidden_masks['public']['heldout'], path + "hidden/public_heldout.pth")
mask=hidden_masks['private']['train']
else:
mask = torch.load(params.mask_path)
train(params, mask)
| calibration_membership-main | training/image_classification.py |
# -*- coding: utf-8 -*-
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import json
import os
from models import build_model
from datasets import get_dataset
from utils.trainer import Trainer
from utils.logger import create_logger
from utils.misc import bool_flag
def check_parameters(params):
if params.private:
assert params.privacy_epsilon is not None
assert params.dump_path is not None
os.makedirs(params.dump_path, exist_ok=True)
def get_parser():
"""
Generate a parameters parser.
"""
parser = argparse.ArgumentParser(description='Train/evaluate a language model')
# Config parameters
parser.add_argument("--dump_path", type=str, default=None)
parser.add_argument('--print_freq', type=int, default=5)
parser.add_argument("--save_periodic", type=int, default=0)
# Data parameters
parser.add_argument("--data_root", type=str, default="data")
parser.add_argument("--dataset", choices=["dummy"], default='dummy')
parser.add_argument("--n_vocab", type=int, default=256)
# Model parameters
parser.add_argument("--architecture", type=str, default='lstm')
parser.add_argument("--embedding_dim", type=int, default=64)
parser.add_argument("--hidden_dim", type=int, default=64)
parser.add_argument("--num_layers", type=int, default=1)
# Training parameters
parser.add_argument("--batch_size", type=int, default=4)
parser.add_argument("--epochs", type=int, default=2)
parser.add_argument("--num_workers", type=int, default=2)
parser.add_argument("--optimizer", default="sgd,lr=0.001,momentum=0.9")
parser.add_argument("--seq_len", type=int, default=256)
# Privacy parameters
parser.add_argument("--private", type=bool_flag, default=False)
parser.add_argument("--noise_multiplier", type=float, default=None)
parser.add_argument("--privacy_epsilon", type=float, default=None)
parser.add_argument("--privacy_delta", type=float, default=None)
parser.add_argument("--privacy_fake_samples", type=int, default=None)
parser.add_argument("--log_gradients", type=bool_flag, default=False)
return parser
def main(params):
# Create logger and print params (very useful for debugging)
logger = create_logger(params)
trainloader, n_data = get_dataset(params, split='train', is_train=True)
validloader, _ = get_dataset(params, split='valid', is_train=False)
model = build_model(params)
model.cuda()
trainer = Trainer(model, params, n_data=n_data)
trainer.reload_checkpoint()
# evaluator = Evaluator(trainer, params)
# evaluation
# if params.eval_only:
# scores = evaluator.run_all_evals(trainer, evals=['classif'], data_loader=validloader)
# for k, v in scores.items():
# logger.info('%s -> %.6f' % (k, v))
# logger.info("__log__:%s" % json.dumps(scores))
# exit()
# training
for epoch in range(trainer.epoch, params.epochs):
# update epoch / sampler / learning rate
trainer.epoch = epoch
logger.info("============ Starting epoch %i ... ============" % trainer.epoch)
# train
for (idx, sentence) in trainloader:
trainer.lm_step(idx, sentence)
trainer.end_step()
logger.info("============ End of epoch %i ============" % trainer.epoch)
# evaluate classification accuracy
# scores = evaluator.run_all_evals(trainer, evals=['classif'], data_loader=validloader)
scores = {}
for name, val in trainer.get_scores().items():
scores[name] = val
# print / JSON log
for k, v in scores.items():
logger.info('%s -> %.6f' % (k, v))
logger.info("__log__:%s" % json.dumps(scores))
# end of epoch
trainer.end_epoch(scores)
if __name__ == '__main__':
parser = get_parser()
params = parser.parse_args()
check_parameters(params)
main(params)
| calibration_membership-main | training/language_modeling.py |
calibration_membership-main | training/__init__.py |
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, TensorDataset
import numpy as np
from numpy.random import multivariate_normal
from sklearn.datasets import fetch_openml
from sklearn.impute import SimpleImputer
from sklearn import preprocessing
import pandas as pd
from sklearn.datasets import fetch_lfw_people
from .text_data import TextIterator
class IdxDataset(Dataset):
"""
Wraps a dataset so that with each element is also returned its index
"""
def __init__(self, dataset: Dataset):
self.dataset = dataset
def __getitem__(self, i: int):
sample = self.dataset[i]
if type(sample) is tuple:
sample = list(sample)
sample.insert(0, i)
return tuple(sample)
else:
return i, sample
def __len__(self):
return len(self.dataset)
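# Minimal sketch of IdxDataset: wrapping a dataset makes every sample come back with
# its integer index prepended, which the trainer later uses for per-sample logging.
def _demo_idx_dataset():
    base = TensorDataset(torch.arange(4).float().unsqueeze(1), torch.tensor([0, 1, 0, 1]))
    wrapped = IdxDataset(base)
    # wrapped[2] -> (2, tensor([2.]), tensor(0))
    return wrapped[2]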
class MaskDataset(Dataset):
def __init__(self, dataset: Dataset, mask: torch.Tensor):
"""
example:
mask: [0, 1, 1]
cumul: [-1, 0, 1]
remap: {0: 1, 1: 2}
"""
assert mask.dim() == 1
assert mask.size(0) == len(dataset)
assert mask.dtype == torch.bool
mask = mask.long()
cumul = torch.cumsum(mask, dim=0) - 1
self.remap = {}
for i in range(mask.size(0)):
if mask[i] == 1:
self.remap[cumul[i].item()] = i
assert mask[i] in [0, 1]
self.dataset = dataset
self.mask = mask
self.length = cumul[-1].item() + 1
def __getitem__(self, i: int):
return self.dataset[self.remap[i]]
def __len__(self):
return self.length
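# Minimal sketch of MaskDataset: a boolean mask of length len(dataset) selects a subset
# and indices are remapped so the subset is contiguous (here 0 -> 1 and 1 -> 2, as in
# the docstring example above).
def _demo_mask_dataset():
    base = TensorDataset(torch.arange(3).float().unsqueeze(1))
    mask = torch.tensor([False, True, True])
    masked = MaskDataset(base, mask)
    # len(masked) == 2 and masked[0] is base[1]
    return len(masked), masked[0]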
def adult_data_transform(df):
binary_data = pd.get_dummies(df)
feature_cols = binary_data[binary_data.columns[:-2]]
scaler = preprocessing.StandardScaler()
data = pd.DataFrame(scaler.fit_transform(feature_cols), columns=feature_cols.columns)
return data
def get_transform(dataset, aug, is_train):
if dataset == "cifar10":
if aug and is_train:
print('Using data augmentation to train model')
augmentations = [transforms.RandomCrop(32, padding=4),transforms.RandomHorizontalFlip()]
normalize = [transforms.ToTensor(),transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
transform = transforms.Compose(augmentations + normalize)
else:
print('Not using data augmentation to train model')
transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
elif dataset=='mnist':
transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.1307,), (0.3081,))])
elif dataset=='imagenet':
if aug and is_train:
print('Using data augmentation to train model')
augmentations = [transforms.Resize(256),transforms.RandomResizedCrop(224),transforms.RandomHorizontalFlip()]
normalize = [transforms.ToTensor(),transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]
transform = transforms.Compose(augmentations + normalize)
else:
print('Not using data augmentation to train model')
transform = transforms.Compose( [transforms.Resize(256),transforms.CenterCrop(224),transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])])
elif dataset=='cifar100':
if aug and is_train:
print('Using data augmentation to train model')
augmentations = [transforms.RandomCrop(32, padding=4),transforms.RandomHorizontalFlip()]
normalize = [transforms.ToTensor(),transforms.Normalize(mean=[n/255 for n in [129.3, 124.1, 112.4]], std=[n/255 for n in [68.2, 65.4, 70.4]])]
transform = transforms.Compose(augmentations + normalize)
else:
print('Not using data augmentation to train model')
transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize(mean=[n/255 for n in [129.3, 124.1, 112.4]], std=[n/255 for n in [68.2, 65.4, 70.4]])])
return transform
def get_dataset(*, params, is_train, mask=None):
if is_train:
assert mask is not None
if params.dataset == "cifar10":
if is_train:
transform = get_transform(params.dataset, params.aug, True)
else:
transform = get_transform(params.dataset, params.aug, False)
dataset = torchvision.datasets.CIFAR10(root=params.data_root, train=is_train, download=True, transform=transform)
dataset = IdxDataset(dataset)
if mask is not None:
dataset = MaskDataset(dataset, mask)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=params.batch_size, shuffle=True, num_workers=params.num_workers)
n_data = len(dataset)
params.num_classes = 10
return dataloader, n_data
elif params.dataset=="imagenet":
if is_train:
transform = get_transform(params.dataset, params.aug, True)
else:
transform = get_transform(params.dataset, params.aug, False)
if is_train:
dataset = torchvision.datasets.ImageFolder(root=params.data_root+'/train',transform=transform)
else:
dataset = torchvision.datasets.ImageFolder(root=params.data_root+'/val',transform=transform)
dataset = IdxDataset(dataset)
if mask is not None:
dataset = MaskDataset(dataset, mask)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=params.batch_size, shuffle=True, num_workers=params.num_workers)
n_data = len(dataset)
params.num_classes = 1000
return dataloader, n_data
elif params.dataset=='cifar100':
if is_train:
transform = get_transform(params.dataset, params.aug, True)
else:
transform = get_transform(params.dataset, params.aug, False)
dataset = torchvision.datasets.CIFAR100(root=params.data_root, train=is_train, download=True, transform=transform)
dataset = IdxDataset(dataset)
if mask is not None:
dataset = MaskDataset(dataset, mask)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=params.batch_size, shuffle=True, num_workers=params.num_workers)
n_data = len(dataset)
params.num_classes = 100
return dataloader, n_data
elif params.dataset=='mnist':
transform = get_transform(params.dataset, params.aug, True)
dataset = torchvision.datasets.MNIST(root=params.data_root, train=is_train, download=True, transform=transform)
dataset = IdxDataset(dataset)
if mask is not None:
dataset = MaskDataset(dataset, mask)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=params.batch_size, shuffle=True, num_workers=params.num_workers)
n_data = len(dataset)
params.num_classes = 10
return dataloader, n_data
elif params.dataset=='gaussian':
x,y=get_gaussian_dataset(params.n_data,params.num_classes,params.data_num_dimensions,params.random_seed,scale=params.scale)
data = []
for i in range(len(x)):
data.append([i,x[i], y[i]])
dataloader = torch.utils.data.DataLoader(data, shuffle=True, batch_size=params.batch_size)
return dataloader, params.n_data
elif params.dataset=='credit':
cred=fetch_openml('credit-g')
data = SimpleImputer(missing_values=np.nan, strategy='mean', copy=True).fit(cred.data).transform(cred.data)
target = preprocessing.LabelEncoder().fit(cred.target).transform(cred.target)
X=data
norm = np.max(np.concatenate((-1*X.min(axis=0)[np.newaxis], X.max(axis=0)[np.newaxis]),axis=0).T, axis=1).astype('float32')
data=np.divide(data,norm)
data=torch.tensor(data).float()
target=torch.tensor(target).long()
if is_train:
ids=np.arange(1000)[:800]
else:
ids=np.arange(1000)[800:]
final_data = []
for i in ids:
final_data.append([i,data[i], target[i]])
params.num_classes = 2
if mask is not None:
final_data = MaskDataset(final_data, mask)
dataloader = torch.utils.data.DataLoader(final_data, shuffle=True, batch_size=params.batch_size)
n_data=len(final_data)
print('Datasize', n_data)
return dataloader, n_data
elif params.dataset=='hep':
hep=fetch_openml('hepatitis')
data = SimpleImputer(missing_values=np.nan, strategy='mean', copy=True).fit(hep.data).transform(hep.data)
target = preprocessing.LabelEncoder().fit(hep.target).transform(hep.target)
X=data
norm = np.max(np.concatenate((-1*X.min(axis=0)[np.newaxis], X.max(axis=0)[np.newaxis]),axis=0).T, axis=1).astype('float32')
data=np.divide(data,norm)
data=torch.tensor(data).float()
target=torch.tensor(target).long()
if is_train:
ids=np.arange(155)[:124]
else:
ids=np.arange(155)[124:]
final_data = []
for i in ids:
final_data.append([i,data[i], target[i]])
params.num_classes = 2
if mask is not None:
final_data = MaskDataset(final_data, mask)
dataloader = torch.utils.data.DataLoader(final_data, shuffle=True, batch_size=params.batch_size)
n_data=len(final_data)
print('Datasize', n_data)
return dataloader, n_data
elif params.dataset == 'adult':
columns = ["age", "workClass", "fnlwgt", "education", "education-num","marital-status", "occupation", "relationship","race", "sex", "capital-gain", "capital-loss", "hours-per-week", "native-country", "income"]
train_data = pd.read_csv(params.data_root+'/adult.data', names=columns, sep=' *, *', na_values='?')
test_data = pd.read_csv(params.data_root+'/adult.test', names=columns, sep=' *, *', skiprows=1, na_values='?')
original_train=train_data
original_test=test_data
num_train = len(original_train)
original = pd.concat([original_train, original_test])
labels = original['income']
labels = labels.replace('<=50K', 0).replace('>50K', 1)
labels = labels.replace('<=50K.', 0).replace('>50K.', 1)
# Remove target
del original["income"]
data = adult_data_transform(original)
train_data = data[:num_train]
train_labels = labels[:num_train]
test_data = data[num_train:]
test_labels = labels[num_train:]
test_data=torch.tensor(test_data.to_numpy()).float()
train_data=torch.tensor(train_data.to_numpy()).float()
test_labels=torch.tensor(test_labels.to_numpy(dtype='int64')).long()
train_labels=torch.tensor(train_labels.to_numpy(dtype='int64')).long()
if is_train:
final_data = []
for i in np.arange(len(train_data)):
final_data.append([i,train_data[i], train_labels[i]])
if mask is not None:
final_data = MaskDataset(final_data, mask)
dataloader = torch.utils.data.DataLoader(final_data, shuffle=True, batch_size=params.batch_size)
n_data=len(final_data)
else:
final_data = []
for i in np.arange(len(test_data)):
final_data.append([i,test_data[i], test_labels[i]])
dataloader = torch.utils.data.DataLoader(final_data, batch_size=params.batch_size)
n_data=len(final_data)
print('Datasize', n_data)
return dataloader,n_data
elif params.dataset == 'lfw':
lfw_people = fetch_lfw_people(data_home=params.data_root,min_faces_per_person=100, resize=0.4)
n_samples, h, w = lfw_people.images.shape
lfw_images=torch.tensor(lfw_people.images).float()
lfw_targets=torch.tensor(lfw_people.target).long()
if is_train:
ids=np.arange(1140)[:912]
else:
ids=np.arange(1140)[912:]
final_data = []
for i in ids:
image=lfw_images[i].reshape((h, w)).unsqueeze(0)
target=lfw_targets[i]
final_data.append([i,image, target])
params.num_classes = 5
if mask is not None:
final_data = MaskDataset(final_data, mask)
dataloader = torch.utils.data.DataLoader(final_data, shuffle=True, batch_size=params.batch_size)
n_data=len(final_data)
return dataloader, n_data
elif params.dataset == "dummy":
# Creates a dummy dataset for NLP
n_data, delta = 10000, 3
data = torch.randint(-delta, delta, size=(n_data, params.seq_len))
data = torch.cumsum(data, dim=1)
data = torch.remainder(data, params.n_vocab)
iterator = TextIterator(data.view(-1), params.batch_size, params.seq_len)
return iterator, n_data
def get_gaussian_dataset(n,num_classes,num_dimensions,random_seed,scale=1):
np.random.seed(random_seed)
mu = [(2*np.random.rand(num_dimensions) - 1) * scale for c in range(num_classes)]
S = np.diag(np.random.rand(num_dimensions)) + 0.5
np.random.seed(np.random.randint(1000))
x = np.concatenate([multivariate_normal(mu[c], S, n//num_classes) for c in range(num_classes)])
y = np.concatenate([np.ones(n//num_classes) * c for c in range(num_classes)])
x=torch.tensor(x).float()
y=torch.tensor(y).long()
return x, y | calibration_membership-main | datasets/__init__.py |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
class TextIterator:
def __init__(self, sequence, batch_size, seq_len):
assert sequence.ndim == 1
self.batch_size = batch_size
self.sequence = sequence.view(seq_len, -1)
self.i_batch = 0
def __iter__(self):
self.i_batch = 0
return self
def __next__(self):
if (self.i_batch + 1) * self.batch_size < self.sequence.size(1):
start = self.i_batch * self.batch_size
end = (self.i_batch + 1) * self.batch_size
self.i_batch += 1
return torch.arange(start, end), self.sequence[:, start:end]
else:
raise StopIteration
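# Minimal sketch of TextIterator: a flat token sequence is reshaped into columns of
# length seq_len and yielded batch_size columns at a time, together with column indices.
def _demo_text_iterator():
    it = TextIterator(torch.arange(64), batch_size=2, seq_len=8)
    shapes = [chunk.shape for _, chunk in it]
    # three (8, 2) batches come out; the last two columns are dropped by the strict '<'
    # comparison in __next__
    return shapes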
| calibration_membership-main | datasets/text_data.py |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
# Taken from https://github.com/facebookresearch/XLM
import argparse
FALSY_STRINGS = {'off', 'false', '0'}
TRUTHY_STRINGS = {'on', 'true', '1'}
def bool_flag(s):
"""
Parse boolean arguments from the command line.
"""
if s.lower() in FALSY_STRINGS:
return False
elif s.lower() in TRUTHY_STRINGS:
return True
else:
raise argparse.ArgumentTypeError("invalid value for a boolean flag")
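# Minimal sketch of bool_flag used as an argparse type: "on"/"true"/"1" map to True and
# "off"/"false"/"0" map to False, case-insensitively; the flag name is illustrative.
def _demo_bool_flag():
    parser = argparse.ArgumentParser()
    parser.add_argument("--private", type=bool_flag, default=False)
    return parser.parse_args(["--private", "True"]).private  # True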
| calibration_membership-main | utils/misc.py |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
from datetime import timedelta
import logging
import re
import sys
import time
class LogFormatter():
def __init__(self):
self.start_time = time.time()
def format(self, record):
elapsed_seconds = round(record.created - self.start_time)
prefix = "%s - %s - %s" % (
record.levelname,
time.strftime('%x %X'),
timedelta(seconds=elapsed_seconds)
)
message = record.getMessage()
message = message.replace('\n', '\n' + ' ' * (len(prefix) + 3))
return "%s - %s" % (prefix, message) if message else ''
def create_logger(params):
"""
Create a logger and print params
"""
# create log formatter
log_formatter = LogFormatter()
# create console handler and set level to info
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(log_formatter)
# create logger and set level to debug
logger = logging.getLogger()
logger.handlers = []
logger.setLevel(logging.DEBUG)
logger.propagate = False
logger.addHandler(console_handler)
# reset logger elapsed time
def reset_time():
log_formatter.start_time = time.time()
logger.reset_time = reset_time
# get running command
command = ["python", sys.argv[0]]
for x in sys.argv[1:]:
if x.startswith('--'):
assert '"' not in x and "'" not in x
command.append(x)
else:
assert "'" not in x
if re.match('^[a-zA-Z0-9_]+$', x):
command.append("%s" % x)
else:
command.append("'%s'" % x)
params.command = ' '.join(command)
logger.info("============ Initialized logger ============")
logger.info("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(params)).items())))
logger.info("The experiment will be stored in %s\n" % params.dump_path)
logger.info("Running command: %s" % params.command)
return logger
| calibration_membership-main | utils/logger.py |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import torch
import operator
def to_mask(n_data, indices):
mask = torch.zeros(n_data, dtype=bool)
mask[indices] = 1
return mask
def multiply_round(n_data, cfg):
s_total = sum(cfg.values())
sizes = {name: int(s * n_data / s_total) for name, s in cfg.items()}
max_name = max(sizes.items(), key=operator.itemgetter(1))[0]
sizes[max_name] += n_data - sum(sizes.values())
return sizes
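# Minimal sketch of multiply_round: each split gets the truncated share of n_data and
# the largest split absorbs the remainder so the sizes always sum back to n_data.
def _demo_multiply_round():
    # 101 points split 50/50: truncation gives 50 + 50, the leftover point goes to "train"
    return multiply_round(101, {"train": 0.5, "heldout": 0.5})  # {'train': 51, 'heldout': 50}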
def generate_masks(n_data, split_config):
assert type(split_config) is dict
assert "public" in split_config and "private" in split_config
assert type(split_config["private"]) is dict
permutation = np.random.permutation(n_data)
if type(split_config["public"]) is dict:
n_public=int(sum(split_config["public"].values())*n_data)
else:
n_public = int(split_config["public"] * n_data)
n_private = n_data - n_public
known_masks = {}
known_masks["public"] = to_mask(n_data, permutation[:n_public])
known_masks["private"] = to_mask(n_data, permutation[n_public:])
hidden_masks = {}
hidden_masks["private"] = {}
sizes = multiply_round(n_private, split_config["private"])
print(' Private', sizes)
offset = n_public
for name, size in sizes.items():
hidden_masks["private"][name] = to_mask(n_data, permutation[offset:offset+size])
offset += size
assert offset == n_data
if type(split_config["public"]) is dict:
hidden_masks["public"] = {}
public_sizes = multiply_round(n_public, split_config["public"])
print('Public', public_sizes)
public_offset = 0
for name, size in public_sizes.items():
hidden_masks["public"][name] = to_mask(n_data, permutation[public_offset:public_offset+size])
public_offset += size
assert public_offset == n_public
else:
hidden_masks["public"] = known_masks["public"]
return known_masks, hidden_masks
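# Minimal sketch of generate_masks with an illustrative config: "public" is a plain
# fraction here, so hidden_masks["public"] is just the public mask, while the private
# half is further split into hidden train / heldout masks.
def _demo_generate_masks():
    split_config = {"public": 0.5, "private": {"train": 0.5, "heldout": 0.5}}
    known, hidden = generate_masks(100, split_config)
    # 50 public points, and 25 of the 50 private points land in the hidden train mask
    return known["public"].sum().item(), hidden["private"]["train"].sum().item()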
def evaluate_masks(guessed_membership, private_masks, threshold, attack_base=None):
if attack_base=='loss' or attack_base=='mean':
true_positives = (guessed_membership[private_masks["train"]] <= threshold).float()
false_negatives= (guessed_membership[private_masks["train"]] > threshold).float()
true_negatives = (guessed_membership[private_masks["heldout"]] > threshold).float()
false_positives = (guessed_membership[private_masks["heldout"]] <= threshold).float()
else:
true_positives = (guessed_membership[private_masks["train"]] >= threshold).float()
false_negatives = (guessed_membership[private_masks["train"]] < threshold).float()
true_negatives = (guessed_membership[private_masks["heldout"]] < threshold).float()
false_positives = (guessed_membership[private_masks["heldout"]] >= threshold).float()
fpr=torch.sum(false_positives) / (torch.sum(false_positives) + torch.sum(true_negatives))
recall = torch.sum(true_positives) / torch.sum(private_masks["train"].float())
precision = torch.sum(true_positives) / (torch.sum(true_positives) + torch.sum(false_positives))
accuracy = (torch.sum(true_positives) + torch.sum(true_negatives)) / (torch.sum(private_masks["heldout"].float()) + torch.sum(private_masks["train"].float()))
return fpr, precision, recall, accuracy
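# Minimal toy example of evaluate_masks: with attack_base="loss", scores at or below the
# threshold are guessed as members, so this perfectly separated case gives precision,
# recall and accuracy of 1 and a false positive rate of 0.
def _demo_evaluate_masks():
    scores = torch.tensor([0.1, 0.2, 0.9, 0.8])
    masks = {"train": torch.tensor([True, True, False, False]),
             "heldout": torch.tensor([False, False, True, True])}
    return evaluate_masks(scores, masks, threshold=0.5, attack_base="loss")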
| calibration_membership-main | utils/masks.py |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import re
import inspect
import json
import itertools
from torch import optim
import numpy as np
from logging import getLogger
from opacus import PrivacyEngine
import opacus.privacy_analysis as privacy_analysis
logger = getLogger()
def repeat(l, r):
"""
Repeat r times each value of list l.
"""
return list(itertools.chain.from_iterable(itertools.repeat(x, r) for x in l))
def repeat_to(l, r):
"""
Repeat values in list l so that it has r values
"""
assert r % len(l) == 0
return repeat(l, r // len(l))
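# Minimal sketch: repeat_to stretches a short learning-rate schedule over the number of
# epochs, e.g. two values over 6 epochs becomes three epochs at each value.
def _demo_repeat_to():
    return repeat_to([0.1, 0.01], 6)  # [0.1, 0.1, 0.1, 0.01, 0.01, 0.01]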
def get_optimizer(parameters, opt_config, epochs):
"""
Parse optimizer parameters.
opt_config should be of the form:
- "sgd,lr=0.01"
- "adagrad,lr=0.1,lr_decay=0.05"
"""
lr_schedule = None
if "," in opt_config:
method = opt_config[:opt_config.find(',')]
optim_params = {}
for x in opt_config[opt_config.find(',') + 1:].split(','):
            # e.g. split = ('lr', '0.1-0.01') or split = ('weight_decay', '0.001')
split = x.split('=')
assert len(split) == 2
param_name, param_value = split
assert any([
re.match(r"^[+-]?(\d+(\.\d*)?|\.\d+)$", param_value) is not None,
param_name == "lr" and re.match(r"^[+-]?(\d+(\.\d*)?|\.\d+)$", param_value) is not None,
param_name == "lr" and ("-" in param_value),
param_name == "lr" and re.match(r"^cos:[+-]?(\d+(\.\d*)?|\.\d+)$", param_value) is not None
])
if param_name == "lr":
if param_value.startswith("cos:"):
lr_init = float(param_value[4:])
lr_schedule = [lr_init * (1 + np.cos(np.pi * epoch / epochs)) / 2 for epoch in range(epochs)]
else:
lr_schedule = [float(lr) for lr in param_value.split("-")]
optim_params[param_name] = float(lr_schedule[0])
lr_schedule = repeat_to(lr_schedule, epochs)
else:
optim_params[param_name] = float(param_value)
else:
method = opt_config
optim_params = {}
if method == 'adadelta':
optim_fn = optim.Adadelta
elif method == 'adagrad':
optim_fn = optim.Adagrad
elif method == 'adam':
optim_fn = optim.Adam
optim_params['betas'] = (optim_params.get('beta1', 0.9), optim_params.get('beta2', 0.999))
optim_params.pop('beta1', None)
optim_params.pop('beta2', None)
elif method == 'adamax':
optim_fn = optim.Adamax
elif method == 'asgd':
optim_fn = optim.ASGD
elif method == 'rmsprop':
optim_fn = optim.RMSprop
elif method == 'rprop':
optim_fn = optim.Rprop
elif method == 'sgd':
optim_fn = optim.SGD
assert 'lr' in optim_params
else:
raise Exception('Unknown optimization method: "%s"' % method)
# check that we give good parameters to the optimizer
    expected_args = inspect.getfullargspec(optim_fn.__init__)[0]
assert expected_args[:2] == ['self', 'params']
if not all(k in expected_args[2:] for k in optim_params.keys()):
raise Exception('Unexpected parameters: expected "%s", got "%s"' % (
str(expected_args[2:]), str(optim_params.keys())))
logger.info("Schedule of %s: %s" % (opt_config, str(lr_schedule)))
return optim_fn(parameters, **optim_params), lr_schedule
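# Minimal sketch of the optimizer string format accepted above; torch.nn is imported
# locally because this module itself only needs torch.optim.
def _demo_get_optimizer():
    import torch.nn as nn
    model = nn.Linear(4, 2)
    # "lr=0.1-0.01" requests a stepwise schedule: 0.1 for the first half of training,
    # then 0.01 (repeat_to spreads the two values over the 4 epochs)
    optimizer, schedule = get_optimizer(model.parameters(), "sgd,lr=0.1-0.01,momentum=0.9", epochs=4)
    return schedule  # [0.1, 0.1, 0.01, 0.01]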
PRIVACY_ALPHAS = [1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64))
def getNoiseMultiplier(epsilon, delta, q, steps):
sigma_min, sigma_max = 0.01, 10
while sigma_max - sigma_min > 0.01:
sigma = (sigma_min + sigma_max) / 2
rdp = privacy_analysis.compute_rdp(q, sigma, steps, PRIVACY_ALPHAS)
eps = privacy_analysis.get_privacy_spent(PRIVACY_ALPHAS, rdp, delta)[0]
if eps < epsilon:
sigma_max = sigma
else:
sigma_min = sigma
logger.info(f"Inferred σ={sigma} for ε={epsilon}, δ={delta}")
logger.info("__log__:%s" % json.dumps({"noise_multiplier": sigma}))
return sigma
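# Illustrative call with assumed hyper-parameters: infer the DP-SGD noise multiplier for
# a target epsilon of 3 at delta=1e-5, with batches of 256 out of 50k points for 20 epochs.
def _demo_noise_multiplier():
    q = 256 / 50000
    steps = int(20 * 50000 / 256)
    return getNoiseMultiplier(epsilon=3.0, delta=1e-5, q=q, steps=steps)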
def create_privacy_engine(model, params, n_data):
if params.private:
if params.noise_multiplier is None:
_n_data = n_data# if params.privacy_fake_samples is None else params.privacy_fake_samples
sample_rate = params.batch_size / _n_data
steps = params.epochs * _n_data / params.batch_size
params.noise_multiplier = getNoiseMultiplier(params.privacy_epsilon, params.privacy_delta, sample_rate, steps)
if params.max_grad_norm == "mean":
max_grad_norm = 1.0
else:
max_grad_norm = params.max_grad_norm
else:
max_grad_norm = float("inf")
params.noise_multiplier = 0
if params.private or params.log_gradients:
if params.log_gradients and not params.private:
logger.info("Creating privacy engine to compute per sample gradients and log them.")
privacy_engine = PrivacyEngine(
model,
batch_size=params.batch_size,
sample_size=n_data,
alphas=PRIVACY_ALPHAS,
noise_multiplier=params.noise_multiplier,
max_grad_norm=max_grad_norm
)
else:
privacy_engine = None
return privacy_engine | calibration_membership-main | utils/optimizer.py |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
from collections import OrderedDict
import functools
import os
import time
import numpy as np
import torch
from torch.nn import functional as F
from logging import getLogger
from .optimizer import get_optimizer, create_privacy_engine
from utils.evaluator import accuracy
logger = getLogger()
def log_grad(trainer, param_name, *args, **kwargs):
if param_name is not None:
g = kwargs['per_sample_grad']
trainer.current_grad_sample.append(g.view(g.size(0), -1).clone())
else:
trainer.current_grad_sample = torch.cat(trainer.current_grad_sample, dim=1)
class Trainer:
def __init__(self, model, params, n_data=-1):
# model / params
self.model = model
self.params = params
# set optimizers
self.n_data = n_data
if params.private and params.privacy_delta is None:
params.privacy_delta = 1 / n_data
print(f"Setting privacy delta to {params.privacy_delta}")
self.privacy_engine = create_privacy_engine(model, params, n_data=n_data)
self.optimizer, self.schedule = get_optimizer(model.parameters(), params.optimizer, params.epochs)
if self.privacy_engine is not None:
self.privacy_engine.attach(self.optimizer)
if params.log_gradients:
self.privacy_engine.clipper.set_on_batch_clip_func(functools.partial(log_grad, self))
self.current_grad_sample = []
self.all_grad_samples = None
# training statistics
self.epoch = 0
self.batch=0
self.indices = []
self.n_iter = 0
self.step = 0
self.stats = OrderedDict(
[('processed_i', 0)] +
[('train_loss', [])] +
[('time', [])]+[('train_acc', [])]
)
self.last_time = time.time()
def update_learning_rate(self):
"""
Sets the learning rate to follow the learning schedule
"""
if self.schedule is None:
return
lr = self.schedule[self.epoch]
logger.info("New learning rate for %f" % lr)
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
def end_step(self):
self.n_iter += 1
self.step += 1
self.print_stats()
def print_stats(self):
"""
Prints statistics about the training.
Statistics are computed on batches since the last print.
(i.e. if printing every 5 batches then it shows speed on the last 5 batches)
"""
if self.n_iter % self.params.print_freq != 0:
return
s_iter = f"Batch {self.n_iter} - "
s_stat = ''
s_stat += ' || '.join([
'{}: {:7.4f}'.format(k, np.mean(v[-self.params.print_freq:])) for k, v in self.stats.items()
if type(v) is list and len(v) > 0
])
# learning rates
s_lr = ""
s_lr = s_lr + (" - LR: ") + " / ".join("{:.4e}".format(group['lr']) for group in self.optimizer.param_groups)
# processing speed
new_time = time.time()
diff = new_time - self.last_time
s_speed = "{:7.2f} images/s - ".format(self.stats['processed_i'] * 1.0 / diff)
self.stats['processed_i'] = 0
self.last_time = new_time
# log speed + stats + learning rate
logger.info(s_iter + s_speed + s_stat + s_lr)
def save(self, name):
"""
Save the model.
"""
path = os.path.join(self.params.dump_path, name)
state_dict = self.state_dict()
logger.info("Saving model to %s ..." % path)
torch.save(state_dict, path)
def save_interim_model(self, name, idx):
"""
        Save the model together with the current batch ids.
"""
path = os.path.join(self.params.dump_path, name)
state_dict = self.state_dict()
logger.info("Saving model and batch ids to %s ..." % path)
torch.save([state_dict, idx], path)
def state_dict(self):
r"""
Returns state_dict, i.e. model parameters as well as general parameters
"""
model = self.model
data = {
'model': model.state_dict(),
'epoch': self.epoch,
'params': vars(self.params)
}
data['optimizer'] = self.optimizer.state_dict()
if self.params.private:
data['privacy_engine'] = self.privacy_engine.state_dict()
if self.params.log_gradients:
data['gradients'] = self.all_grad_samples
return data
def reload_checkpoint(self):
"""
Reload a checkpoint if we find one.
"""
checkpoint_path = os.path.join(self.params.dump_path, "checkpoint.pth")
if not os.path.isfile(checkpoint_path):
return
logger.warning('Reloading checkpoint from %s ...' % checkpoint_path)
device='cuda:'+str(torch.cuda.current_device())
state_dict = torch.load(checkpoint_path, map_location=device)
new_state_dict = OrderedDict()
for k, v in state_dict["model"].items():
if k[:7]=='module.': # remove `module.`
new_state_dict[k[7:]] = v
else:
new_state_dict[k]=v
self.model.load_state_dict(new_state_dict)
# else:
# new_model.load_state_dict(state_dict_new['model'])
# self.model.load_state_dict(state_dict['model'], strict=False)
if self.params.private:
self.privacy_engine.load_state_dict(state_dict['privacy_engine'])
# reload optimizer
self.optimizer.load_state_dict(state_dict['optimizer'])
# reload stats
self.epoch = state_dict['epoch'] + 1
logger.warning('Checkpoint reloaded. Resuming at epoch %i ...' % self.epoch)
def end_epoch(self, scores):
# Update learning rate
self.update_learning_rate()
# Reset statistics
for k in self.stats.keys():
if type(self.stats[k]) is list:
del self.stats[k][:]
self.epoch += 1
self.batch = 0
# Save checkpoints
self.save("checkpoint.pth")
if self.params.save_periodic > 0 and self.epoch % self.params.save_periodic == 0:
self.save("periodic-%d.pth" % self.epoch)
self.all_grad_samples = None
def maybe_log_gradients(self, idx):
# Log per sample gradient
if self.params.log_gradients:
if self.all_grad_samples is None:
self.all_grad_samples = torch.zeros(self.n_data, self.current_grad_sample.size(1), dtype=self.current_grad_sample.dtype, device=torch.device('cpu'))
self.all_grad_samples[idx] = self.current_grad_sample.cpu()
self.current_grad_sample = []
def maybe_log_model(self, idx):
#log model before gradient updates and minibatch ids
if self.params.log_batch_models:
#save model
model_file_name='checkpoint_epoch_'+str(self.epoch)+'_batch_'+ str(self.batch)+'.pth'
self.save_interim_model(model_file_name, idx)
#save ids
def maybe_log_epoch_model(self, idx):
#log model before gradient updates and minibatch ids
if self.params.log_epoch_models:
#save model
model_file_name='checkpoint_epoch_'+str(self.epoch)+'.pth'
self.save_interim_model(model_file_name, idx)
def lm_step(self, idx, sentence):
"""
Language modeling step.
"""
start = time.time()
self.model.train()
sentence = sentence.cuda(non_blocking=True)
# Forward + loss
output = self.model(sentence[:, 1:])
loss = F.cross_entropy(output.view(-1, output.size(-1)), sentence[:, :-1].reshape(-1), reduction='mean')
# Gradient step
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.maybe_log_gradients(idx)
self.batch+=1
# statistics
self.stats['processed_i'] += self.params.batch_size
self.stats['train_loss'].append(loss.item())
self.stats['time'].append(time.time() - start)
def classif_step(self, idx, images, targets):
"""
Classification step.
"""
start = time.time()
self.maybe_log_model(idx)
if self.batch==0:
self.maybe_log_epoch_model(idx)
self.model.train()
images = images.cuda(non_blocking=True)
# Forward + loss
output = self.model(images)
loss = F.cross_entropy(output, targets.cuda(non_blocking=True), reduction='mean')
# Gradient step
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.maybe_log_gradients(idx)
self.batch+=1
# statistics
self.stats['processed_i'] += self.params.batch_size
self.stats['train_loss'].append(loss.item())
self.stats['time'].append(time.time() - start)
self.stats['train_acc'].append(accuracy(output.cpu(), targets, topk=tuple([1]))[0])
def get_scores(self):
scores = {
"speed": self.params.batch_size / np.mean(self.stats['time']),
"learning_rate": self.schedule[self.epoch]
}
if self.params.private:
scores["privacy_epsilon"] = self.privacy_engine.get_privacy_spent(1 / self.n_data)[0]
for stat_name in self.stats.keys():
if type(self.stats[stat_name]) is list and len(self.stats[stat_name]) >= 1:
scores[stat_name] = np.mean(self.stats[stat_name])
return scores
| calibration_membership-main | utils/trainer.py |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
from logging import getLogger
from collections import OrderedDict
import numpy as np
import torch
from torch.nn import functional as F
logger = getLogger()
class Evaluator(object):
    # TODO: get rid of params and only give model in eval()
def __init__(self, model, params):
"""
Initialize evaluator.
"""
self.model = model
self.params = params
@torch.no_grad()
def run_all_evals(self, evals, data_loader, *args, **kwargs):
"""
Run all evaluations.
"""
assert type(evals) is list
scores = OrderedDict()
if evals is None or 'classif' in evals:
self.eval_classif(scores, data_loader)
return scores
def eval_classif(self, scores, data_loader):
"""
Evaluate classification.
"""
params = self.params
self.model.eval()
# stats
losses=[]
accuracies = []
topk = [1, 5, 10, 20, 50, 100, 200, 500]
topk = [k for k in topk if k <= params.num_classes]
for _, images, targets in data_loader:
images = images.cuda()
output = self.model(images)
loss = F.cross_entropy(output, targets.cuda(non_blocking=True), reduction='mean')
accuracies.append(accuracy(output.cpu(), targets, topk=tuple(topk)))
losses.append(loss.item())
# loss
scores['valid_loss']=np.mean(losses)
# accuracy
for i_k, k in enumerate(topk):
scores['valid_top%d_acc' % k] = np.mean([x[i_k] for x in accuracies])
def accuracy(output, target, topk=(1,)):
"""
Computes the accuracy over the k top predictions for the specified values of k.
"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].float().sum()
res.append(correct_k.mul_(100.0 / batch_size).item())
return res
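# Minimal sketch of the top-k accuracy helper: three samples over four classes, all
# predicted correctly at top-1, so both requested values come back as 100.0.
def _demo_accuracy():
    output = torch.tensor([[0.1, 0.9, 0.0, 0.0],
                           [0.8, 0.1, 0.1, 0.0],
                           [0.2, 0.1, 0.3, 0.4]])
    target = torch.tensor([1, 0, 3])
    return accuracy(output, target, topk=(1, 2))  # [100.0, 100.0]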
| calibration_membership-main | utils/evaluator.py |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import torch.nn as nn
import torch.nn.functional as F
class KLLeNet(nn.Module):
def __init__(self, params):
super().__init__()
self.conv1 = nn.Conv2d(params.in_channels, 20, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(20, 50, 5)
#for cifar it's 5x5
#for mnist it's 4x4
#for lfw it's 9x6
self.fc1 = nn.Linear(50 * 4 * 4, 500)
self.fc2 = nn.Linear(500, params.num_classes)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x=self.dropout1(x)
x = self.pool(F.relu(self.conv2(x)))
x=self.dropout1(x)
x = x.view(-1, 50 * 4 * 4)
x = F.relu(self.fc1(x))
x=self.dropout2(x)
x = self.fc2(x)
return x
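# Minimal shape check (illustrative only): with the 50 * 4 * 4 flatten size above, the
# model expects 28x28 inputs such as MNIST; argparse.Namespace stands in for the usual
# params object.
def _demo_kllenet_shapes():
    import argparse
    import torch
    params = argparse.Namespace(in_channels=1, num_classes=10)
    model = KLLeNet(params)
    logits = model(torch.randn(2, 1, 28, 28))
    return logits.shape  # torch.Size([2, 10])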
| calibration_membership-main | models/KLlenet.py |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import torch.nn as nn
import torch.nn.functional as F
class LinearNet(nn.Module):
def __init__(self, params):
super().__init__()
self.fc = nn.Linear(params.data_num_dimensions, params.num_classes)
def forward(self, x):
x = self.fc(x)
x=F.softmax(x)
return x
| calibration_membership-main | models/linear.py |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import torch.nn as nn
import torch.nn.functional as F
from .lenet import LeNet
from .KLlenet import KLLeNet
from .lstmlm import LSTMLM
from .alexnet import AlexNet
from .linear import LinearNet
from .mlp import MLP
import torchvision.models as models
def build_model(params):
if params.architecture == "lenet":
return LeNet(params)
elif params.architecture == "kllenet":
return KLLeNet(params)
elif params.architecture == "linear":
return LinearNet(params)
elif params.architecture == "mlp":
return MLP(params)
elif params.architecture=="alexnet":
return AlexNet(params)
elif params.architecture == "lstm":
return LSTMLM(params)
elif params.architecture == "resnet18":
return models.resnet18(pretrained=False)
elif params.architecture == "smallnet":
return nn.Sequential(
nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AdaptiveAvgPool2d((1, 1)),
nn.Flatten(start_dim=1, end_dim=-1),
nn.Linear(128, params.num_classes, bias=True),
)
elif params.architecture == "leaks":
return nn.Sequential(
nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Conv2d(32, 128, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AdaptiveAvgPool2d((1, 1)),
nn.Flatten(start_dim=1, end_dim=-1),
nn.Linear(128, params.num_classes, bias=True),
)
| calibration_membership-main | models/__init__.py |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import torch.nn as nn
import torch.nn.functional as F
class MLP(nn.Module):
def __init__(self, params):
super().__init__()
self.fc = nn.Linear(params.data_num_dimensions, 2*params.data_num_dimensions)
self.fc2 = nn.Linear(2*params.data_num_dimensions, params.num_classes)
def forward(self, x):
x = F.relu(self.fc(x))
x = self.fc2(x)
x = F.softmax(x)
return x
| calibration_membership-main | models/mlp.py |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import torch.nn as nn
import torch.nn.functional as F
class LeNet(nn.Module):
def __init__(self, params):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, params.num_classes)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
| calibration_membership-main | models/lenet.py |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
from opacus.layers import DPLSTM
import torch
import torch.nn as nn
class LSTMLM(nn.Module):
def __init__(self, params):
super().__init__()
self.embedding = nn.Embedding(params.n_vocab, params.embedding_dim)
assert not params.private
if params.private or params.log_gradients:
self.lstm = DPLSTM(input_size=params.embedding_dim, hidden_size=params.hidden_dim, num_layers=params.num_layers)
else:
self.lstm = nn.LSTM(input_size=params.embedding_dim, hidden_size=params.hidden_dim, num_layers=params.num_layers)
self.prediction = nn.Linear(params.embedding_dim, params.n_vocab)
def forward(self, x):
x = self.embedding(x)
output, (hn, cn) = self.lstm(x)
return self.prediction(output) | calibration_membership-main | models/lstmlm.py |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
class AlexNet(nn.Module):
def __init__(self, params):
super(AlexNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, params.num_classes),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
| calibration_membership-main | models/alexnet.py |
calibration_membership-main | attacks/__init__.py |
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import os
from posixpath import join
import sys
import inspect
import math
from random import randrange
import pickle
import copy
import numpy as np
import pandas as pd
import argparse
from collections import OrderedDict
from sklearn.metrics import roc_auc_score
import sklearn.metrics as metrics
from sklearn.datasets import fetch_openml
from sklearn.impute import SimpleImputer
from sklearn import preprocessing
import torch
import torchvision
import torchvision.transforms as transforms
import torchvision.models as models
from torch.utils.data import Subset
from training.image_classification import train
from utils.masks import to_mask, evaluate_masks
from torch.nn import functional as F
from models import build_model
from utils.misc import bool_flag
from utils.masks import to_mask
from attacks.privacy_attacks import get_parser
from opacus.grad_sample import GradSampleModule
from cleverhans.torch.attacks.hop_skip_jump_attack import hop_skip_jump_attack
from cleverhans.torch.attacks.carlini_wagner_l2 import carlini_wagner_l2
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
def get_parser():
"""
Generate a parameters parser.
"""
parser = argparse.ArgumentParser(description='Privacy attack parameters')
# config parameters
parser.add_argument("--dump_path", type=str, default=None) # model saving location
parser.add_argument('--print_freq', type=int, default=50) # training printing frequency
parser.add_argument("--save_periodic", type=int, default=0) # training saving frequency
# attack parameters
parser.add_argument("--model_path", type=str, default="model") # path to the private model
parser.add_argument("--attack_type", type=str, default="loss") # type of auxiliary attack
parser.add_argument("--aux_epochs", type=int, default=20) # number of auxiliary training epochs
parser.add_argument("--num_aux", type=int, default=1) # number of auxiliary models
parser.add_argument("--aug_style", type=str, default="mean") # combination method for augmented data values
parser.add_argument("--aux_style", type=str, default="sum") # combination method for multiple aux. model values
parser.add_argument("--public_data", type=str, default="train") # specify which part of the public data to use for aux model training (e.g. train is the training mask, rand50 is a random selection of the public data)
parser.add_argument("--norm_type", type=str, default=None) # norm for gradient norm
parser.add_argument("--num_points", type=int, default=10) # number of points to use for the label-only attack
parser.add_argument("--clip_min", type=float, default=0) # minimum value for adversarial feature in label-only attack
parser.add_argument("--clip_max", type=float, default=1) # maximum value for adversarial feature in label-only attack
# Data parameters
parser.add_argument("--data_root", type=str, default="data") # path to the data
parser.add_argument("--dataset", type=str, choices=["cifar10", "imagenet", "cifar100", "gaussian","credit", "hep", "adult", "mnist", "lfw"], default="cifar10")
parser.add_argument("--mask_path", type=str, required=True) # path to the data mask
parser.add_argument('--n_data', type=int, default=500) # specify number of data points for gaussian data
parser.add_argument('--data_num_dimensions', type=int, default=75) # number of features for non-image data
parser.add_argument('--random_seed', type=int, default=10) # seed for gaussian data
parser.add_argument("--num_classes", type=int, default=10) # number of classes for classification task
parser.add_argument("--in_channels", type=int, default=3) # number of input channels for image data
# Model parameters
parser.add_argument("--architecture", choices=["lenet", "smallnet", "resnet18", "kllenet","linear", "mlp"], default="lenet")
# training parameters
parser.add_argument("--aug", type=bool_flag, default=False) # data augmentation flag
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--epochs", type=int, default=50)
parser.add_argument("--optimizer", default="sgd,lr=0.1,momentum=0.9")
parser.add_argument("--num_workers", type=int, default=2)
parser.add_argument("--log_gradients", type=bool_flag, default=False)
parser.add_argument("--log_batch_models", type=bool_flag, default=False) # save model for each batch of data
parser.add_argument("--log_epoch_models", type=bool_flag, default=False) # save model for each training epoch
# privacy parameters
parser.add_argument("--private", type=bool_flag, default=False) # privacy flag
parser.add_argument("--noise_multiplier", type=float, default=None)
parser.add_argument("--privacy_epsilon", type=float, default=None)
parser.add_argument("--privacy_delta", type=float, default=None)
parser.add_argument("--max_grad_norm", type=float, default=1.0)
    # multi-GPU parameters
parser.add_argument("--local_rank", type=int, default=-1)
parser.add_argument("--master_port", type=int, default=-1)
parser.add_argument("--debug_slurm", type=bool_flag, default=False)
return parser
def adult_data_transform(df):
"""
transform adult data.
"""
binary_data = pd.get_dummies(df)
feature_cols = binary_data[binary_data.columns[:-2]]
scaler = preprocessing.StandardScaler()
data = pd.DataFrame(scaler.fit_transform(feature_cols), columns=feature_cols.columns)
return data
def get_dataset(params):
"""
load data for privacy attacks
"""
if params.dataset=='cifar10':
if params.aug==True:
print('Using data augmentation')
augmentations = [transforms.RandomCrop(32, padding=4),transforms.RandomHorizontalFlip()]
normalize = [transforms.ToTensor(),transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
model_transform = transforms.Compose(augmentations + normalize)
else:
print('Not using data augmentation')
normalize = [transforms.ToTensor(),transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
model_transform = transforms.Compose(normalize)
return torchvision.datasets.CIFAR10(root=params.data_root, train=True, download=True, transform=model_transform)
if params.dataset=='mnist':
transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.1307,), (0.3081,))])
return torchvision.datasets.MNIST(root=params.data_root, train=True, download=True, transform=transform)
elif params.dataset=='imagenet':
if params.aug==True:
print('Using data augmentation to train model')
augmentations = [transforms.Resize(256),transforms.RandomResizedCrop(224),transforms.RandomHorizontalFlip()]
normalize = [transforms.ToTensor(),transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]
transform = transforms.Compose(augmentations + normalize)
else:
print('Not using data augmentation to train model')
transform = transforms.Compose( [transforms.Resize(256),transforms.CenterCrop(224),transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])])
dataset = torchvision.datasets.ImageFolder(root=params.data_root+'/train',transform=transform)
return dataset
elif params.dataset=='cifar100':
if params.aug:
augmentations = [transforms.RandomCrop(32, padding=4),transforms.RandomHorizontalFlip()]
normalize = [transforms.ToTensor(),transforms.Normalize(mean=[n/255 for n in [129.3, 124.1, 112.4]], std=[n/255 for n in [68.2, 65.4, 70.4]])]
transform = transforms.Compose(augmentations + normalize)
else:
transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize(mean=[n/255 for n in [129.3, 124.1, 112.4]], std=[n/255 for n in [68.2, 65.4, 70.4]])])
dataset = torchvision.datasets.CIFAR100(root=params.data_root, train=True, download=True, transform=transform)
return dataset
elif params.dataset=='credit':
cred=fetch_openml('credit-g')
data = SimpleImputer(missing_values=np.nan, strategy='mean', copy=True).fit(cred.data).transform(cred.data)
target = preprocessing.LabelEncoder().fit(cred.target).transform(cred.target)
X=data
norm = np.max(np.concatenate((-1*X.min(axis=0)[np.newaxis], X.max(axis=0)[np.newaxis]),axis=0).T, axis=1).astype('float32')
data=np.divide(data,norm)
data=torch.tensor(data).float()
target=torch.tensor(target).long()
ids=np.arange(1000)[:800]
final_data = []
for i in ids:
final_data.append([data[i], target[i]])
# norm=np.max
params.num_classes = 2
# dataloader = torch.utils.data.DataLoader(final_data, shuffle=True, batch_size=params.batch_size)
# n_data=len(final_data)
return final_data
elif params.dataset=='hep':
hep=fetch_openml('hepatitis')
data = SimpleImputer(missing_values=np.nan, strategy='mean', copy=True).fit(hep.data).transform(hep.data)
target = preprocessing.LabelEncoder().fit(hep.target).transform(hep.target)
X=data
norm = np.max(np.concatenate((-1*X.min(axis=0)[np.newaxis], X.max(axis=0)[np.newaxis]),axis=0).T, axis=1).astype('float32')
data=np.divide(data,norm)
data=torch.tensor(data).float()
target=torch.tensor(target).long()
ids=np.arange(155)[:124]
final_data = []
for i in ids:
final_data.append([data[i], target[i]])
params.num_classes = 2
return final_data
elif params.dataset == 'adult':
columns = ["age", "workClass", "fnlwgt", "education", "education-num","marital-status", "occupation", "relationship","race", "sex", "capital-gain", "capital-loss", "hours-per-week", "native-country", "income"]
train_data = pd.read_csv(params.data_root+'/adult.data', names=columns, sep=' *, *', na_values='?')
test_data = pd.read_csv(params.data_root+'/adult.test', names=columns, sep=' *, *', skiprows=1, na_values='?')
original_train=train_data
original_test=test_data
num_train = len(original_train)
original = pd.concat([original_train, original_test])
labels = original['income']
labels = labels.replace('<=50K', 0).replace('>50K', 1)
labels = labels.replace('<=50K.', 0).replace('>50K.', 1)
# Remove target
del original["income"]
data = adult_data_transform(original)
train_data = data[:num_train]
train_labels = labels[:num_train]
test_data = data[num_train:]
test_labels = labels[num_train:]
test_data=torch.tensor(test_data.to_numpy()).float()
train_data=torch.tensor(train_data.to_numpy()).float()
test_labels=torch.tensor(test_labels.to_numpy(dtype='int64')).long()
train_labels=torch.tensor(train_labels.to_numpy(dtype='int64')).long()
final_data = []
for i in np.arange(len(train_data)):
final_data.append([train_data[i], train_labels[i]])
return final_data
def get_uncalibrated_gradnorm(params, mask):
"""
return uncalibrated gradient norm values for data indicated by the mask.
"""
#load the dataset
dataset = get_dataset(params)
#initialize to 0
grad_norms=np.zeros(len(mask))
#get the final model
final_model=build_model(params)
final_model_path = os.path.join(params.model_path, "checkpoint.pth")
state_dict_final = torch.load(final_model_path, map_location='cuda:0')
if params.dataset=='imagenet':
new_state_dict = OrderedDict()
for k, v in state_dict_final["model"].items():
if k[:7]=='module.': # remove `module.`
new_state_dict[k[7:]] = v
else:
new_state_dict[k]=v
final_model.load_state_dict(new_state_dict)
else:
final_model.load_state_dict(state_dict_final['model'])
final_model=final_model.cuda()
original_model=[]
for p in final_model.parameters():
original_model.append(p.view(-1))
original_model=torch.cat(original_model)
#get the appropriate ids to dot product
ids=(mask==True).nonzero().flatten().numpy()
#load 1-by-1. See get_calibrated_gradnorm for batched method using Opacus gradsamplemodule.
for id in ids:
#load each image and target
image = dataset[id][0].unsqueeze(0)
image = image.cuda(non_blocking=True)
target = torch.tensor(dataset[id][1]).unsqueeze(0)
target = target.cuda(non_blocking=True)
#reload the original batch model, if imagenet may need to rename keys.
if params.dataset=='imagenet':
new_state_dict = OrderedDict()
for k, v in state_dict_final["model"].items():
                if k[:7]=='module.': # remove "module."
new_state_dict[k[7:]] = v
else:
new_state_dict[k]=v
final_model.load_state_dict(new_state_dict)
else:
final_model.load_state_dict(state_dict_final['model'])
        # zero out any existing gradients on the model before the backward pass
final_model.zero_grad()
#get the gradient
output=final_model(image)
loss=F.cross_entropy(output, target)
loss.backward()
grads=[]
for param in final_model.parameters():
grads.append(param.grad.view(-1))
grads = torch.cat(grads)
g=grads.cpu().numpy()
grad_norms[id]=np.linalg.norm(g)
return grad_norms
def get_calibrated_gradnorm(params, private_model, private_params, attack_model,attack_params, ids, mask, aug_style='mean',norm_type=None):
"""
return calibrated gradient norm values.
"""
#load the dataset
dataset = get_dataset(params)
#initialize to 0
grad_norms=np.zeros(len(mask))
if params.aug:
batch_vals=[[0] for i in np.arange(len(mask))]
for t in np.arange(10):
batched_ids=np.array_split(ids, 1000)
for b_ids in batched_ids:
image_data=torch.stack([dataset[i][0] for i in b_ids])
image_data=image_data.cuda()
target_data=torch.stack([torch.tensor(dataset[i][1]) for i in b_ids])
target_data=target_data.cuda()
private_model.zero_grad()
out_private=private_model(image_data)
loss_private=F.cross_entropy(out_private, target_data)
loss_private.backward()
attack_model.zero_grad()
out_attack=attack_model(image_data)
loss_attack=F.cross_entropy(out_attack, target_data)
loss_attack.backward()
for i,id in enumerate(b_ids):
private_grads=[]
for param in private_model.parameters():
private_grads.append(param.grad_sample[i].view(-1))
private_grads = torch.cat(private_grads)
attack_grads=[]
for param in attack_model.parameters():
attack_grads.append(param.grad_sample[i].view(-1))
attack_grads = torch.cat(attack_grads)
g_private=private_grads.cpu().numpy()
g_attack=attack_grads.cpu().numpy()
if norm_type=='inf':
batch_vals[id].append(max(g_private-g_attack))
else:
if norm_type=='1':
norm_type=1
elif norm_type=='2':
norm_type=2
elif norm_type=='3':
norm_type=3
batch_vals[id].append(np.linalg.norm(g_private, ord=norm_type)-np.linalg.norm(g_attack,ord=norm_type))
for id in ids:
if aug_style=='mean':
grad_norms[id]=np.mean(batch_vals[id][1:])
elif aug_style=='max':
grad_norms[id]=np.max(batch_vals[id][1:])
elif aug_style=='median':
grad_norms[id]=np.median(batch_vals[id][1:])
elif aug_style=='std':
grad_norms[id]=np.std(batch_vals[id][1:])
else:
batched_ids=np.array_split(ids, 1000)
for b_ids in batched_ids:
image_data=torch.stack([dataset[i][0] for i in b_ids])
image_data=image_data.cuda()
target_data=torch.stack([torch.tensor(dataset[i][1]) for i in b_ids])
target_data=target_data.cuda()
private_model.zero_grad()
out_private=private_model(image_data)
loss_private=F.cross_entropy(out_private, target_data)
loss_private.backward()
attack_model.zero_grad()
out_attack=attack_model(image_data)
loss_attack=F.cross_entropy(out_attack, target_data)
loss_attack.backward()
for i,id in enumerate(b_ids):
private_grads=[]
for param in private_model.parameters():
private_grads.append(param.grad_sample[i].view(-1))
private_grads = torch.cat(private_grads)
attack_grads=[]
for param in attack_model.parameters():
attack_grads.append(param.grad_sample[i].view(-1))
attack_grads = torch.cat(attack_grads)
g_private=private_grads.cpu().numpy()
g_attack=attack_grads.cpu().numpy()
if norm_type=='inf':
grad_norms[id]=max(g_private-g_attack)
else:
if norm_type=='1':
norm_type=1
elif norm_type=='2':
norm_type=2
elif norm_type=='3':
norm_type=3
grad_norms[id]=np.linalg.norm(g_private, ord=norm_type)-np.linalg.norm(g_attack,ord=norm_type)
return grad_norms
def calibrated_gradient_attack(params):
"""
run a calibrated gradient attack.
"""
#load the masks
known_masks, hidden_masks = {}, {}
hidden_masks['public'], hidden_masks['private']={},{}
known_masks['public'] = torch.load(params.mask_path + "public.pth")
known_masks['private'] = torch.load( params.mask_path + "private.pth")
hidden_masks['private']['train']=torch.load( params.mask_path + "hidden/train.pth")
hidden_masks['private']['heldout'] = torch.load( params.mask_path + "hidden/heldout.pth")
hidden_masks['public']['train']=torch.load( params.mask_path + "hidden/public_train.pth")
hidden_masks['public']['heldout'] = torch.load( params.mask_path + "hidden/public_heldout.pth")
if params.public_data=='train':
print('Using public training data for auxiliary model')
attack_model=train(params, hidden_masks['public']['train'])
elif params.public_data[:4]=='rand':
print('Using random subset for auxiliary model')
public_ids=(known_masks['public']==True).nonzero().flatten().numpy()
prop_selected=float(params.public_data[4:])/100
num_selected=math.ceil(prop_selected*len(public_ids))
permuted_ids=np.random.permutation(public_ids)
aux_data_mask=to_mask(len(known_masks['public']),permuted_ids[:num_selected])
print('Number of public model training points', len((aux_data_mask==True).nonzero().flatten().numpy()))
attack_model=train(params, aux_data_mask)
else:
print('Using all public data for auxiliary model')
attack_model=train(params, known_masks['public'])
attack_model=attack_model.cuda()
#get the attack model parameters
original_attack_model=[]
for p in attack_model.parameters():
original_attack_model.append(p.view(-1))
original_attack_model=torch.cat(original_attack_model)
#get the final model parameters
private_model=build_model(params)
private_model_path = os.path.join(params.model_path, "checkpoint.pth")
state_dict_private = torch.load(private_model_path)
private_model.load_state_dict(state_dict_private['model'])
private_model=private_model.cuda()
original_private_model=[]
for p in private_model.parameters():
original_private_model.append(p.view(-1))
original_private_model=torch.cat(original_private_model)
#get the appropriate ids to dot product
private_train_ids=(hidden_masks['private']['train']==True).nonzero().flatten().numpy()
private_heldout_ids=(hidden_masks['private']['heldout']==True).nonzero().flatten().numpy()
# reload model to allow use of gradsamplemodule
new_model=build_model(params)
new_model_path = os.path.join(params.dump_path, "checkpoint.pth")
state_dict_new = torch.load(new_model_path)
new_model.load_state_dict(state_dict_new['model'])
new_model=new_model.cuda()
private_model=GradSampleModule(private_model)
attack_model=GradSampleModule(new_model)
train_dots=get_calibrated_gradnorm(params, private_model, original_private_model, attack_model,original_attack_model,private_train_ids,hidden_masks['private']['train'])
heldout_dots=get_calibrated_gradnorm(params, private_model, original_private_model, attack_model,original_attack_model,private_heldout_ids,hidden_masks['private']['heldout'])
return train_dots, heldout_dots
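# Note (not part of the original pipeline): each *_attack helper in this file returns a
# pair (train_scores, heldout_scores) of per-example membership scores for the private
# train and heldout splits. A minimal, hypothetical way to summarize attack strength,
# assuming `member_scores` and `nonmember_scores` have already been extracted as 1-D
# arrays for the two splits:
#   import numpy as np
#   from sklearn.metrics import roc_auc_score
#   labels = np.concatenate([np.ones_like(member_scores), np.zeros_like(nonmember_scores)])
#   scores = np.concatenate([member_scores, nonmember_scores])
#   auc = roc_auc_score(labels, scores)   # higher AUC = more membership leakage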
def get_calibrated_losses(params, private_model, attack_model, ids, mask, aug_style='mean'):
"""
return calibrated losses
"""
#load the dataset
dataset = get_dataset(params)
    #initialize losses to 0
losses=np.zeros(len(mask))
if params.aug:
summed_loss=[[0] for i in np.arange(len(mask))]
for j in np.arange(10):
print('aug',j)
batched_ids=np.array_split(ids, 1000)
for b_ids in batched_ids:
image_data=torch.stack([dataset[i][0] for i in b_ids])
image_data=image_data.cuda()
target_data=torch.stack([torch.tensor(dataset[i][1]) for i in b_ids])
target_data=target_data.cuda()
out_private=private_model(image_data)
out_attack=attack_model(image_data)
for i,id in enumerate(b_ids):
output=out_private[i].unsqueeze(0)
loss=F.cross_entropy(output, target_data[i].unsqueeze(0))
attack_output=out_attack[i].unsqueeze(0)
attack_loss=F.cross_entropy(attack_output, target_data[i].unsqueeze(0))
loss_diff=loss-attack_loss
summed_loss[id].append(loss_diff.cpu().detach().numpy())
for id in ids:
if aug_style=='mean':
losses[id]=np.mean(summed_loss[id][1:])
elif aug_style=='max':
losses[id]=np.max(summed_loss[id][1:])
elif aug_style=='median':
losses[id]=np.median(summed_loss[id][1:])
elif aug_style=='std':
losses[id]=np.std(summed_loss[id][1:])
else:
for id in ids:
#load each image and target
image = dataset[id][0].unsqueeze(0)
image = image.cuda(non_blocking=True)
target = torch.tensor(dataset[id][1]).unsqueeze(0)
target = target.cuda(non_blocking=True)
#get the loss
output=private_model(image)
loss=F.cross_entropy(output, target)
attack_output=attack_model(image)
attack_loss=F.cross_entropy(attack_output, target)
            losses[id]=(loss-attack_loss).item()
return losses
def get_calibrated_confidences(params, private_model, attack_model, ids, mask, aug_style='mean'):
"""
return calibrated confidences.
"""
#load the dataset
dataset = get_dataset(params)
    #initialize confidences to 0
confidences=np.zeros(len(mask))
if params.aug:
summed_confs=[[0] for i in np.arange(len(mask))]
for j in np.arange(10):
print('Aug', j)
images=torch.stack([dataset[i][0] for i in ids])
images=images.cuda()
log_softmax = torch.nn.LogSoftmax(dim=1)
output=private_model(images)
attack_output=attack_model(images)
log_output=log_softmax(output)
log_attack_output=log_softmax(attack_output)
private_confidences,_=torch.max(log_output,dim=1)
attack_confidences,_=torch.max(log_attack_output,dim=1)
confs=private_confidences-attack_confidences
confs=confs.cpu().detach().numpy()
for i,id in enumerate(ids):
summed_confs[id].append(confs[i])
for id in ids:
if aug_style=='mean':
confidences[id]=np.mean(summed_confs[id][1:])
elif aug_style=='max':
confidences[id]=np.max(summed_confs[id][1:])
elif aug_style=='median':
confidences[id]=np.median(summed_confs[id][1:])
elif aug_style=='std':
confidences[id]=np.std(summed_confs[id][1:])
else:
images=torch.stack([dataset[i][0] for i in ids])
images=images.cuda()
log_softmax = torch.nn.LogSoftmax(dim=1)
output=private_model(images)
attack_output=attack_model(images)
log_output=log_softmax(output)
log_attack_output=log_softmax(attack_output)
private_confidences,_=torch.max(log_output,dim=1)
attack_confidences,_=torch.max(log_attack_output,dim=1)
        confidences=private_confidences-attack_confidences
        confidences=confidences.cpu().detach().numpy()
return confidences
def calibrated_loss_attack(params):
"""
run a calibrated loss attack.
"""
#load the masks
known_masks, hidden_masks = {}, {}
hidden_masks['public'], hidden_masks['private']={},{}
known_masks['public'] = torch.load(params.mask_path + "public.pth")
known_masks['private'] = torch.load( params.mask_path + "private.pth")
hidden_masks['private']['train']=torch.load( params.mask_path + "hidden/train.pth")
hidden_masks['private']['heldout'] = torch.load( params.mask_path + "hidden/heldout.pth")
hidden_masks['public']['train']=torch.load( params.mask_path + "hidden/public_train.pth")
hidden_masks['public']['heldout'] = torch.load( params.mask_path + "hidden/public_heldout.pth")
if params.public_data=='train':
print('Using public training data for auxiliary model')
attack_model=train(params, hidden_masks['public']['train'])
elif params.public_data[:4]=='rand':
print('Using random subset for auxiliary model')
public_ids=(known_masks['public']==True).nonzero().flatten().numpy()
prop_selected=float(params.public_data[4:])/100
num_selected=math.ceil(prop_selected*len(public_ids))
permuted_ids=np.random.permutation(public_ids)
aux_data_mask=to_mask(len(known_masks['public']),permuted_ids[:num_selected])
print('Number of public model training points', len((aux_data_mask==True).nonzero().flatten().numpy()))
attack_model=train(params, aux_data_mask)
else:
print('Using all public data for auxiliary model')
attack_model=train(params, known_masks['public'])
attack_model=attack_model.cuda()
#get the final model parameters
private_model=build_model(params)
private_model_path = os.path.join(params.model_path, "checkpoint.pth")
state_dict_private = torch.load(private_model_path)
private_model.load_state_dict(state_dict_private['model'])
private_model=private_model.cuda()
#get the appropriate ids to dot product
private_train_ids=(hidden_masks['private']['train']==True).nonzero().flatten().numpy()
private_heldout_ids=(hidden_masks['private']['heldout']==True).nonzero().flatten().numpy()
train_losses=get_calibrated_losses(params, private_model, attack_model,private_train_ids,hidden_masks['private']['train'])
heldout_losses=get_calibrated_losses(params, private_model, attack_model,private_heldout_ids,hidden_masks['private']['heldout'])
return train_losses, heldout_losses
def calibrated_confidence_attack(params):
"""
run a calibrated confidence attack.
"""
#load the masks
known_masks, hidden_masks = {}, {}
hidden_masks['public'], hidden_masks['private']={},{}
known_masks['public'] = torch.load(params.mask_path + "public.pth")
known_masks['private'] = torch.load( params.mask_path + "private.pth")
hidden_masks['private']['train']=torch.load( params.mask_path + "hidden/train.pth")
hidden_masks['private']['heldout'] = torch.load( params.mask_path + "hidden/heldout.pth")
hidden_masks['public']['train']=torch.load( params.mask_path + "hidden/public_train.pth")
hidden_masks['public']['heldout'] = torch.load( params.mask_path + "hidden/public_heldout.pth")
if params.public_data=='train':
print('Using public training data for auxiliary model')
attack_model=train(params, hidden_masks['public']['train'])
elif params.public_data[:4]=='rand':
print('Using random subset for auxiliary model')
public_ids=(known_masks['public']==True).nonzero().flatten().numpy()
prop_selected=float(params.public_data[4:])/100
num_selected=math.ceil(prop_selected*len(public_ids))
permuted_ids=np.random.permutation(public_ids)
aux_data_mask=to_mask(len(known_masks['public']),permuted_ids[:num_selected])
print('Number of public model training points', len((aux_data_mask==True).nonzero().flatten().numpy()))
attack_model=train(params, aux_data_mask)
else:
print('Using all public data for auxiliary model')
attack_model=train(params, known_masks['public'])
attack_model=attack_model.cuda()
#get the final model parameters
private_model=build_model(params)
private_model_path = os.path.join(params.model_path, "checkpoint.pth")
state_dict_private = torch.load(private_model_path)
private_model.load_state_dict(state_dict_private['model'])
private_model=private_model.cuda()
#get the appropriate ids to dot product
private_train_ids=(hidden_masks['private']['train']==True).nonzero().flatten().numpy()
private_heldout_ids=(hidden_masks['private']['heldout']==True).nonzero().flatten().numpy()
train_losses=get_calibrated_confidences(params, private_model, attack_model,private_train_ids,hidden_masks['private']['train'])
heldout_losses=get_calibrated_confidences(params, private_model, attack_model,private_heldout_ids,hidden_masks['private']['heldout'])
return train_losses, heldout_losses
def auxiliary_attack(params, aux_epochs, attack_type='loss', aug_style='mean', norm_type=None, public_data='train', num_aux=1,aux_style='sum'):
"""
run an auxiliary attack, type (loss, grad_norm, conf, dist) specified by attack_type.
"""
#load the masks
known_masks, hidden_masks = {}, {}
hidden_masks['public'], hidden_masks['private']={},{}
known_masks['public'] = torch.load(params.mask_path + "public.pth")
known_masks['private'] = torch.load( params.mask_path + "private.pth")
hidden_masks['private']['train']=torch.load( params.mask_path + "hidden/train.pth")
hidden_masks['private']['heldout'] = torch.load( params.mask_path + "hidden/heldout.pth")
hidden_masks['public']['train']=torch.load( params.mask_path + "hidden/public_train.pth")
hidden_masks['public']['heldout'] = torch.load( params.mask_path + "hidden/public_heldout.pth")
#get the final model parameters
private_model=build_model(params)
private_model_path = os.path.join(params.model_path, "checkpoint.pth")
state_dict_private = torch.load(private_model_path,map_location='cuda:0')
if params.dataset=='imagenet':
new_state_dict = OrderedDict()
for k, v in state_dict_private["model"].items():
if k[:7]=='module.': # remove `module.`
new_state_dict[k[7:]] = v
else:
new_state_dict[k]=v
private_model.load_state_dict(new_state_dict)
else:
private_model.load_state_dict(state_dict_private['model'])
private_model=private_model.cuda()
# updated_params=copy.deepcopy(params)
updated_params=params
updated_params.epochs=updated_params.epochs+aux_epochs
private_train_ids=(hidden_masks['private']['train']==True).nonzero().flatten().numpy()
private_heldout_ids=(hidden_masks['private']['heldout']==True).nonzero().flatten().numpy()
train_losses=np.zeros(len(known_masks['public']))
heldout_losses=np.zeros(len(known_masks['public']))
for i in np.arange(num_aux):
if params.dataset=='cifar10' or params.dataset=='credit' or params.dataset=='hep' or params.dataset=='adult' or params.dataset=='mnist':
model_num=params.model_path[-6:-5]
elif params.dataset=='cifar100':
model_num=params.model_path[-15:-14]
else:
model_num='0'
new_model_path='updated_model_'+str(aux_epochs) +'_'+str(params.batch_size)+'_'+params.optimizer+'_aux_model_'+str(i)+'_num_aux_'+str(num_aux)+'_public_data_'+params.public_data+'_model_'+model_num
if not os.path.isdir(new_model_path):
os.mkdir(new_model_path)
updated_params.dump_path=new_model_path
if updated_params.local_rank!=-1:
updated_params.local_rank=-1
path = os.path.join(updated_params.dump_path, 'checkpoint.pth')
torch.save(state_dict_private, path)
if public_data=='train':
print('Using public training data for auxiliary model')
updated_model=train(updated_params, hidden_masks['public']['train'])
elif public_data[:4]=='rand':
print('Using random subset for auxiliary model')
public_ids=(known_masks['public']==True).nonzero().flatten().numpy()
prop_selected=float(public_data[4:])/100
num_selected=math.ceil(prop_selected*len(public_ids))
permuted_ids=np.random.permutation(public_ids)
aux_data_mask=to_mask(len(known_masks['public']),permuted_ids[:num_selected])
print('Number of public model training points', len((aux_data_mask==True).nonzero().flatten().numpy()))
updated_model=train(updated_params, aux_data_mask)
else:
print('Using all public data for auxiliary model')
updated_model=train(updated_params, known_masks['public'])
updated_model=updated_model.cuda()
new_model=build_model(params)
new_model_path=os.path.join(updated_params.dump_path, "checkpoint.pth")
state_dict_new=torch.load(new_model_path,map_location='cuda:0')
if params.dataset=='imagenet':
new_state_dict = OrderedDict()
for k, v in state_dict_new["model"].items():
if k[:7]=='module.': # remove `module.`
new_state_dict[k[7:]] = v
else:
new_state_dict[k]=v
new_model.load_state_dict(new_state_dict)
else:
new_model.load_state_dict(state_dict_new['model'])
new_model=new_model.cuda()
#get losses
if attack_type=='loss':
train_vals=get_calibrated_losses(params, private_model, updated_model,private_train_ids,hidden_masks['private']['train'], aug_style)
heldout_vals=get_calibrated_losses(params, private_model, updated_model,private_heldout_ids,hidden_masks['private']['heldout'], aug_style)
elif attack_type=='conf':
train_vals=get_calibrated_confidences(params, private_model, updated_model,private_train_ids,hidden_masks['private']['train'], aug_style)
heldout_vals=get_calibrated_confidences(params, private_model, updated_model,private_heldout_ids,hidden_masks['private']['heldout'], aug_style)
elif attack_type=='dist':
private_train_ids=private_train_ids[np.random.choice(len(private_train_ids), size=params.num_points, replace=False)]
private_heldout_ids=private_heldout_ids[np.random.choice(len(private_heldout_ids), size=params.num_points, replace=False)]
train_vals=get_calibrated_distances(params, private_model, updated_model,private_train_ids)
heldout_vals=get_calibrated_distances(params, private_model, updated_model,private_heldout_ids)
else:
original_private_model=[]
for p in private_model.parameters():
original_private_model.append(p.view(-1))
original_private_model=torch.cat(original_private_model)
original_updated_model=[]
for p in new_model.parameters():
original_updated_model.append(p.view(-1))
original_updated_model=torch.cat(original_updated_model)
if i==0:
private_model=GradSampleModule(private_model)
attack_model=GradSampleModule(new_model)
train_vals=get_calibrated_gradnorm(params, private_model,original_private_model, attack_model,original_updated_model,private_train_ids,hidden_masks['private']['train'], aug_style=aug_style, norm_type=norm_type)
heldout_vals=get_calibrated_gradnorm(params, private_model, original_private_model,attack_model,original_updated_model,private_heldout_ids,hidden_masks['private']['heldout'], aug_style=aug_style,norm_type=norm_type)
if aux_style=='max':
train_losses=np.maximum(train_losses, train_vals)
heldout_losses=np.maximum(heldout_losses, heldout_vals)
else:
if params.attack_type=='conf' or params.attack_type=='dist':
train_losses=train_vals
heldout_losses=heldout_vals
else:
train_losses+=train_vals
heldout_losses+=heldout_vals
if aux_style=='mean':
train_losses=train_losses/num_aux
heldout_losses=heldout_losses/num_aux
return train_losses, heldout_losses
def get_losses(params):
"""
return uncalibrated losses.
"""
known_masks, hidden_masks = {}, {}
hidden_masks['public'], hidden_masks['private']={},{}
known_masks['public'] = torch.load(params.mask_path + "public.pth")
known_masks['private'] = torch.load( params.mask_path + "private.pth")
hidden_masks['private']['train']=torch.load( params.mask_path + "hidden/train.pth")
hidden_masks['private']['heldout'] = torch.load( params.mask_path + "hidden/heldout.pth")
hidden_masks['public']['train']=torch.load( params.mask_path + "hidden/public_train.pth")
hidden_masks['public']['heldout'] = torch.load( params.mask_path + "hidden/public_heldout.pth")
#get the final model parameters
private_model=build_model(params)
private_model_path = os.path.join(params.model_path, "checkpoint.pth")
state_dict_private = torch.load(private_model_path,map_location='cuda:0')
if params.dataset=='imagenet':
new_state_dict = OrderedDict()
for k, v in state_dict_private["model"].items():
if k[:7]=='module.': # remove `module.`
new_state_dict[k[7:]] = v
else:
new_state_dict[k]=v
private_model.load_state_dict(new_state_dict)
else:
private_model.load_state_dict(state_dict_private['model'])
private_model=private_model.cuda()
#get the appropriate ids to dot product
private_train_ids=(hidden_masks['private']['train']==True).nonzero().flatten().numpy()
private_heldout_ids=(hidden_masks['private']['heldout']==True).nonzero().flatten().numpy()
#load the dataset
dataset = get_dataset(params)
    #initialize the loss lists
train_losses=[]
heldout_losses=[]
for id in private_train_ids:
#load each image and target
image = dataset[id][0].unsqueeze(0)
image = image.cuda(non_blocking=True)
target = torch.tensor(dataset[id][1]).unsqueeze(0)
target = target.cuda(non_blocking=True)
#get the loss
output=private_model(image)
loss=F.cross_entropy(output, target).item()
train_losses.append(loss)
for id in private_heldout_ids:
#load each image and target
image = dataset[id][0].unsqueeze(0)
image = image.cuda(non_blocking=True)
target = torch.tensor(dataset[id][1]).unsqueeze(0)
target = target.cuda(non_blocking=True)
#get the loss
output=private_model(image)
loss=F.cross_entropy(output, target).item()
heldout_losses.append(loss)
return train_losses,heldout_losses
def get_confidences(params):
"""
return uncalibrated confidences.
"""
known_masks, hidden_masks = {}, {}
hidden_masks['public'], hidden_masks['private']={},{}
known_masks['public'] = torch.load(params.mask_path + "public.pth")
known_masks['private'] = torch.load( params.mask_path + "private.pth")
hidden_masks['private']['train']=torch.load( params.mask_path + "hidden/train.pth")
hidden_masks['private']['heldout'] = torch.load( params.mask_path + "hidden/heldout.pth")
hidden_masks['public']['train']=torch.load( params.mask_path + "hidden/public_train.pth")
hidden_masks['public']['heldout'] = torch.load( params.mask_path + "hidden/public_heldout.pth")
device = torch.device('cpu')
#get the final model parameters
private_model=build_model(params)
private_model_path = os.path.join(params.model_path, "checkpoint.pth")
state_dict_private = torch.load(private_model_path,map_location=device)
private_model.load_state_dict(state_dict_private['model'])
private_model=private_model.cpu()
#get the appropriate ids to dot product
private_train_ids=(hidden_masks['private']['train']==True).nonzero().flatten().numpy()
private_heldout_ids=(hidden_masks['private']['heldout']==True).nonzero().flatten().numpy()
#load the dataset
dataset = get_dataset(params)
if params.aug:
train_confidences=np.zeros(len(hidden_masks['private']['train']))
heldout_confidences=np.zeros(len(hidden_masks['private']['train']))
train_summed_confs=[[0] for i in np.arange(len(hidden_masks['private']['train']))]
heldout_summed_confs=[[0] for i in np.arange(len(hidden_masks['private']['train']))]
for j in np.arange(10):
print('Aug', j)
train_images=torch.stack([dataset[i][0] for i in private_train_ids])
train_images=train_images.cpu()
heldout_images=torch.stack([dataset[i][0] for i in private_heldout_ids])
heldout_images=heldout_images.cpu()
log_softmax = torch.nn.LogSoftmax(dim=1)
train_output=private_model(train_images)
heldout_output=private_model(heldout_images)
log_train_output=log_softmax(train_output)
log_heldout_output=log_softmax(heldout_output)
train_confs,_=torch.max(log_train_output,dim=1)
heldout_confs,_=torch.max(log_heldout_output,dim=1)
train_confs=train_confs.cpu().detach().numpy()
heldout_confs=heldout_confs.cpu().detach().numpy()
for i,id in enumerate(private_train_ids):
train_summed_confs[id].append(train_confs[i])
for i,id in enumerate(private_heldout_ids):
heldout_summed_confs[id].append(heldout_confs[i])
for id in private_train_ids:
if params.aug_style=='mean':
train_confidences[id]=np.mean(train_summed_confs[id][1:])
elif params.aug_style=='max':
train_confidences[id]=np.max(train_summed_confs[id][1:])
elif params.aug_style=='median':
train_confidences[id]=np.median(train_summed_confs[id][1:])
elif params.aug_style=='std':
train_confidences[id]=np.std(train_summed_confs[id][1:])
for id in private_heldout_ids:
if params.aug_style=='mean':
heldout_confidences[id]=np.mean(heldout_summed_confs[id][1:])
elif params.aug_style=='max':
heldout_confidences[id]=np.max(heldout_summed_confs[id][1:])
elif params.aug_style=='median':
heldout_confidences[id]=np.median(heldout_summed_confs[id][1:])
elif params.aug_style=='std':
heldout_confidences[id]=np.std(heldout_summed_confs[id][1:])
train_confidences=train_confidences[private_train_ids]
heldout_confidences=heldout_confidences[private_heldout_ids]
else:
train_confidences=[]
heldout_confidences=[]
train_images=torch.stack([dataset[i][0] for i in private_train_ids])
train_images=train_images.cpu()
heldout_images=torch.stack([dataset[i][0] for i in private_heldout_ids])
heldout_images=heldout_images.cpu()
log_softmax = torch.nn.LogSoftmax(dim=1)
train_output=private_model(train_images)
heldout_output=private_model(heldout_images)
log_train_output=log_softmax(train_output)
log_heldout_output=log_softmax(heldout_output)
train_confidences,_=torch.max(log_train_output,dim=1)
heldout_confidences,_=torch.max(log_heldout_output,dim=1)
train_confidences=train_confidences.cpu().detach().numpy()
heldout_confidences=heldout_confidences.cpu().detach().numpy()
return train_confidences,heldout_confidences
def get_calibrated_distances(params, model1, model2, ids):
"""
return calibrated boundary distances.
"""
dataset = get_dataset(params)
images=torch.stack([dataset[i][0] for i in ids])
images=images.cuda()
targets=torch.stack([torch.tensor(dataset[i][1]) for i in ids])
targets=targets.cuda()
outputs1=model1(images)
outputs2=model2(images)
images_pert1= hop_skip_jump_attack(model1,images,2, verbose=False,clip_min=params.clip_min, clip_max=params.clip_max)
images_pert2= hop_skip_jump_attack(model2,images,2, verbose=False,clip_min=params.clip_min, clip_max=params.clip_max)
# images_pert1=carlini_wagner_l2(model1,images,params.num_classes ,targets)
# images_pert2=carlini_wagner_l2(model2,images,params.num_classes ,targets)
dists1=[]
for i, id in enumerate(ids):
_, pred = torch.topk(outputs1[i], 1)
if pred==targets[i].item():
dists1.append(torch.norm(images_pert1[i]- images[i], p=2).item())
else:
dists1.append(-torch.norm(images_pert1[i]- images[i], p=2).item())
dists2=[]
for i, id in enumerate(ids):
_, pred = torch.topk(outputs2[i], 1)
if pred==targets[i].item():
dists2.append(torch.norm(images_pert2[i]- images[i], p=2).item())
else:
            dists2.append(-torch.norm(images_pert2[i]- images[i], p=2).item())
calibrated_dists=np.subtract(np.array(dists1),np.array(dists2))
return calibrated_dists
def calibrated_distance_attack(params, num=10):
"""
run calibrated boundary distance attack.
"""
#load the masks
known_masks, hidden_masks = {}, {}
hidden_masks['public'], hidden_masks['private']={},{}
known_masks['public'] = torch.load(params.mask_path + "public.pth")
known_masks['private'] = torch.load( params.mask_path + "private.pth")
hidden_masks['private']['train']=torch.load( params.mask_path + "hidden/train.pth")
hidden_masks['private']['heldout'] = torch.load( params.mask_path + "hidden/heldout.pth")
hidden_masks['public']['train']=torch.load( params.mask_path + "hidden/public_train.pth")
hidden_masks['public']['heldout'] = torch.load( params.mask_path + "hidden/public_heldout.pth")
if params.public_data=='train':
print('Using public training data for auxiliary model')
attack_model=train(params, hidden_masks['public']['train'])
elif params.public_data[:4]=='rand':
print('Using random subset for auxiliary model')
public_ids=(known_masks['public']==True).nonzero().flatten().numpy()
prop_selected=float(params.public_data[4:])/100
num_selected=math.ceil(prop_selected*len(public_ids))
permuted_ids=np.random.permutation(public_ids)
aux_data_mask=to_mask(len(known_masks['public']),permuted_ids[:num_selected])
print('Number of public model training points', len((aux_data_mask==True).nonzero().flatten().numpy()))
attack_model=train(params, aux_data_mask)
else:
print('Using all public data for auxiliary model')
attack_model=train(params, known_masks['public'])
attack_model=attack_model.cuda()
#get the final model parameters
private_model=build_model(params)
private_model_path = os.path.join(params.model_path, "checkpoint.pth")
state_dict_private = torch.load(private_model_path)
private_model.load_state_dict(state_dict_private['model'])
private_model=private_model.cuda()
#get the appropriate ids
private_train_ids=(hidden_masks['private']['train']==True).nonzero().flatten().numpy()
private_train_ids=private_train_ids[np.random.choice(len(private_train_ids), size=num, replace=False)]
private_heldout_ids=(hidden_masks['private']['heldout']==True).nonzero().flatten().numpy()
private_heldout_ids=private_heldout_ids[np.random.choice(len(private_heldout_ids), size=num, replace=False)]
train_losses=get_calibrated_distances(params, private_model, attack_model,private_train_ids)
heldout_losses=get_calibrated_distances(params, private_model, attack_model,private_heldout_ids)
return train_losses, heldout_losses
def get_boundary_distances(params, model, ids):
"""
return uncalibrated boundary distances.
"""
dataset = get_dataset(params)
images=torch.stack([dataset[i][0] for i in ids])
images=images.cuda()
targets=[]
for i in ids:
temp=np.zeros(params.num_classes)
temp[dataset[i][1]]=1
temp=torch.tensor(temp)
targets.append(temp)
original_targets=torch.stack([torch.tensor(dataset[i][1]) for i in ids])
original_targets=original_targets.cuda()
targets=torch.stack(targets)
targets=targets.cuda()
outputs=model(images)
images_pert= hop_skip_jump_attack(model,images,2 ,verbose=False, clip_min=params.clip_min, clip_max=params.clip_max)
# images_pert=carlini_wagner_l2(model,images,params.num_classes ,original_targets)
dists=[]
for i, id in enumerate(ids):
_, pred = torch.topk(outputs[i], 1)
if pred==original_targets[i].item():
dists.append(torch.norm(images_pert[i]- images[i], p=2).item())
else:
dists.append(0)
return dists
def boundary_distance_attack(params, num=10):
"""
run uncalibrated boundary distance attack.
"""
train_mask=torch.load(params.mask_path+'/hidden/train.pth')
heldout_mask=torch.load(params.mask_path+'/hidden/heldout.pth')
train_ids=(train_mask==True).nonzero().flatten().numpy()
heldout_ids=(heldout_mask==True).nonzero().flatten().numpy()
train_ids=train_ids[np.random.choice(len(train_ids), size=num, replace=False)]
heldout_ids=heldout_ids[np.random.choice(len(heldout_ids), size=num, replace=False)]
private_model=build_model(params)
private_model_path = os.path.join(params.model_path, "checkpoint.pth")
state_dict_private = torch.load(private_model_path)
private_model.load_state_dict(state_dict_private['model'])
private_model=private_model.cuda()
train_dists=get_boundary_distances(params, private_model, train_ids )
heldout_dists=get_boundary_distances(params, private_model, heldout_ids )
return train_dists, heldout_dists
if __name__ == '__main__':
parser = get_parser()
params = parser.parse_args()
    train_vals, heldout_vals=calibrated_loss_attack(params)
| calibration_membership-main | attacks/privacy_attacks.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net import train_net
from overlap.test_net import test_net
from overlap.test_corrupt_net import test_corrupt_net
import numpy as np
import torch
import pickle
import os
import omegaconf
from overlap.extract_features import extract_features
from overlap.utils import logging as lu
log = logging.getLogger(__name__)
def individual_sort(aug_dists):
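    """Greedily interleave the per-column rankings of `aug_dists`.

    `aug_dists` is a 2-D array of shape [num_augmentations, num_corruptions]; each column
    holds distances from every augmentation to one corruption. Walking rank level by rank
    level through the per-column argsort (within a level, columns are visited in order of
    increasing distance), each augmentation index is emitted the first time it appears,
    yielding a deduplicated ordering from roughly closest to farthest.
    """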
out = []
included = set()
arg_sort = np.argsort(aug_dists, axis=0)
sort = np.sort(aug_dists, axis=0)
for row in range(len(arg_sort)):
curr_arg = arg_sort[row]
curr_dists = sort[row]
sorted_args = curr_arg[np.argsort(curr_dists)]
for i in sorted_args:
if i not in included:
out.append(i)
included.add(i)
return np.array(out)
@hydra.main(config_path="conf/closest_augs.yaml")
def train(cfg):
np.random.seed(cfg.rng_seed)
torch.manual_seed(cfg.rng_seed)
log.info(cfg.pretty())
model = instantiate(cfg.model).cuda()
optimizer = instantiate(cfg.optim, model.parameters())
lr_policy = instantiate(cfg.optim.lr_policy)
if cfg.transform_file and os.path.exists(cfg.transform_file):
log.info("Transforms found, loading feature extractor is unnecessary. Skipping.")
else:
feature_extractor = instantiate(cfg.ft)
feature_extractor.train()
if cfg.transform_file and os.path.exists(cfg.transform_file):
log.info("Transforms found, feature extraction is unnecessary. Skipping.")
elif cfg.aug_feature_file and os.path.exists(cfg.aug_feature_file):
log.info("Found feature file. Loading from {}".format(cfg.aug_feature_file))
data = np.load(cfg.aug_feature_file)
augmentation_features = data['features']
indices = data['indices']
transforms = data['transforms']
else:
ft_augmentation_dataset = instantiate(cfg.ft_augmentation)
transforms = ft_augmentation_dataset.transform_list
indices = np.random.choice(np.arange(len(ft_augmentation_dataset)), size=cfg.num_images, replace=False)
ft_augmentation_dataset = ft_augmentation_dataset.serialize(indices)
augmentation_features = extract_features(feature_extractor,
ft_augmentation_dataset,
cfg.ft_augmentation.batch_size,
cfg.data_loader,
average=True,
average_num=len(indices))
if cfg.aug_feature_file:
np.savez(cfg.aug_feature_file,
features=augmentation_features,
indices=indices,
transforms=transforms)
if cfg.transform_file and os.path.exists(cfg.transform_file):
log.info("Found transform file. Loading from {}.".format(cfg.transform_file))
sorted_transforms = np.load(cfg.transform_file)
else:
aug_strings = cfg.ft_corrupt.aug_string.split("--")
distances = np.zeros((len(augmentation_features), len(aug_strings)))
for i, aug in enumerate(aug_strings):
with omegaconf.open_dict(cfg):
ft_corrupt_dataset = instantiate(cfg.ft_corrupt, aug_string=aug)
if cfg.num_corrupt_images and i==0:
indices = np.random.choice(np.arange(len(ft_corrupt_dataset)), size=cfg.num_corrupt_images, replace=False)
ft_corrupt_dataset = ft_corrupt_dataset.serialize(indices)
corruption_features = extract_features(feature_extractor,
ft_corrupt_dataset,
cfg.ft_corrupt.batch_size,
cfg.data_loader,
average=True)
corruption_features = corruption_features.reshape(1, -1)
dists = np.linalg.norm(augmentation_features - corruption_features, axis=-1)
distances[:,i] = dists
sorted_dist_args = individual_sort(distances)
sorted_transforms = transforms[sorted_dist_args]
if cfg.transform_file:
np.save(cfg.transform_file, sorted_transforms)
train_dataset = instantiate(cfg.train)
if cfg.selection_type == 'closest':
train_dataset.transform_list = sorted_transforms[cfg.offset:cfg.offset+cfg.num_transforms]
elif cfg.selection_type == 'farthest':
train_dataset.transform_list = sorted_transforms[-cfg.offset-cfg.num_transforms:-cfg.offset]\
if cfg.offset != 0 else sorted_transforms[-cfg.num_transforms:]
else:
train_dataset.transform_list = sorted_transforms[np.random.choice(np.arange(len(sorted_transforms)), size=cfg.num_transforms, replace=False)]
test_dataset = instantiate(cfg.test)
train_net(model=model,
optimizer=optimizer,
train_dataset=train_dataset,
batch_size=cfg.train.batch_size,
max_epoch=cfg.optim.max_epoch,
loader_params=cfg.data_loader,
lr_policy=lr_policy,
save_period=cfg.train.checkpoint_period,
weights=cfg.train.weights
)
err = test_net(model=model,
test_dataset=test_dataset,
batch_size=cfg.test.batch_size,
loader_params=cfg.data_loader,
output_name='test_epoch')
test_corrupt_net(model=model,
corrupt_cfg=cfg.corrupt,
batch_size=cfg.corrupt.batch_size,
loader_params=cfg.data_loader,
aug_string=cfg.corrupt.aug_string,
clean_err=err,
mCE_denom=cfg.corrupt.mCE_baseline_file)
if __name__=="__main__":
train()
| augmentation-corruption-fbr_main | experiments/closest_augs.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net import train_net
from overlap.test_net import test_net
from overlap.test_corrupt_net import test_corrupt_net
import numpy as np
import torch
import pickle
import submitit
log = logging.getLogger(__name__)
@hydra.main(config_path="conf/test_imagenet.yaml")
def run(cfg):
if cfg.num_gpus > 1:
job_env = submitit.JobEnvironment()
rank = job_env.global_rank
world_size = job_env.num_tasks
if rank != 0:
logging.root.handlers = []
try:
torch.cuda.set_device(rank)
torch.distributed.init_process_group(
backend='nccl',
init_method="tcp://{}:{}".format('localhost', 10001),
world_size=world_size,
rank=rank
)
train(cfg, is_leader=(rank==0))
except KeyboardInterrupt:
pass
finally:
torch.distributed.destroy_process_group()
else:
train(cfg, is_leader=True)
def train(cfg, is_leader):
np.random.seed(cfg.rng_seed)
torch.manual_seed(cfg.rng_seed)
log.info(cfg.pretty())
cur_device = torch.cuda.current_device()
model = instantiate(cfg.model).cuda(device=cur_device)
if cfg.num_gpus > 1:
model = torch.nn.parallel.DistributedDataParallel(
module=model,
device_ids=[cur_device],
output_device=cur_device
)
print("Loading test set...")
test_dataset = instantiate(cfg.test)
checkpoint = torch.load(cfg.weights, map_location='cpu')
if cfg.num_gpus > 1:
model.module.load_state_dict(checkpoint['model_state'])
else:
model.load_state_dict(checkpoint['model_state'])
print("Testing...")
err = test_net(model=model,
test_dataset=test_dataset,
batch_size=cfg.test.batch_size,
loader_params=cfg.data_loader,
num_gpus=cfg.num_gpus)
test_corrupt_net(model=model,
corrupt_cfg=cfg.corrupt,
batch_size=cfg.corrupt.batch_size,
loader_params=cfg.data_loader,
aug_string=cfg.corrupt.aug_string,
clean_err=err,
mCE_denom=cfg.corrupt.mCE_baseline_file,
num_gpus=cfg.num_gpus,
log_name='train_imagenet.log')
if __name__=="__main__":
run()
| augmentation-corruption-fbr_main | experiments/test_imagenet.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import argparse
import overlap.utils.logging as lu
import decimal
import simplejson
import numpy as np
import omegaconf
from itertools import combinations
from itertools import product
from scipy.special import comb
parser = argparse.ArgumentParser(description="Collect run summaries.")
parser.add_argument('--new_corr_dir', dest='data_dir', required=True)
parser.add_argument('--baseline_corr_dir', dest='baseline_dir', required=True)
parser.add_argument('--precision', type=float, dest='precision', default=0.01)
parser.add_argument('--target_error', type=float, dest='target', required=True)
parser.add_argument('--num', type=int, dest='num', default=5)
parser.add_argument('--out', dest='out', required=True)
parser.add_argument('--log_name', default='severity_scan.log', dest='log_name')
def get_data(base_dirs, exclusions=[], log_file='severity_scan.log'):
features = {}
errors = {}
feature_file = 'features.npz'
path_stack = base_dirs
while path_stack:
curr_dir = path_stack.pop()
subdirs = [os.path.join(curr_dir, x) for x in os.listdir(curr_dir) if x[0] != '.']
subdirs = [x for x in subdirs if os.path.isdir(x)]
path_stack.extend(subdirs)
summary_file = os.path.join(curr_dir, log_file)
if os.path.exists(summary_file):
curr_features = np.load(os.path.join(curr_dir, feature_file))
features.update({k : v for k,v in curr_features.items() if k.split("-")[0] not in exclusions})
stats = lu.load_json_stats(summary_file)
curr_errs = {stats[i]["_type"] : stats[i]["top1_err"] for i in range(len(stats))\
if stats[i]["_type"] != "test_epoch" and stats[i]["_type"].split("-")[0] not in exclusions}
errors.update(curr_errs)
return errors, features
def get_average_spread(baseline_errs):
'''
Calculate the average spread in severity in the baseline data, so
the new corruption datasets can attempt to match it.
'''
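    # Worked example (illustrative numbers only): if a baseline corruption has errors of
    # 20, 30 and 45 (%) at severities 1, 3 and 5, then lower = |20/30 - 1| = 0.333 and
    # upper = |45/30 - 1| = 0.5, so it contributes (0.333 + 0.5) / 2 ≈ 0.417 to the average.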
bcorrs = sorted(list(set([c.split("-")[0] for c in baseline_errs.keys()])))
avg = 0.0
for bcorr in bcorrs:
lower = abs((baseline_errs["{}-1".format(bcorr)] / baseline_errs["{}-3".format(bcorr)] - 1))
upper = abs((baseline_errs["{}-5".format(bcorr)] / baseline_errs["{}-3".format(bcorr)] - 1))
avg += (lower + upper) / 2
return avg / len(bcorrs)
def build_sets(corr_errs, avg_spread):
'''
For each severity 3-8, associate a set of 5 severities with it that
best match the average spread, where that severity is the middle of
the five.
Inputs:
corr_errs: dictionary where each key is a string "{corr}-{severity}"
and each value is the test error.
avg_spread: float specifying the average spread to try to match
Output:
dictionary where each key is a string giving the corruption name,
and each value is a list of 5-tuples giving all sets of 5 severities
associated to that corruption.
'''
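    # Example of the returned structure (corruption name and severities are hypothetical):
    #   {'my_corruption': [(1.0, 2.0, 3.0, 4.0, 5.0), (1.5, 2.5, 3.5, 4.5, 5.5), ...], ...}
    # with one candidate 5-severity window per eligible middle severity.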
corrs = sorted(list(set([c.split("-")[0] for c in corr_errs.keys()])))
corr_sets = {c : [] for c in corrs}
for c in corrs:
sevs = sorted([float(i.split("-")[1]) for i in corr_errs.keys() if c == i.split("-")[0]])
for i in np.arange(2, len(sevs)-2):
# Sev 1
best = float('inf')
best_match_s1 = None
for j in np.arange(0, i-1):
sep = corr_errs["{}-{}".format(c, sevs[j])] / corr_errs["{}-{}".format(c, sevs[i])] - 1
sep_sep = abs(-avg_spread - sep)
if sep_sep <= best:
best = sep_sep
best_match_s1 = j
# Sev 2
best = float('inf')
best_match_s2 = None
for j in np.arange(best_match_s1+1, i):
sep = corr_errs["{}-{}".format(c, sevs[j])] / corr_errs["{}-{}".format(c, sevs[i])] - 1
sep_sep = abs(-avg_spread/2 - sep)
if sep_sep <= best:
best = sep_sep
best_match_s2 = j
# Sev 5
best = float('inf')
best_match_s5 = None
for j in np.arange(i+2, len(sevs)):
sep = corr_errs["{}-{}".format(c, sevs[j])] / corr_errs["{}-{}".format(c, sevs[i])] - 1
sep_sep = abs(avg_spread - sep)
if sep_sep <= best:
best = sep_sep
best_match_s5 = j
# Sev 4
best = float('inf')
best_match_s4 = None
for j in np.arange(i+1, best_match_s5):
sep = corr_errs["{}-{}".format(c, sevs[j])] / corr_errs["{}-{}".format(c, sevs[i])] - 1
sep_sep = abs(avg_spread/2 - sep)
if sep_sep <= best:
best = sep_sep
best_match_s4 = j
corr_sets[c].append((sevs[best_match_s1], sevs[best_match_s2], sevs[i], sevs[best_match_s4], sevs[best_match_s5]))
return corr_sets
def build_distance_table(baseline_features, corr_features):
'''
Calculates distances between corruption features. Across baseline
features and severities, takes the minimum distance, across
severities in the new corruption set, takes the average.
Inputs:
baseline_features: 3d numpy array ordered as
[corruption, severity, features]
corr_features: 4d numpy array ordered as
[corruption, severity_set, severity, features]
Output
2d numpy array ordered as [corruption, severity_set]
'''
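    # Shape sketch with hypothetical sizes: corr_features (10, 6, 5, 2048) against
    # baseline_features (15, 5, 2048) broadcasts to distances of shape (10, 6, 5, 15, 5);
    # taking the min over the two baseline axes and the mean over the new-corruption
    # severities leaves a (10, 6) table indexed by [corruption, severity_set].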
nc, nss, ns, nf = corr_features.shape
corr_features = corr_features.reshape(nc, nss, ns, 1, 1, nf)
nb, _, _ = baseline_features.shape
baseline_features = baseline_features.reshape(1, 1, 1, nb, ns, nf)
dists = np.linalg.norm(corr_features - baseline_features, axis=-1)
min_dists = np.mean(np.min(np.min(dists, axis=-1), axis=-1), axis=-1)
return min_dists
def build_corr_tables(corr_sets, corr_errs, corr_features):
'''
Given a list of corruption severity sets, builds the tables that
will be used to calculate distance. For each corruption, the tables
are ordered in increasing order, since this is required to make the
dataset selection algorithm more efficient. This ordering is also
returned so it can be reversed at the end.
Inputs:
corr_sets: dictionary of corruption keys with lists of severity set
values
corr_errs: dictionary of keys with the form '{corr}-{severity}' and
values that are the errors on that corruption/severity pair
corr_features: dictionary of keys with the form '{corr}-{severity}'
and values that are the features on that corruption/severity pair
Outputs:
1. A list of all corruption strings, in the order they appear the
table.
2. dictionary where the keys are corruption strings, and the values
are the severity sets ordered by increasing corruption error.
3. 2d numpy array with the shape [corruption, severity_set] that
gives the average error on that severity set and corruption
4. 4d numpy array with the shape
[corruption, severity_set, severity, features]
'''
corrs = list(corr_sets.keys())
ordered = {}
len_feats = len(list(corr_features.values())[0])
err_table = np.zeros((len(corrs), len(corr_sets[corrs[0]])))
feat_table = np.zeros((len(corrs), len(corr_sets[corrs[0]]), len(corr_sets[corrs[0]][0]), len_feats))
for i, corr in enumerate(corrs):
curr_errs = np.zeros(len(corr_sets[corr]))
curr_feats = np.zeros((len(corr_sets[corr]), len(corr_sets[corrs[0]][0]), len_feats))
for j, sev_list in enumerate(corr_sets[corr]):
curr_errs[j] = np.mean([corr_errs["{}-{}".format(corr, s)] for s in sev_list])
curr_feats[j,:,:] = np.array([corr_features["{}-{}".format(corr, s)] for s in sev_list])
sev_order = np.argsort(curr_errs)
err_table[i,:] = np.sort(curr_errs)
feat_table[i, :, :, :] = curr_feats[sev_order, :, :]
ordered[corr] = np.array(corr_sets[corr])[sev_order]
return corrs, ordered, err_table, feat_table
def build_baseline_features(baseline_features):
'''
Builds a table of baseline corruption features, given a dictionary.
Inputs:
baseline_features: dictionary of features with keys that are strings
as "{corr}-{severity}"
Outputs:
3d numpy array ordered as [corruption, severity, features].
'''
corrs = sorted(list(set([v.split("-")[0] for v in baseline_features.keys()])))
sevs = sorted(list(set([int(v.split("-")[1]) for v in baseline_features.keys()])))
len_feats = len(list(baseline_features.values())[0])
baseline_table = np.zeros((len(corrs), len(sevs), len_feats))
for i, c in enumerate(corrs):
for j, s in enumerate(sevs):
baseline_table[i,j,:] = baseline_features["{}-{}".format(c,s)]
return baseline_table
def sample_matched_corruptions(err_table, baseline_err, precision, num):
'''
Iterates over all 'num'-sized combinations of corruptions and selects
a set of severities that has error within 'precision' of the baseline
error. If multiple sets of severities fall within the precision
window, it picks one at random. If none do, it skips that combination
of corruptions.
The runtime is O((num_corruptions * num_severity_sets)^num), though
in practice the algorithm below is usually
O(num_corruptions^num * num_severity_sets^(num-1)).
Inputs:
err_table: 2d numpy array of shape [corruptions, severity_sets]
listing the average error for each set.
baseline_err: float giving the target error to match
precision: float giving the percentage variation from the baseline
error allowed for an accepted severity set
num: int listing the number of corruptions to combine
Output:
A list of sampled datasets, where each sampled dataset is a list
of 'num' 2-tuples (corruption_index, severity_set_index).
'''
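    # Example of the return format for num=3 (indices are hypothetical):
    #   [[(0, 4), (7, 2), (12, 9)], [(1, 0), (3, 5), (8, 8)], ...]
    # where each inner list is one accepted dataset and each pair indexes
    # (corruption, severity_set) into err_table.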
count = 0
total = comb(err_table.shape[0], num, exact=True)
chosen_augs = []
for idxs in combinations(range(err_table.shape[0]), num):
all_augs = []
count += 1
if count % 1000 == 0:
print("On iteration {}/{}".format(count, total))
# Loop over severities for all chosen corruptions except for the
# last two. Since the severity sets are ordered by average error,
# we can work from the outside in to typically save one factor of
# 'num' in calculation time.
for sev_idxs in product(*[range(err_table.shape[1]) for i in range(num-2)]):
target = baseline_err * num
err_sum = 0.0
for i in range(num-2):
err_sum += err_table[idxs[i], sev_idxs[i]]
stack = [(0, err_table.shape[1]-1)] # Start on the two ends
seen = set()
while stack:
i, j = stack.pop()
if (i,j) in seen or i >= err_table.shape[1] or j < 0:
continue
seen.add((i,j))
final_err_sum = err_sum + err_table[idxs[-2],i] + err_table[idxs[-1],j]
if abs((target-final_err_sum)/target) < precision:
curr = [(idxs[k], sev_idxs[k]) for k in range(num-2)] + [(idxs[-2],i), (idxs[-1],j)]
all_augs.append(curr)
stack.append([i+1, j])
stack.append([i, j-1])
elif (target-final_err_sum)/target >= precision:
stack.append([i+1, j])
else:
stack.append([i, j-1])
if all_augs:
idx_choice = np.random.randint(low=0, high=len(all_augs))
chosen_augs.append(all_augs[idx_choice])
return chosen_augs
def main():
args = parser.parse_args()
data_dir = args.data_dir
baseline_dir = args.baseline_dir
precision = args.precision
num_corr = args.num
out_file = args.out
log_name = args.log_name
target_error = args.target
baseline_exclusions = ['saturate', 'spatter', 'gaussian_blur', 'speckle_noise']
corr_exclusions = []
print("Loading data...")
data_dirs = data_dir.split(",")
baseline_dirs = baseline_dir.split(",")
corr_errs, corr_features = get_data(data_dirs, corr_exclusions, log_file=log_name)
baseline_errs, baseline_features = get_data(baseline_dirs, exclusions=baseline_exclusions, log_file=log_name)
baseline_table = build_baseline_features(baseline_features)
avg_spread = get_average_spread(baseline_errs)
corr_sets = build_sets(corr_errs, avg_spread)
corrs, ordered_sev_list, err_table, feat_table = build_corr_tables(corr_sets, corr_errs, corr_features)
dists = build_distance_table(baseline_table, feat_table)
chosen = sample_matched_corruptions(err_table, target_error, precision, num_corr)
out = []
for aug_list in chosen:
sub_aug_strings = []
err = 0.0
curr_dists = None
for a in aug_list:
corr = corrs[a[0]]
sevs = ordered_sev_list[corr][a[1]]
sub_aug_strings.append("--".join(["{}-{}".format(corr,s) for s in sevs]))
err += err_table[a[0], a[1]]
curr_curr_dists = dists[a[0], a[1]]
curr_dists = np.concatenate((curr_dists, curr_curr_dists.reshape(1,-1)), axis=0) if curr_dists is not None else curr_curr_dists.reshape(1,-1)
err /= len(aug_list)
avg_dists = np.mean(curr_dists, axis=0)
aug_string = "--".join(sub_aug_strings)
data_out = ",".join([aug_string, str(err)] + [str(x) for x in avg_dists])
out.append(data_out)
with open(out_file, 'w') as f:
f.write(",,".join([data_dir, baseline_dir, str(precision), str(num_corr)]))
f.write("\n")
f.write("\n".join(out))
if __name__=="__main__":
main()
| augmentation-corruption-fbr_main | experiments/sample_datasets.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net_jsd import train_net
from overlap.test_net import test_net
from overlap.test_corrupt_net import test_corrupt_net
import numpy as np
import torch
import pickle
import submitit
log = logging.getLogger(__name__)
@hydra.main(config_path="conf/train_imagenet_jsd.yaml")
def run(cfg):
if cfg.num_gpus > 1:
job_env = submitit.JobEnvironment()
rank = job_env.global_rank
world_size = job_env.num_tasks
if rank != 0:
logging.root.handlers = []
try:
torch.cuda.set_device(rank)
torch.distributed.init_process_group(
backend='nccl',
init_method="tcp://{}:{}".format('localhost', 10001),
world_size=world_size,
rank=rank
)
train(cfg, is_leader=(rank==0))
except KeyboardInterrupt:
pass
finally:
torch.distributed.destroy_process_group()
else:
train(cfg, is_leader=True)
def train(cfg, is_leader):
np.random.seed(cfg.rng_seed)
torch.manual_seed(cfg.rng_seed)
log.info(cfg.pretty())
cur_device = torch.cuda.current_device()
model = instantiate(cfg.model).cuda(device=cur_device)
if cfg.num_gpus > 1:
model = torch.nn.parallel.DistributedDataParallel(
module=model,
device_ids=[cur_device],
output_device=cur_device
)
optimizer = instantiate(cfg.optim, model.parameters())
if cfg.optim.max_epoch > 0 and cfg.train.weights is None:
print("Loading training set...")
train_dataset = instantiate(cfg.train)
else:
print("Skipping loading the training dataset, 0 epochs of training to perform "
" or pre-trained weights provided.")
train_dataset = None
print("Loading test set...")
test_dataset = instantiate(cfg.test)
lr_policy = instantiate(cfg.optim.lr_policy)
print("Training...")
train_net(model=model,
optimizer=optimizer,
train_dataset=train_dataset,
batch_size=cfg.train.batch_size,
max_epoch=cfg.optim.max_epoch,
loader_params=cfg.data_loader,
lr_policy=lr_policy,
save_period=cfg.train.checkpoint_period,
weights=cfg.train.weights,
num_gpus=cfg.num_gpus,
is_leader=is_leader,
jsd_num=cfg.train.params.jsd_num,
jsd_alpha=cfg.train.jsd_alpha
)
print("Testing...")
err = test_net(model=model,
test_dataset=test_dataset,
batch_size=cfg.test.batch_size,
loader_params=cfg.data_loader,
num_gpus=cfg.num_gpus)
test_corrupt_net(model=model,
corrupt_cfg=cfg.corrupt,
batch_size=cfg.corrupt.batch_size,
loader_params=cfg.data_loader,
aug_string=cfg.corrupt.aug_string,
clean_err=err,
mCE_denom=cfg.corrupt.mCE_baseline_file,
num_gpus=cfg.num_gpus,
log_name='train_imagenet.log')
if __name__=="__main__":
run()
| augmentation-corruption-fbr_main | experiments/train_imagenet_jsd.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
import argparse
parser = argparse.ArgumentParser(description="Calculate corruptions distance "\
"to the baseline corruptions and find a representative dataset of "\
"corruptions that are farthest away.")
parser.add_argument('--input_files', type=str, required=True,
help='Comma separated list of files for error matched corruptions.')
parser.add_argument('--target_error', type=float, required=True,
help='Target ImageNet-C error for choosing a representative dataset.'
)
def calc_shifts(corrs_files):
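    """Estimate how much each corruption shifts a sampled dataset's distance from the baselines.

    `corrs_files` lists files in the format consumed below (presumably the output of
    sample_datasets.py): a header line followed by rows of [corruption-string, error,
    distance]. Per file, random pairs of disjoint rows are merged into larger sampled
    datasets, and each corruption's shift is the z-scored difference between the mean
    distance of sampled datasets that contain it and the overall mean distance. Returns
    {corruption: (mean_shift, std_shift)} aggregated across the input files.
    """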
corr_shifts_all = []
for corrs_file in corrs_files:
with open(corrs_file, 'r') as f:
lines = [line.rstrip() for line in f.readlines()]
# Data is [corruption-string, error, distance]
data = [line.split(",") for line in lines[1:]]
corrs = set()
# Add the corruptions as a set to the data for easy determination of the intersection
for i in range(len(data)):
curr_corrs = set([a.split("-")[0] for a in data[i][0].split("--")])
data[i].append(curr_corrs)
corrs.update(curr_corrs) # Collect all types of corruptions seen for access later
corrs = list(corrs)
# Sample random sets of 10 corruptions
sampled = []
while len(sampled) < 100000:
chosen = np.random.randint(low=0, high=len(data), size=2)
            # Keep only disjoint pairs so each sampled dataset contains the full 10 corruptions.
if not (data[chosen[0]][-1] & data[chosen[1]][-1]):
sampled.append((
"--".join([data[chosen[0]][0], data[chosen[1]][0]]), # Combined aug string
(float(data[chosen[0]][1]) + float(data[chosen[1]][1])) / 2, # Average error
(float(data[chosen[0]][2]) + float(data[chosen[1]][2])) / 2 # Average distance
))
# Calculate shifts associated with each corruption
corr_shifts = []
        sampled_mean = np.mean([float(s[2]) for s in sampled]) # Mean distance over sampled datasets
        sampled_std = np.std([float(s[2]) for s in sampled]) # Std of distances over sampled datasets
# Get per corruption shifts in distance
for corr in corrs:
pos = []
# Find all distances from datasets that include this corruption
for s in sampled:
included_corrs = [a.split("-")[0] for a in s[0].split("--")]
if corr in included_corrs:
pos.append(float(s[2]))
# Calculate average shift for this corruption
pos_avg = np.mean(pos)
# Shift by average distance and reweight by distance std
shift = (pos_avg - sampled_mean) / sampled_std
corr_shifts.append(shift)
corr_shifts_all.append(corr_shifts)
# Calculate mean and std across multiple runs
corr_shifts_all = np.array(corr_shifts_all)
corr_shifts_mean = np.mean(corr_shifts_all, axis=0)
corr_shifts_std = np.std(corr_shifts_all, axis=0)
shifts = {corr : (corr_shifts_mean[i], corr_shifts_std[i]) for i, corr in enumerate(corrs)}
return shifts
def topk_shifts(shifts, k):
shifts_list = np.array([v[0] for k, v in shifts.items()])
corrs_list = np.array([k for k, v in shifts.items()])
ordered_idx = np.argsort(shifts_list)
topk = ordered_idx[-k:]
return corrs_list[topk]
def get_farthest_dataset(farthest_corrs, corrs_files, target_error):
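    # Among the error-matched entries, keep those whose corruptions all lie in the
    # "farthest" set, pair disjoint entries into full 10-corruption datasets, and return
    # the pairing whose average error is closest to the target ImageNet-C error.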
farthest_corrs = set(farthest_corrs)
valid_all = []
for corrs_file in corrs_files:
valid = []
with open(corrs_file, 'r') as f:
lines = [line.rstrip() for line in f.readlines()]
data = [line.split(",") for line in lines[1:]]
for i in range(len(data)):
data[i].append(set([a.split("-")[0] for a in data[i][0].split("--")]))
for datum in data:
augs = datum[-1]
if len(augs & farthest_corrs) == 5:
valid.append(datum)
valid_all.append(valid)
matched_all = []
for valid in valid_all:
matched = []
for i in np.arange(len(valid)):
for j in np.arange(start=i+1, stop=len(valid)):
if not (valid[i][-1] & valid[j][-1]):
matched.append((
"--".join([valid[i][0], valid[j][0]]), # Combined corruption string
(float(valid[i][1]) + float(valid[j][1])) / 2, # Average error
(float(valid[i][2]) + float(valid[j][2])) / 2 # Average distance
))
matched_all.append(matched)
best = None
for i, matched in enumerate(matched_all):
for m in matched:
if best is None or np.abs(m[1]-target_error) < np.abs(best[1] - target_error):
best = m
best_corr_dir = i
return best
def main():
args = parser.parse_args()
file_list = args.input_files.split(",")
shifts = calc_shifts(file_list)
farthest_corrs = topk_shifts(shifts, k=10)
corr_string = get_farthest_dataset(farthest_corrs, file_list, args.target_error)
print(shifts)
print(corr_string)
if __name__=="__main__":
main()
| augmentation-corruption-fbr_main | experiments/calc_distance_shifts.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net import train_net
from overlap.test_net import test_net
import numpy as np
import torch
import pickle
import os
import omegaconf
from overlap.extract_features import extract_features
import submitit
log = logging.getLogger(__name__)
@hydra.main(config_path="conf/severity_scan_imagenet.yaml")
def run(cfg):
if cfg.num_gpus > 1:
job_env = submitit.JobEnvironment()
rank = job_env.global_rank
world_size = job_env.num_tasks
if rank != 0:
logging.root.handlers = []
try:
torch.cuda.set_device(rank)
torch.distributed.init_process_group(
backend='nccl',
init_method="tcp://{}:{}".format('localhost', 10001),
world_size=world_size,
rank=rank
)
train(cfg, is_leader=(rank==0))
except KeyboardInterrupt:
pass
finally:
torch.distributed.destroy_process_group()
else:
train(cfg, is_leader=True)
def train(cfg, is_leader=True):
np.random.seed(cfg.rng_seed)
torch.manual_seed(cfg.rng_seed)
log.info(cfg.pretty())
cur_device = torch.cuda.current_device()
model = instantiate(cfg.model).cuda(device=cur_device)
if cfg.num_gpus > 1:
model = torch.nn.parallel.DistributedDataParallel(
module=model,
device_ids=[cur_device],
output_device=cur_device
)
optimizer = instantiate(cfg.optim, model.parameters())
if cfg.optim.max_epoch > 0:
train_dataset = instantiate(cfg.train)
else:
train_dataset = None
test_dataset = instantiate(cfg.test)
lr_policy = instantiate(cfg.optim.lr_policy)
with omegaconf.open_dict(cfg):
feature_extractor = instantiate(cfg.ft, num_gpus=cfg.num_gpus, is_leader=is_leader)
feature_extractor.train()
train_net(model=model,
optimizer=optimizer,
train_dataset=train_dataset,
batch_size=cfg.train.batch_size,
max_epoch=cfg.optim.max_epoch,
loader_params=cfg.data_loader,
lr_policy=lr_policy,
save_period=cfg.train.checkpoint_period,
weights=cfg.train.weights,
num_gpus=cfg.num_gpus,
is_leader=is_leader
)
err = test_net(model=model,
test_dataset=test_dataset,
batch_size=cfg.test.batch_size,
loader_params=cfg.data_loader,
output_name='test_epoch',
num_gpus=cfg.num_gpus)
if os.path.exists(cfg.feature_file):
feature_dict = {k : v for k, v in np.load(cfg.feature_file).items()}
else:
feature_dict = {}
indices = np.load(cfg.ft_corrupt.indices_file)
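    # Each entry of cfg.aug_string is "<name>", "<name>-<max>", or "<name>-<low>_<high>".
    # cfg.severity (on a 0-10 scale) is rescaled into [low, high] below, so a single job
    # evaluates every corruption at one point along its severity sweep; with no range
    # given, cfg.severity is used directly.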
for aug in cfg.aug_string.split("--"):
if len(aug.split("-")) > 1:
#log.info("Severity provided in corrupt.aug_string will be weighted by given severity.")
sev = aug.split("-")[1]
if len(sev.split("_")) > 1:
low = float(sev.split("_")[0])
high = float(sev.split("_")[1])
else:
low = 0.0
high = float(sev)
sev_factor = (high - low) * cfg.severity / 10 + low
else:
sev_factor = cfg.severity
aug = aug.split("-")[0]
aug_string = "{}-{}".format(aug, sev_factor)
if aug_string in feature_dict:
continue
with omegaconf.open_dict(cfg.corrupt):
corrupt_dataset = instantiate(cfg.corrupt, aug_string=aug_string)
err = test_net(model=model,
test_dataset=corrupt_dataset,
batch_size=cfg.corrupt.batch_size,
loader_params=cfg.data_loader,
output_name=aug_string,
num_gpus=cfg.num_gpus)
with omegaconf.open_dict(cfg.ft_corrupt):
ft_corrupt_dataset = instantiate(cfg.ft_corrupt, aug_string=aug_string)
if cfg.ft_corrupt.params.num_transforms is not None:
ft_corrupt_dataset = ft_corrupt_dataset.serialize(indices)
else:
ft_corrupt_dataset = torch.utils.data.Subset(ft_corrupt_dataset, indices)
feature = extract_features(feature_extractor=feature_extractor,
dataset=ft_corrupt_dataset,
batch_size=cfg.ft_corrupt.batch_size,
loader_params=cfg.data_loader,
average=True,
num_gpus=cfg.num_gpus)
feature_dict[aug_string] = feature
if is_leader:
np.savez(cfg.feature_file, **feature_dict)
if __name__=="__main__":
run()
| augmentation-corruption-fbr_main | experiments/severity_scan_imagenet.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net_jsd import train_net
from overlap.test_net import test_net
from overlap.test_corrupt_net import test_corrupt_net
import numpy as np
import torch
import pickle
from pathlib import Path
log = logging.getLogger(__name__)
@hydra.main(config_path="conf/train_cifar10_jsd.yaml")
def train(cfg):
np.random.seed(cfg.rng_seed)
torch.manual_seed(cfg.rng_seed)
log.info(cfg.pretty())
model = instantiate(cfg.model).cuda()
optimizer = instantiate(cfg.optim, model.parameters())
train_dataset = instantiate(cfg.train)
test_dataset = instantiate(cfg.test)
lr_policy = instantiate(cfg.optim.lr_policy)
train_net(model=model,
optimizer=optimizer,
train_dataset=train_dataset,
batch_size=cfg.train.batch_size,
max_epoch=cfg.optim.max_epoch,
loader_params=cfg.data_loader,
lr_policy=lr_policy,
save_period=cfg.train.checkpoint_period,
weights=cfg.train.weights,
jsd_num=cfg.train.params.jsd_num,
jsd_alpha=cfg.train.jsd_alpha
)
err = test_net(model=model,
test_dataset=test_dataset,
batch_size=cfg.test.batch_size,
loader_params=cfg.data_loader)
test_corrupt_net(model=model,
corrupt_cfg=cfg.corrupt,
batch_size=cfg.corrupt.batch_size,
loader_params=cfg.data_loader,
aug_string=cfg.corrupt.aug_string,
clean_err=err,
mCE_denom=cfg.corrupt.mCE_baseline_file)
if __name__=="__main__":
train()
| augmentation-corruption-fbr_main | experiments/train_cifar10_jsd.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net import train_net
from overlap.test_net import test_net
from overlap.test_corrupt_net import test_corrupt_net
import numpy as np
import torch
import pickle
import os
import omegaconf
from overlap.extract_features import extract_features
from overlap.utils import logging as lu
log = logging.getLogger(__name__)
@hydra.main(config_path="conf/feature_corrupt_error.yaml")
def train(cfg):
np.random.seed(cfg.rng_seed)
torch.manual_seed(cfg.rng_seed)
log.info(cfg.pretty())
model = instantiate(cfg.model).cuda()
optimizer = instantiate(cfg.optim, model.parameters())
train_dataset = instantiate(cfg.train)
test_dataset = instantiate(cfg.test)
lr_policy = instantiate(cfg.optim.lr_policy)
feature_extractor = instantiate(cfg.ft)
feature_extractor.train()
if cfg.aug_feature_file and os.path.exists(cfg.aug_feature_file):
log.info("Found feature file. Loading from {}".format(cfg.aug_feature_file))
data = np.load(cfg.aug_feature_file)
augmentation_features = data['features']
indices = data['indices']
else:
ft_augmentation_dataset = instantiate(cfg.ft_augmentation)
indices = np.random.choice(np.arange(len(ft_augmentation_dataset)), size=cfg.num_images, replace=False)
ft_augmentation_dataset = ft_augmentation_dataset.serialize(indices)
augmentation_features = extract_features(feature_extractor,
ft_augmentation_dataset,
cfg.ft_augmentation.batch_size,
cfg.data_loader,
average=True,
average_num=len(indices))
#nf, lf = augmentation_features.shape
#augmentation_features = np.mean(augmentation_features.reshape(len(indices), nf//len(indices), lf), axis=0)
if cfg.aug_feature_file:
np.savez(cfg.aug_feature_file, features=augmentation_features, indices=indices)
aug_strings = cfg.ft_corrupt.aug_string.split("--")
for aug in aug_strings:
with omegaconf.open_dict(cfg):
ft_corrupt_dataset = instantiate(cfg.ft_corrupt, aug_string=aug)
ft_corrupt_dataset = ft_corrupt_dataset.serialize(indices)
corruption_features = extract_features(feature_extractor,
ft_corrupt_dataset,
cfg.ft_corrupt.batch_size,
cfg.data_loader,
average=True,
average_num=len(indices))
nf, lf = corruption_features.shape
#corruption_features = np.mean(corruption_features.reshape(len(indices), nf//len(indices), lf), axis=0)
augmentation_features = augmentation_features.reshape(-1, 1, lf)
corruption_features = corruption_features.reshape(1, -1, lf)
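        # mmd: distance between the mean feature of the augmented images and the mean
        # feature of the corrupted images; msd: distance from the corruption mean to the
        # closest individual augmentation feature.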
mean_aug = np.mean(augmentation_features.reshape(-1,lf), axis=0)
mean_corr = np.mean(corruption_features.reshape(-1,lf), axis=0)
mmd = np.linalg.norm(mean_aug-mean_corr, axis=0)
msd = np.min(np.linalg.norm(augmentation_features.reshape(-1,lf)-mean_corr.reshape(1,lf),axis=1),axis=0)
stats = {"_type" : aug,
"mmd" : str(mmd),
"msd" : str(msd),
}
lu.log_json_stats(stats)
train_net(model=model,
optimizer=optimizer,
train_dataset=train_dataset,
batch_size=cfg.train.batch_size,
max_epoch=cfg.optim.max_epoch,
loader_params=cfg.data_loader,
lr_policy=lr_policy,
save_period=cfg.train.checkpoint_period,
weights=cfg.train.weights
)
err = test_net(model=model,
test_dataset=test_dataset,
batch_size=cfg.test.batch_size,
loader_params=cfg.data_loader,
output_name='test_epoch')
test_corrupt_net(model=model,
corrupt_cfg=cfg.corrupt,
batch_size=cfg.corrupt.batch_size,
loader_params=cfg.data_loader,
aug_string=cfg.corrupt.aug_string,
clean_err=err,
mCE_denom=cfg.corrupt.mCE_baseline_file)
if __name__=="__main__":
train()
| augmentation-corruption-fbr_main | experiments/feature_corrupt_error.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net import train_net
from overlap.test_net import test_net
import numpy as np
import torch
import pickle
import os
import omegaconf
from overlap.extract_features import extract_features
import submitit
log = logging.getLogger(__name__)
@hydra.main(config_path="conf/severity_scan.yaml")
def run(cfg):
if cfg.num_gpus > 1:
job_env = submitit.JobEnvironment()
rank = job_env.global_rank
world_size = job_env.num_tasks
if rank != 0:
logging.root.handlers = []
try:
torch.cuda.set_device(rank)
torch.distributed.init_process_group(
backend='nccl',
init_method="tcp://{}:{}".format('localhost', 10001),
world_size=world_size,
rank=rank
)
train(cfg, is_leader=(rank==0))
except KeyboardInterrupt:
pass
finally:
torch.distributed.destroy_process_group()
else:
train(cfg, is_leader=True)
def train(cfg, is_leader=True):
np.random.seed(cfg.rng_seed)
torch.manual_seed(cfg.rng_seed)
log.info(cfg.pretty())
cur_device = torch.cuda.current_device()
model = instantiate(cfg.model).cuda(device=cur_device)
if cfg.num_gpus > 1:
model = torch.nn.parallel.DistributedDataParallel(
module=model,
device_ids=[cur_device],
output_device=cur_device
)
optimizer = instantiate(cfg.optim, model.parameters())
if cfg.optim.max_epoch > 0:
train_dataset = instantiate(cfg.train)
else:
train_dataset = None
test_dataset = instantiate(cfg.test)
lr_policy = instantiate(cfg.optim.lr_policy)
with omegaconf.open_dict(cfg):
feature_extractor = instantiate(cfg.ft, num_gpus=cfg.num_gpus, is_leader=is_leader)
feature_extractor.train()
train_net(model=model,
optimizer=optimizer,
train_dataset=train_dataset,
batch_size=cfg.train.batch_size,
max_epoch=cfg.optim.max_epoch,
loader_params=cfg.data_loader,
lr_policy=lr_policy,
save_period=cfg.train.checkpoint_period,
weights=cfg.train.weights,
num_gpus=cfg.num_gpus,
is_leader=is_leader
)
err = test_net(model=model,
test_dataset=test_dataset,
batch_size=cfg.test.batch_size,
loader_params=cfg.data_loader,
output_name='test_epoch',
num_gpus=cfg.num_gpus)
if os.path.exists(cfg.feature_file):
feature_dict = {k : v for k, v in np.load(cfg.feature_file).items()}
else:
feature_dict = {}
indices = np.load(cfg.ft_corrupt.indices_file)
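    # Severity handling mirrors severity_scan_imagenet.py: an optional "<low>_<high>" range
    # in each aug string is interpolated by cfg.severity / 10; otherwise cfg.severity is
    # used as-is.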
for aug in cfg.aug_string.split("--"):
if len(aug.split("-")) > 1:
#log.info("Severity provided in corrupt.aug_string will be weighted by given severity.")
sev = aug.split("-")[1]
if len(sev.split("_")) > 1:
low = float(sev.split("_")[0])
high = float(sev.split("_")[1])
else:
low = 0.0
high = float(sev)
sev_factor = (high - low) * cfg.severity / 10 + low
else:
sev_factor = cfg.severity
aug = aug.split("-")[0]
aug_string = "{}-{}".format(aug, sev_factor)
if aug_string in feature_dict:
continue
with omegaconf.open_dict(cfg.corrupt):
corrupt_dataset = instantiate(cfg.corrupt, aug_string=aug_string)
err = test_net(model=model,
test_dataset=corrupt_dataset,
batch_size=cfg.corrupt.batch_size,
loader_params=cfg.data_loader,
output_name=aug_string,
num_gpus=cfg.num_gpus)
with omegaconf.open_dict(cfg.ft_corrupt):
ft_corrupt_dataset = instantiate(cfg.ft_corrupt, aug_string=aug_string)
ft_corrupt_dataset = ft_corrupt_dataset.serialize(indices)
feature = extract_features(feature_extractor=feature_extractor,
dataset=ft_corrupt_dataset,
batch_size=cfg.ft_corrupt.batch_size,
loader_params=cfg.data_loader,
average=True,
num_gpus=cfg.num_gpus)
feature_dict[aug_string] = feature
if is_leader:
np.savez(cfg.feature_file, **feature_dict)
if __name__=="__main__":
run()
| augmentation-corruption-fbr_main | experiments/severity_scan.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net import train_net
from overlap.test_net import test_net
from overlap.test_corrupt_net import test_corrupt_net
import numpy as np
import torch
import pickle
log = logging.getLogger(__name__)
@hydra.main(config_path="conf/test_cifar10.yaml")
def train(cfg):
np.random.seed(cfg.rng_seed)
torch.manual_seed(cfg.rng_seed)
log.info(cfg.pretty())
model = instantiate(cfg.model).cuda()
test_dataset = instantiate(cfg.test)
checkpoint = torch.load(cfg.weights, map_location='cpu')
model.load_state_dict(checkpoint['model_state'])
err = test_net(model=model,
test_dataset=test_dataset,
batch_size=cfg.test.batch_size,
loader_params=cfg.data_loader)
test_corrupt_net(model=model,
corrupt_cfg=cfg.corrupt,
batch_size=cfg.corrupt.batch_size,
loader_params=cfg.data_loader,
aug_string=cfg.corrupt.aug_string,
clean_err=err,
mCE_denom=cfg.corrupt.mCE_baseline_file)
if __name__=="__main__":
train()
| augmentation-corruption-fbr_main | experiments/test_cifar10.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net import train_net
from overlap.test_net import test_net
from overlap.test_corrupt_net import test_corrupt_net
import numpy as np
import torch
import pickle
import submitit
log = logging.getLogger(__name__)
@hydra.main(config_path="conf/train_imagenet.yaml")
def run(cfg):
if cfg.num_gpus > 1:
job_env = submitit.JobEnvironment()
rank = job_env.global_rank
world_size = job_env.num_tasks
if rank != 0:
logging.root.handlers = []
try:
torch.cuda.set_device(rank)
torch.distributed.init_process_group(
backend='nccl',
init_method="tcp://{}:{}".format('localhost', 10001),
world_size=world_size,
rank=rank
)
train(cfg, is_leader=(rank==0))
except KeyboardInterrupt:
pass
finally:
torch.distributed.destroy_process_group()
else:
train(cfg, is_leader=True)
def train(cfg, is_leader):
np.random.seed(cfg.rng_seed)
torch.manual_seed(cfg.rng_seed)
log.info(cfg.pretty())
cur_device = torch.cuda.current_device()
model = instantiate(cfg.model).cuda(device=cur_device)
if cfg.num_gpus > 1:
model = torch.nn.parallel.DistributedDataParallel(
module=model,
device_ids=[cur_device],
output_device=cur_device
)
optimizer = instantiate(cfg.optim, model.parameters())
if cfg.optim.max_epoch > 0 and cfg.train.weights is None:
print("Loading training set...")
train_dataset = instantiate(cfg.train)
else:
print("Skipping loading the training dataset, 0 epochs of training to perform "
" or pre-trained weights provided.")
train_dataset = None
print("Loading test set...")
test_dataset = instantiate(cfg.test)
lr_policy = instantiate(cfg.optim.lr_policy)
print("Training...")
train_net(model=model,
optimizer=optimizer,
train_dataset=train_dataset,
batch_size=cfg.train.batch_size,
max_epoch=cfg.optim.max_epoch,
loader_params=cfg.data_loader,
lr_policy=lr_policy,
save_period=cfg.train.checkpoint_period,
weights=cfg.train.weights,
num_gpus=cfg.num_gpus,
is_leader=is_leader
)
print("Testing...")
err = test_net(model=model,
test_dataset=test_dataset,
batch_size=cfg.test.batch_size,
loader_params=cfg.data_loader,
num_gpus=cfg.num_gpus)
test_corrupt_net(model=model,
corrupt_cfg=cfg.corrupt,
batch_size=cfg.corrupt.batch_size,
loader_params=cfg.data_loader,
aug_string=cfg.corrupt.aug_string,
clean_err=err,
mCE_denom=cfg.corrupt.mCE_baseline_file,
num_gpus=cfg.num_gpus,
log_name='train_imagenet.log')
if __name__=="__main__":
run()
| augmentation-corruption-fbr_main | experiments/train_imagenet.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from hydra.utils import instantiate
import logging
from overlap.train_net import train_net
from overlap.test_net import test_net
from overlap.test_corrupt_net import test_corrupt_net
import numpy as np
import torch
import pickle
from pathlib import Path
log = logging.getLogger(__name__)
@hydra.main(config_path="conf/train_cifar10.yaml")
def train(cfg):
np.random.seed(cfg.rng_seed)
torch.manual_seed(cfg.rng_seed)
log.info(cfg.pretty())
model = instantiate(cfg.model).cuda()
optimizer = instantiate(cfg.optim, model.parameters())
train_dataset = instantiate(cfg.train)
test_dataset = instantiate(cfg.test)
lr_policy = instantiate(cfg.optim.lr_policy)
train_net(model=model,
optimizer=optimizer,
train_dataset=train_dataset,
batch_size=cfg.train.batch_size,
max_epoch=cfg.optim.max_epoch,
loader_params=cfg.data_loader,
lr_policy=lr_policy,
save_period=cfg.train.checkpoint_period,
weights=cfg.train.weights
)
err = test_net(model=model,
test_dataset=test_dataset,
batch_size=cfg.test.batch_size,
loader_params=cfg.data_loader)
test_corrupt_net(model=model,
corrupt_cfg=cfg.corrupt,
batch_size=cfg.corrupt.batch_size,
loader_params=cfg.data_loader,
aug_string=cfg.corrupt.aug_string,
clean_err=err,
mCE_denom=cfg.corrupt.mCE_baseline_file)
if __name__=="__main__":
train()
| augmentation-corruption-fbr_main | experiments/train_cifar10.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import argparse
import overlap.utils.logging as lu
import decimal
import simplejson
import numpy as np
import omegaconf
from itertools import combinations
from itertools import product
from scipy.special import comb
import math
parser = argparse.ArgumentParser(description="Collect run summaries.")
parser.add_argument('--cifar10c_dir', dest='baseline_dir', required=True)
parser.add_argument('--log_name', dest='log_name', default='severity_scan.log')
def get_data(base_dirs, exclusions=[], log_file='severity_scan.log'):
features = {}
errors = {}
feature_file = 'features.npz'
path_stack = base_dirs
while path_stack:
curr_dir = path_stack.pop()
subdirs = [os.path.join(curr_dir, x) for x in os.listdir(curr_dir) if x[0] != '.']
subdirs = [x for x in subdirs if os.path.isdir(x)]
path_stack.extend(subdirs)
summary_file = os.path.join(curr_dir, log_file)
if os.path.exists(summary_file):
curr_features = np.load(os.path.join(curr_dir, feature_file))
features.update({k : v for k,v in curr_features.items() if k.split("-")[0] not in exclusions})
stats = lu.load_json_stats(summary_file)
curr_errs = {stats[i]["_type"] : stats[i]["top1_err"] for i in range(len(stats))\
if stats[i]["_type"] != "test_epoch" and stats[i]["_type"].split("-")[0] not in exclusions}
errors.update(curr_errs)
return errors, features
def get_target_error(baseline_errs):
errs = [err for b, err in baseline_errs.items()]
return sum(errs)/len(errs)
def dict_avg(list_of_dicts):
out = None
for d in list_of_dicts:
if out is None:
            out = dict(d)  # copy so the averaging below does not mutate the caller's dict (which would also skew the std)
else:
for k in out:
out[k] += d[k]
for k in out:
out[k] /= len(list_of_dicts)
std = None
for d in list_of_dicts:
if std is None:
std = {}
for k in out:
std[k] = (d[k]-out[k])**2
else:
for k in out:
std[k] += (d[k]-out[k])**2
for k in std:
std[k] = math.sqrt(std[k]) / len(list_of_dicts)
return out, std
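# Usage note: dict_avg averages a list of {key: value} dicts entrywise and also returns,
# per key, sqrt(sum of squared deviations) / n as a spread estimate. For example, with
# hypothetical inputs:
#   dict_avg([{'fog': 10.0}, {'fog': 14.0}]) -> ({'fog': 12.0}, {'fog': ~1.41})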
def main():
args = parser.parse_args()
baseline_dir = args.baseline_dir
log_name = args.log_name
baseline_exclusions = ['saturate', 'spatter', 'gaussian_blur', 'speckle_noise']
print("Loading data...")
baseline_dirs = baseline_dir.split(",")
baseline_errs_list = []
baseline_features_list = []
for baseline_dir in baseline_dirs:
baseline_errs, baseline_features = get_data([baseline_dir], log_file=log_name, exclusions=baseline_exclusions)
baseline_errs_list.append(baseline_errs)
baseline_features_list.append(baseline_features)
baseline_errs, baseline_std = dict_avg(baseline_errs_list)
target_error = get_target_error(baseline_errs)
print(target_error)
if __name__=="__main__":
main()
| augmentation-corruption-fbr_main | experiments/tools/get_target_error.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import argparse
import overlap.utils.logging as lu
import decimal
import simplejson
import numpy as np
import omegaconf
parser = argparse.ArgumentParser(description="Collect run summaries.")
parser.add_argument('--dir', dest='run_dir')
parser.add_argument('--filename', dest='summary_name', default='train_cifar10.log')
def main():
args = parser.parse_args()
run_dir = args.run_dir
summary_name = args.summary_name
hydra_config = '.hydra/config.yaml'
files = []
path_stack = [run_dir]
while path_stack:
curr_dir = path_stack.pop()
subdirs = [os.path.join(curr_dir, x) for x in os.listdir(curr_dir) if x[0] != '.']
subdirs = [x for x in subdirs if os.path.isdir(x)]
path_stack.extend(subdirs)
summary_file = os.path.join(curr_dir, summary_name)
if os.path.exists(summary_file):
config_file = os.path.join(curr_dir, hydra_config)
files.append((summary_file, config_file))
for (summary, config) in files:
data = []
cfg = omegaconf.OmegaConf.load(config)
stats = lu.load_json_stats(summary)
        #Run meta-data
data.append(cfg.rng_seed) # ID
data.append(cfg.name) # Name
data.append(summary) # Filename for data
#Model info
data.append(cfg.model['class'].split('.')[-1]) # Model name
data.append(cfg.model.params.depth) # Model depth
data.append(cfg.model.params.widen_factor) # Width factor
# Optimizer info
data.append(cfg.optim.lr_policy['class'].split('.')[-1]) # LR policy
data.append(cfg.optim.base_lr) # Base LR
data.append(cfg.optim.max_epoch) # Num epochs
# Augmentation info
aug_data = []
train_class = cfg.train['class'].split('.')[-1]
if train_class == 'Cifar10Base': # No augmentation
aug_data.append('none')
elif train_class == 'Cifar10Augmix': # Augmix
aug_data.append('augmix')
aug_string = cfg.train.params.aug_string
if aug_string is None:
aug_string = 'standard'
aug_data.append(aug_string) # Aug string
aug_data.append(cfg.train.params.width) # Augmix width
            reported_depth = cfg.train.params.depth * (-1 if cfg.train.params.random_depth else 1)
aug_data.append(reported_depth) # Augmix depth
aug_data.append(cfg.train.params.prob_coeff) # Augmix prob coefficient
severity = cfg.train.params.severity if cfg.train.params.aug_string is None else ''
aug_data.append(severity) # Augmix severity
elif train_class == 'Cifar10Corruption': # Corruption Trained
aug_data.append('corruption_trained')
aug_string = cfg.train.params.aug_string
if aug_string is None:
aug_string = 'extra' if cfg.train.params.include_extra else 'standard'
            aug_data.append(aug_string) # Aug string
aug_data.extend(['' for i in range(6-len(aug_data))])
data.extend(aug_data)
# Feature extraction info
data.extend(['' for i in range(17)])
# JSD info
data.append('no')
data.extend(['',''])
# Batch size info
data.append(cfg.train.batch_size)
data.append(cfg.test.batch_size)
# Errors
clean_error = lu.parse_json_stats(stats, 'test_epoch', 'top1_err')[0]
data.append(clean_error) # Clean error
data.extend(['', '']) # Space for clean error std and C95
mCE = lu.parse_json_stats(stats, 'overall-avg', 'mCE')
mCE = mCE[0] if mCE else ''
data.append(mCE) # mCE
data.extend(['', '']) # Space for mCE std and C95
rmCE = lu.parse_json_stats(stats, 'overall-avg', 'rmCE')
rmCE = rmCE[0] if rmCE else ''
data.append(rmCE) # rmCE
data.extend(['', '']) # Space for rmCE std and C95
avg_error = lu.parse_json_stats(stats, 'overall-avg', 'top1_err')[0]
data.append(avg_error) # Average corruption error
data.extend(['', '']) # Space for corruption error std and C95
data.extend(['', '']) # Space for number in average and type of average
data.append('') # Divider
# Individual errors
# Noise
data.append(lu.parse_json_stats(stats, 'noise-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'gaussian_noise-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'impulse_noise-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'shot_noise-avg', 'top1_err')[0])
# Blur
data.append(lu.parse_json_stats(stats, 'blur-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'defocus_blur-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'glass_blur-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'motion_blur-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'zoom_blur-avg', 'top1_err')[0])
# Weather
data.append(lu.parse_json_stats(stats, 'weather-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'brightness-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'fog-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'frost-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'snow-avg', 'top1_err')[0])
# Digital
data.append(lu.parse_json_stats(stats, 'digital-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'contrast-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'elastic_transform-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'jpeg_compression-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'pixelate-avg', 'top1_err')[0])
# Extra
data.append(lu.parse_json_stats(stats, 'extra-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'gaussian_blur-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'saturate-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'spatter-avg', 'top1_err')[0])
data.append(lu.parse_json_stats(stats, 'speckle_noise-avg', 'top1_err')[0])
data = [str(i) for i in data]
print(",".join(data))
if __name__ == "__main__":
main()
| augmentation-corruption-fbr_main | experiments/tools/summarize.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import argparse
parser = argparse.ArgumentParser(description="Generate random indicies '\
'for sampling from the CIFAR-10 or ImageNet training sets.")
parser.add_argument('--dataset', type=str, required=True,
help='Should be in [\'cifar-10\', \'imagenet\'].')
parser.add_argument('--num', type=int, required=True,
help='Number of indices to generate.')
parser.add_argument('--out', type=str, required=True,
help='Output file. Should be .npy format.')
def main():
args = parser.parse_args()
assert args.dataset in ['cifar-10', 'imagenet'], "Unknown dataset."
max_index = 50000 if args.dataset=='cifar-10' else 1281167
indices = np.random.choice(np.arange(max_index), size=args.num, replace=False)
np.save(args.out, indices)
if __name__=="__main__":
main()
| augmentation-corruption-fbr_main | experiments/tools/sample_image_indices.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import logging
from .utils import logging as lu
from omegaconf import open_dict
from .augmentations.utils import aug_finder
from hydra.utils import instantiate
import numpy as np
import os
import pickle
log = logging.getLogger(__name__)
def test_corrupt_net(model, corrupt_cfg, batch_size, loader_params, aug_string=None, mCE_denom=None, clean_err=None, imagenetc_grouping=True, num_gpus=1, log_name=None):
model.eval()
if aug_string is None:
augs = aug_finder.get_augs_by_tag(['imagenet_c'])
severities = [1,2,3,4,5]
augs = ["{}-{}".format(a.name, s) for a in augs for s in severities]
else:
augs = aug_string.split("--")
if log_name is not None and os.path.exists(log_name):
prestats = lu.load_json_stats(log_name)
else:
prestats = None
errs = []
for aug in augs:
if prestats is not None and len(lu.parse_json_stats(prestats, row_type=aug, key='top1_err')) > 0:
continue
with open_dict(corrupt_cfg):
corrupt_dataset = instantiate(corrupt_cfg, aug_string=aug)
sampler = torch.utils.data.distributed.DistributedSampler(corrupt_dataset)\
if num_gpus > 1 else None
loader = torch.utils.data.DataLoader(
corrupt_dataset,
batch_size=batch_size,
shuffle=False,
sampler=sampler,
num_workers=loader_params.num_workers,
pin_memory=loader_params.pin_memory,
drop_last=False
)
num_correct = 0
for curr_iter, (inputs, labels) in enumerate(loader):
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
preds = model(inputs)
correct = torch.sum(torch.argmax(preds, dim=1)==labels)
if num_gpus > 1:
torch.distributed.all_reduce(correct)
num_correct += correct.item()
err = 100 * (1 - num_correct / len(corrupt_dataset))
stats = {'_type' : aug, 'top1_err' : err}
lu.log_json_stats(stats)
errs.append(err)
    # Aggregate statistics: average each corruption over its severities, then compute mCE/rmCE when a baseline is provided
if mCE_denom is not None:
mCE_denom = pickle.load(open(os.path.join(os.path.dirname(__file__), '../baseline_data/', mCE_denom), 'rb'))
errs = np.array(errs)
aug_names = [a.split("-")[0] for a in augs]
unique_aug_names = list(set(aug_names))
avg_errs = [np.mean(errs[[i for i, a in enumerate(aug_names) if a==u]]) for u in unique_aug_names]
avg_errs = np.array(avg_errs)
mCE = None
rmCE = None
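    # mCE/rmCE follow the ImageNet-C convention: mCE divides this model's average error per
    # corruption by a baseline model's error on the same corruption (x100); rmCE does the
    # same with the error increase over clean data for both models.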
if mCE_denom:
mCE = [100 * avg_errs[i] / mCE_denom[a] for i, a in enumerate(unique_aug_names)]
mCE = np.array(mCE)
if clean_err:
rmCE = [100 * (avg_errs[i] - clean_err) / (mCE_denom[a] - mCE_denom['clean'])\
for i, a in enumerate(unique_aug_names)]
rmCE = np.array(rmCE)
for i, a in enumerate(unique_aug_names):
stats = {'_type' : a + '-avg', 'top1_err' : avg_errs[i]}
if mCE is not None:
stats['mCE'] = mCE[i]
if rmCE is not None:
stats['rmCE'] = rmCE[i]
lu.log_json_stats(stats)
if imagenetc_grouping:
for aug_type in ['blur', 'digital', 'noise', 'weather', 'extra']:
aug_indices = [i for i, a in enumerate(unique_aug_names)\
if aug_type in aug_finder.get_aug_by_name(a).tags]
err_for_type = np.mean(avg_errs[aug_indices])
stats = {'_type' : aug_type + '-avg', 'top1_err' : err_for_type}
if mCE is not None:
mCE_for_type = np.mean(mCE[aug_indices])
stats['mCE'] = mCE_for_type
if rmCE is not None:
rmCE_for_type = np.mean(rmCE[aug_indices])
stats['rmCE'] = rmCE_for_type
lu.log_json_stats(stats)
if imagenetc_grouping:
indices = [i for i, a in enumerate(unique_aug_names)\
if 'extra' not in aug_finder.get_aug_by_name(a).tags]
else:
indices = [i for i, a in enumerate(unique_aug_names)]
overall_avg = np.mean(avg_errs[indices])
stats = {'_type' : 'overall-avg', 'top1_err' : overall_avg}
if mCE is not None:
overall_mCE = np.mean(mCE[indices])
stats['mCE'] = overall_mCE
if rmCE is not None:
overall_rmCE = np.mean(rmCE[indices])
stats['rmCE'] = overall_rmCE
lu.log_json_stats(stats)
| augmentation-corruption-fbr_main | experiments/overlap/test_corrupt_net.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import logging
from .utils import logging as lu
log = logging.getLogger(__name__)
def test_net(model, test_dataset, batch_size, loader_params, output_name='test_epoch', num_gpus=1):
model.eval()
sampler = torch.utils.data.distributed.DistributedSampler(test_dataset)\
if num_gpus > 1 else None
loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=batch_size,
shuffle=False,
sampler=sampler,
num_workers=loader_params.num_workers,
pin_memory=loader_params.pin_memory,
drop_last=False
)
num_correct = 0
num_total = 0
for curr_iter, (inputs, labels) in enumerate(loader):
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
preds = model(inputs)
correct = torch.sum(torch.argmax(preds, dim=1)==labels)
if num_gpus > 1:
torch.distributed.all_reduce(correct)
num_correct += correct.item()
err = 100 * (1 - num_correct / len(test_dataset))
stats = {'_type' : output_name, 'top1_err' : err}
lu.log_json_stats(stats)
return err
| augmentation-corruption-fbr_main | experiments/overlap/test_net.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class ResHead(nn.Module):
"""ResNet head."""
def __init__(self, w_in, nc):
super(ResHead, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(w_in, nc, bias=True)
def forward(self, x):
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
self.features = x
x = self.fc(x)
return x
class BottleneckTransform(nn.Module):
"""Bottleneck transformation: 1x1, 3x3, 1x1"""
def __init__(self, w_in, w_out, stride, w_b, num_gs,
bn_params, stride_1x1, relu_inplace):
super(BottleneckTransform, self).__init__()
self._construct(w_in, w_out, stride, w_b, num_gs,
bn_params, stride_1x1, relu_inplace)
def _construct(self, w_in, w_out, stride, w_b, num_gs,
bn_params, stride_1x1, relu_inplace):
# MSRA -> stride=2 is on 1x1; TH/C2 -> stride=2 is on 3x3
(str1x1, str3x3) = (stride, 1) if stride_1x1 else (1, stride)
# 1x1, BN, ReLU
self.a = nn.Conv2d(
w_in, w_b, kernel_size=1,
stride=str1x1, padding=0, bias=False
)
self.a_bn = torch.nn.BatchNorm2d(w_b, **bn_params)
self.a_relu = nn.ReLU(inplace=relu_inplace)
# 3x3, BN, ReLU
self.b = nn.Conv2d(
w_b, w_b, kernel_size=3,
stride=str3x3, padding=1, groups=num_gs, bias=False
)
self.b_bn = torch.nn.BatchNorm2d(w_b, **bn_params)
self.b_relu = nn.ReLU(inplace=relu_inplace)
# 1x1, BN
self.c = nn.Conv2d(
w_b, w_out, kernel_size=1,
stride=1, padding=0, bias=False
)
self.c_bn = torch.nn.BatchNorm2d(w_out, **bn_params)
self.c_bn.final_bn = True
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class ResBlock(nn.Module):
"""Residual block: x + F(x)"""
def __init__(
self, w_in, w_out, stride, w_b, num_gs, bn_params, stride_1x1, relu_inplace
):
super(ResBlock, self).__init__()
self._construct(w_in, w_out, stride, w_b, num_gs,
bn_params, stride_1x1, relu_inplace)
def _add_skip_proj(self, w_in, w_out, stride, bn_params):
self.proj = nn.Conv2d(
w_in, w_out, kernel_size=1,
stride=stride, padding=0, bias=False
)
self.bn = torch.nn.BatchNorm2d(w_out, **bn_params)
def _construct(self, w_in, w_out, stride, w_b, num_gs,
bn_params, stride_1x1, relu_inplace):
# Use skip connection with projection if shape changes
self.proj_block = (w_in != w_out) or (stride != 1)
if self.proj_block:
self._add_skip_proj(w_in, w_out, stride, bn_params)
self.f = BottleneckTransform(w_in, w_out, stride, w_b, num_gs,
bn_params, stride_1x1, relu_inplace)
self.relu = nn.ReLU(relu_inplace)
def forward(self, x):
fx = self.f(x)
if self.proj_block:
x = self.bn(self.proj(x))
x = x + fx
x = self.relu(x)
return x
class ResStage(nn.Module):
"""Stage of ResNet."""
def __init__(self, w_in, w_out, stride, d, w_b, num_gs,
bn_params, stride_1x1, relu_inplace):
super(ResStage, self).__init__()
self._construct(w_in, w_out, stride, d, w_b, num_gs,
bn_params, stride_1x1, relu_inplace)
def _construct(self, w_in, w_out, stride, d, w_b, num_gs, bn_params, stride_1x1, relu_inplace):
# Construct the blocks
for i in range(d):
# Stride and w_in apply to the first block of the stage
b_stride = stride if i == 0 else 1
b_w_in = w_in if i == 0 else w_out
# Construct the block
res_block = ResBlock(
b_w_in, w_out, b_stride, w_b, num_gs,
bn_params, stride_1x1, relu_inplace
)
self.add_module('b{}'.format(i + 1), res_block)
def forward(self, x):
for block in self.children():
x = block(x)
return x
class ResStem(nn.Module):
"""Stem of ResNet."""
def __init__(self, w_in, w_out, bn_params, relu_inplace):
super(ResStem, self).__init__()
self._construct_imagenet(w_in, w_out, bn_params, relu_inplace)
def _construct_imagenet(self, w_in, w_out, bn_params, relu_inplace):
# 7x7, BN, ReLU, maxpool
self.conv = nn.Conv2d(
w_in, w_out, kernel_size=7,
stride=2, padding=3, bias=False
)
self.bn = torch.nn.BatchNorm2d(w_out, **bn_params)
self.relu = nn.ReLU(relu_inplace)
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
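# Number of blocks per stage for each supported ImageNet ResNet depth. _IN_STAGE_DS is
# referenced in ResNetPycls below but not defined elsewhere in this file, so the standard
# pycls mapping is assumed here.
_IN_STAGE_DS = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3), 152: (3, 8, 36, 3)}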
class ResNetPycls(nn.Module):
"""ResNeXt model."""
def __init__(self, depth=50, width_factor=1, num_groups=1, width_per_group=64,
num_classes=1000, bn_params={'eps':1e-5, 'momentum':0.1, 'affine':True},
stride_1x1=False, relu_inplace=True, final_gamma=True
):
super(ResNetPycls, self).__init__()
self.depth = depth
self.width = width_factor
self.ng = num_groups
self.width_per_group = width_per_group
self.num_classes = num_classes
self.bn_params = bn_params
self.stride_1x1 = stride_1x1
self.relu_inplace = relu_inplace
self._construct_imagenet()
        def init_weights(m, final_gamma):
"""Performs ResNet-style weight initialization."""
if isinstance(m, nn.Conv2d):
# Note that there is no bias due to BN
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(mean=0.0, std=math.sqrt(2.0 / fan_out))
if hasattr(m, 'bias') and m.bias is not None:
m.bias.data.fill_(0.0)
elif isinstance(m, nn.BatchNorm2d):
zero_init_gamma = (
hasattr(m, 'final_bn') and m.final_bn and
final_gamma
)
m.weight.data.fill_(0.0 if zero_init_gamma else 1.0)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(mean=0.0, std=0.01)
m.bias.data.zero_()
self.apply(lambda m : init_weights(m, final_gamma))
def _construct_imagenet(self):
# Retrieve the number of blocks per stage
(d1, d2, d3, d4) = _IN_STAGE_DS[self.depth]
# Compute the initial bottleneck width
num_gs = self.ng
w_b = self.width_per_group * num_gs
w1, w2, w3, w4 = [self.width * w for w in [256, 512, 1024, 2048]]
# Stem: (N, 3, 224, 224) -> (N, 64, 56, 56)
self.stem = ResStem(w_in=3, w_out=64, bn_params=self.bn_params, relu_inplace=self.relu_inplace)
# Stage 1: (N, 64, 56, 56) -> (N, 256, 56, 56)
self.s1 = ResStage(
w_in=64, w_out=w1, stride=1, d=d1,
w_b=w_b, num_gs=num_gs,
bn_params=self.bn_params, stride_1x1=self.stride_1x1, relu_inplace=self.relu_inplace
)
# Stage 2: (N, 256, 56, 56) -> (N, 512, 28, 28)
self.s2 = ResStage(
w_in=w1, w_out=w2, stride=2, d=d2,
w_b=w_b * 2, num_gs=num_gs,
bn_params=self.bn_params, stride_1x1=self.stride_1x1, relu_inplace=self.relu_inplace
)
        # Stage 3: (N, 512, 28, 28) -> (N, 1024, 14, 14)
self.s3 = ResStage(
w_in=w2, w_out=w3, stride=2, d=d3,
w_b=w_b * 4, num_gs=num_gs,
bn_params=self.bn_params, stride_1x1=self.stride_1x1, relu_inplace=self.relu_inplace
)
# Stage 4: (N, 1024, 14, 14) -> (N, 2048, 7, 7)
self.s4 = ResStage(
w_in=w3, w_out=w4, stride=2, d=d4,
w_b=w_b * 8, num_gs=num_gs,
bn_params=self.bn_params, stride_1x1=self.stride_1x1, relu_inplace=self.relu_inplace
)
# Head: (N, 2048, 7, 7) -> (N, num_classes)
self.head = ResHead(w_in=w4, nc=self.num_classes)
def forward(self, x):
for module in self.children():
x = module(x)
if isinstance(module, ResHead):
self.features = module.features
return x
| augmentation-corruption-fbr_main | experiments/overlap/models.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import logging
from .utils import logging as lu
import numpy as np
import os
log = logging.getLogger(__name__)
def distributed_gather_features(curr_features, batch_size, num_gpus):
gather_list = [torch.zeros((batch_size, curr_features.size(-1)), device=curr_features.device)\
for i in range(num_gpus)]
count = curr_features.size(0)
if count < batch_size:
curr_features = torch.cat((curr_features, torch.zeros((batch_size - count, curr_features.size(-1)), device=curr_features.device)), dim=0)
torch.distributed.all_gather(gather_list, curr_features)
count = torch.Tensor([count]).cuda()
torch.distributed.all_reduce(count)
count = int(count.item())
# Here we use that the distributed data sampler interleaves sampling across replicas
curr_features = torch.stack(gather_list, dim=1).reshape(-1, curr_features.size(-1))
curr_features = curr_features[:count,:]
return curr_features
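# extract_features runs the feature extractor over a dataset and either (a) averages the
# features over the whole dataset (average=True, average_num=None), (b) averages each
# fixed transform across images (average=True with average_num set to the number of
# images in a serialized dataset), or (c) returns one feature row per example.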
def extract_features(feature_extractor, dataset, batch_size, loader_params, average=True, num_gpus=1, average_num=None, preemption_protection=False, is_leader=True):
sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=False)\
if num_gpus > 1 else None
loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=False,
sampler=sampler,
num_workers=loader_params.num_workers,
pin_memory=loader_params.pin_memory,
drop_last=False
)
features = None
count = 0
starting_iter = -1
if preemption_protection and os.path.exists('feature_extraction.tmp.npz'):
        data = np.load('feature_extraction.tmp.npz')
features = torch.Tensor(data['features']).cuda()
count = data['count']
starting_iter = data['curr_iter']
for curr_iter, (inputs, labels) in enumerate(loader):
if preemption_protection and curr_iter <= starting_iter:
continue
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
curr_features = feature_extractor.extract(inputs)
if average and average_num is None:
curr_features = torch.sum(curr_features, dim=0)
if num_gpus > 1:
torch.distributed.all_reduce(curr_features)
features = (features + curr_features.detach().cpu()) if features is not None else curr_features.detach().cpu()
elif average:
num_features = len(dataset) // average_num
if num_gpus > 1:
curr_features = distributed_gather_features(curr_features, batch_size, num_gpus)
if features is None:
features = torch.zeros(num_features, curr_features.size(-1))
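            # Accumulate each feature into the slot for its transform index. The serialized
            # dataset is ordered image-major (all transforms of image 0, then image 1, ...),
            # so the running `count` cycles through transform slots and a batch may wrap
            # around the end of the slot array, which the branches below handle.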
if count + curr_features.size(0) > num_features:
remainder = count + curr_features.size(0) - num_features
features[count:, :] += curr_features[:num_features-count,:].detach().cpu()
offset = 0
while remainder > num_features:
features += curr_features[offset+num_features-count:2*num_features-count+offset].detach().cpu()
offset += num_features
remainder -= num_features
features[:remainder,:] += curr_features[offset+num_features-count:,:].detach().cpu()
count = remainder
else:
features[count:count+curr_features.size(0),:] += curr_features.detach().cpu()
count += curr_features.size(0)
count = count % num_features
else:
if num_gpus > 1:
curr_features = distributed_gather_features(curr_features, batch_size, num_gpus)
if features is None:
features = torch.zeros(len(dataset), curr_features.size(-1))
features[count:count+curr_features.size(0),:] = curr_features.detach().cpu()
count += curr_features.size(0)
if preemption_protection and curr_iter % 5000 == 0 and is_leader:
np.savez('feature_extraction.tmp.npz', features=features.detach().cpu().numpy(), count=count, curr_iter=curr_iter)
if average and average_num is None:
features /= len(dataset)
elif average:
features /= average_num
return features.detach().cpu().numpy()
| augmentation-corruption-fbr_main | experiments/overlap/extract_features.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import augmentations as aug
from .augmentations.utils.converters import NumpyToTensor, PilToNumpy
from .augmentations.utils.aug_finder import get_augs_by_tag, parse_aug_string, get_aug_by_name
from .augmentations.utils.severity import sample_level, int_parameter, float_parameter
from .augmentations import pil, compositions, obscure, patch_gaussian, standard_augmentations
import torchvision as tv
import torch
import numpy as np
import os
from PIL import Image, ImageOps
CIFAR_MEAN = [125.3/255, 123.0/255, 113.9/255]
CIFAR_STD = [63.0/255, 62.1/255, 66.7/255]
#This is in RGB order since that is the standard for PIL
IM_MEAN = [0.485, 0.456, 0.406]
IM_STD = [0.229, 0.224, 0.225]
class Cifar10Base(torch.utils.data.Dataset):
def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None, augmentation=None, transform_file=None):
assert split in ['train', 'test'], "Unknown split {}".format(split)
self.train = True if split=='train' else False
self.train_aug = self.train if train_aug is None else train_aug
self.transform_weights = None
if self.train_aug:
train_transform = [
tv.transforms.RandomHorizontalFlip(),
tv.transforms.RandomCrop(im_size, padding=4)
]
else:
train_transform = []
self.pretransform = tv.transforms.Compose(train_transform + [PilToNumpy()])
if augmentation is None:
self.aug = aug.identity.Identity()
else:
self.aug = augmentation
self.posttransform = tv.transforms.Compose([
NumpyToTensor(),
tv.transforms.Normalize(CIFAR_MEAN, CIFAR_STD)
])
if transform_file:
transforms = np.load(transform_file)
self.transform_list = transforms[:num_transforms]\
if num_transforms is not None else transforms
elif num_transforms:
self.transform_list = self.build_transform_list(num_transforms)
else:
self.transform_list = None
self.dataset = tv.datasets.CIFAR10(data_path, self.train, download=False)
def build_transform_list(self, num_transforms):
transforms = [self.aug.convert_to_numpy(self.aug.sample_parameters()) for i in range(num_transforms)]
return np.stack(transforms, axis=0)
def get_random_transform(self):
if self.transform_list is None:
return self.aug.sample_parameters()
elif self.transform_weights is None:
params = self.transform_list[np.random.randint(low=0, high=len(self.transform_list))]
return self.aug.convert_from_numpy(params)
else:
index = np.random.choice(np.arange(len(self.transform_list)), p=self.transform_weights)
params = self.transform_list[index]
return self.aug.convert_from_numpy(params)
def __getitem__(self, index):
pre_im, label = self.dataset[index]
pre_im = self.pretransform(pre_im)
params = self.get_random_transform()
return self.posttransform(self.aug.transform(pre_im, **params)), label
def __len__(self):
return len(self.dataset)
def fixed_transform(self, index, transform_index):
assert self.transform_list is not None, "Must have a fixed transform list to generate fixed transforms."
im, label = self.dataset[index]
im = self.pretransform(im)
params = self.aug.convert_from_numpy(self.transform_list[transform_index])
im = self.aug.transform(im, **params)
return self.posttransform(im), label
def serialize(self, indices=None):
'''
Returns a new dataset that is all fixed transforms in order,
applied to each index in order.
'''
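        # Index layout of the serialized view: serial_index = image_position * num_transforms
        # + transform_index, so iterating over it applies every fixed transform to the first
        # image, then every fixed transform to the second image, and so on.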
class SerialDataset(torch.utils.data.Dataset):
def __init__(self, dataset, indices=None):
self.dataset = dataset
self.indices = indices
def __getitem__(self, index):
im_idx = index // len(self.dataset.transform_list)
im_idx = self.indices[im_idx] if self.indices is not None else im_idx
param_idx = index % len(self.dataset.transform_list)
return self.dataset.fixed_transform(im_idx, param_idx)
def __len__(self):
if self.indices is not None:
return len(self.indices) * len(self.dataset.transform_list)
else:
return len(self.dataset) * len(self.dataset.transform_list)
return SerialDataset(self, indices)
class Cifar10Augmix(Cifar10Base):
def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None,
aug_string=None, width=3, depth=3, random_depth=True, prob_coeff=1.0,
severity=3, transform_file=None):
self.aug_string = aug_string
self.width = width
self.depth = depth
self.prob_coeff = prob_coeff
self.random_depth = random_depth
self.severity = severity
if aug_string is not None:
augs = parse_aug_string(aug_string, im_size)
else:
augs = get_augs_by_tag(['augmix'])
augs = [a(severity=severity, im_size=im_size) for a in augs]
augmentation = compositions.Augmix(
augmentation_list=augs,
width=width,
max_depth=depth,
random_depth=random_depth,
prob_coeff=prob_coeff
)
super(Cifar10Augmix, self).__init__(data_path, split, im_size, train_aug, num_transforms,
augmentation, transform_file=transform_file)
class Cifar10RandomSample(Cifar10Base):
def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None,
aug_string=None, severity=3, weights=None):
self.aug_string = aug_string
if aug_string is not None:
augs = parse_aug_string(aug_string, im_size)
else:
augs = get_augs_by_tag(['augmix'])
augs = [a(severity=severity, im_size=im_size) for a in augs]
augmentation = compositions.RandomSample(
augmentation_list=augs,
weights=weights
)
super(Cifar10RandomSample, self).__init__(data_path, split, im_size, train_aug, num_transforms, augmentation)
class Cifar10Corruption(Cifar10Base):
'''
    Corruption datasets differ from the base dataset in three ways: they sample each
    corruption at its fixed maximum intensity instead of uniformly between a low value
    and that maximum, they generate fixed transforms in a balanced, deterministic order
    (and can report the corruption and severity of a given transform index), and they
    carry the extra metadata needed for the frost corruption.
'''
def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None,
aug_string=None, frost_path=None, include_extra=True, random_transforms=False):
self.include_extra = include_extra
self.random_transforms = random_transforms
if aug_string is not None:
self.aug_list = parse_aug_string(aug_string, im_size, max_intensity=True, frost_path=frost_path)
else:
augs = get_augs_by_tag(['imagenet_c'], [] if include_extra else ['extra'])
severities = [1,2,3,4,5]
self.aug_list = [a(severity=s, im_size=im_size, max_intensity=True, frost_path=frost_path)\
for a in augs for s in severities]
augmentation = compositions.RandomSample(
augmentation_list=self.aug_list
)
super(Cifar10Corruption, self).__init__(data_path, split, im_size, train_aug, num_transforms, augmentation)
def build_transform_list(self, num_transforms):
if self.random_transforms:
return super(Cifar10Corruption, self).build_transform_list(num_transforms)
assert num_transforms % len(self.aug_list) == 0,\
"The total number of augs needs to divide into the total number of transforms."
transform_list = None
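        # Build a balanced, deterministic transform list: each (corruption, severity) pair in
        # aug_list owns an equal, contiguous block of num_transforms // len(aug_list) entries.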
for i in range(num_transforms):
transform_idx = i // (num_transforms // len(self.aug_list))
transform_params = self.aug_list[transform_idx].sample_parameters()
curr_record = self.aug.convert_to_numpy({
'idx' : transform_idx,
'transform_params' : transform_params
}).reshape(1,-1)
transform_list = np.concatenate((transform_list, curr_record), axis=0)\
if transform_list is not None else curr_record
return transform_list
def get_corruption(self, transform_index):
aug_type_index = transform_index // (len(self.transform_list) // len(self.aug_list))
return self.aug_list[aug_type_index].name, self.aug_list[aug_type_index].severity
class Cifar10AutoAugment(Cifar10Base):
def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None, subpolicy_list=None, add_cutout=False, transform_file=None):
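        # stom maps an AutoAugment magnitude level sev in [0, 10] linearly onto [low, high].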
def stom(low, high, sev):
return sev / 10 * (high - low) + low
size = im_size
init = lambda transform : transform(0, size)
tn = 150/331 * im_size
if subpolicy_list is None:
subpolicy_list = [
[(init(pil.Invert), 0.1, None, None), (init(pil.Contrast), 0.2, stom(0,0.9,6), 1)],
[(init(pil.Rotate), 0.7, stom(0,30,2), 0), (init(pil.TranslateX), 0.3, stom(0,tn,9), 0)],
[(init(pil.Sharpness), 0.8, stom(0,0.9,1), 1), (init(pil.Sharpness), 0.9, stom(0,0.9,3), 1)],
[(init(pil.ShearY), 0.5, stom(0,0.3,8), 0), (init(pil.TranslateY), 0.7, stom(0,tn,9), 0)],
[(init(pil.AutoContrast), 0.5, None, None), (init(pil.Equalize), 0.9, None, None)],
[(init(pil.ShearY), 0.2, stom(0,0.3,7), 0), (init(pil.Posterize), 0.3, int(stom(4,8,7)), None)],
[(init(pil.ColorBalance), 0.4, stom(0,0.9,3),1), (init(pil.Brightness), 0.6, stom(0,0.9,7),1)],
[(init(pil.Sharpness), 0.3, stom(0,0.9,9),1), (init(pil.Brightness), 0.7, stom(0,0.9,9),1)],
[(init(pil.Equalize), 0.6, None, None), (init(pil.Equalize), 0.5, None, None)],
[(init(pil.Contrast), 0.6, stom(0,0.9,7),1), (init(pil.Sharpness), 0.6, stom(0,0.9,5),1)],
[(init(pil.ColorBalance), 0.7, stom(0,0.9,7),1), (init(pil.TranslateX), 0.5, stom(0,tn,8),0)],
[(init(pil.Equalize), 0.3, None, None), (init(pil.AutoContrast), 0.4, None, None)],
[(init(pil.TranslateY), 0.4, stom(0,tn,3),0), (init(pil.Sharpness), 0.2, stom(0,0.9,6),1)],
[(init(pil.Brightness), 0.9, stom(0,0.9,6),1), (init(pil.ColorBalance), 0.2, stom(0,0.9,8),1)],
[(init(pil.Solarize), 0.5, stom(256,0,2),None), (init(pil.Invert), 0.0, None,None)],
[(init(pil.Equalize), 0.2, None, None), (init(pil.AutoContrast), 0.6, None, None)],
[(init(pil.Equalize), 0.2, None, None), (init(pil.Equalize), 0.6, None, None)],
[(init(pil.ColorBalance), 0.9, stom(0,0.9,9),1), (init(pil.Equalize), 0.6, None, None)],
[(init(pil.AutoContrast), 0.8, None, None), (init(pil.Solarize), 0.2, stom(256,0,8), None)],
[(init(pil.Brightness), 0.1, stom(0,0.9,3),1), (init(pil.ColorBalance), 0.7, stom(0,0.9,0),1)],
[(init(pil.Solarize), 0.4, stom(256,0,5), None), (init(pil.AutoContrast), 0.9, None, None)],
[(init(pil.TranslateY), 0.9, stom(0,tn,9), None), (init(pil.TranslateY), 0.7, stom(0,tn,9),0)],
[(init(pil.AutoContrast), 0.9, None, None), (init(pil.Solarize), 0.8, stom(256,0,3), None)],
[(init(pil.Equalize), 0.8, None, None), (init(pil.Invert), 0.1, None, None)],
[(init(pil.TranslateY), 0.7, stom(0,tn,9), 0), (init(pil.AutoContrast), 0.9, None, None)]
]
aug = compositions.AutoAugment(subpolicy_list)
if add_cutout:
cutout = obscure.CutOut(severity=10, im_size=im_size, max_intensity=True)
aug = compositions.ComposeSerially([aug, cutout])
super(Cifar10AutoAugment, self).__init__(data_path, split, im_size, train_aug, num_transforms,
aug, transform_file=transform_file)
class Cifar10PatchGaussian(Cifar10Base):
def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None, transform_file=None, patch_width=25, patch_sigma=1.0, max_width=True):
if patch_width is not None:
aug = patch_gaussian.PatchGaussian(
severity=None,
im_size=im_size,
max_intensity=max_width,
sigma=patch_sigma,
width=patch_width
)
else:
aug = patch_gaussian.Gaussian(
severity = patch_sigma * 10,
im_size=im_size,
max_intensity=max_width
)
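# Standard CIFAR crop-and-flip is composed with the noise here, so the base class is
# constructed with train_aug=False to avoid applying it twice.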
if train_aug or (split=='train' and train_aug is None):
train = standard_augmentations.Cifar10CropAndFlip(severity=None, im_size=im_size)
aug = compositions.ComposeSerially([aug, train])
super(Cifar10PatchGaussian, self).__init__(data_path, split, im_size, False, num_transforms,
aug, transform_file=transform_file)
class ImageNetBase(torch.utils.data.Dataset):
def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None, augmentation=None, rgb_to_bgr=True):
assert split in ['train', 'val'], "Unknown split {}".format(split)
self.train = True if split=='train' else False
self.train_aug = self.train if train_aug is None else train_aug
data_path = os.path.join(data_path, split)
if self.train_aug:
train_transform = [
tv.transforms.RandomResizedCrop(im_size, scale=(0.08,1.0)),
tv.transforms.RandomHorizontalFlip(),
]
else:
train_transform = [
tv.transforms.Resize(256),
tv.transforms.CenterCrop(im_size)
]
def RGB_to_BGR(image):
return image[[2,1,0],:,:]
self.pretransform = tv.transforms.Compose(train_transform + [PilToNumpy()])
if augmentation is None:
self.aug = aug.identity.Identity()
else:
self.aug = augmentation
self.posttransform = tv.transforms.Compose([
NumpyToTensor(),
tv.transforms.Normalize(IM_MEAN, IM_STD)] +
([RGB_to_BGR] if rgb_to_bgr else []) #PyCls imagenet models are trained in BGR input order
)
self.transform_list = self.build_transform_list(num_transforms)\
if num_transforms is not None else None
self.dataset = tv.datasets.ImageFolder(data_path, None)
def build_transform_list(self, num_transforms):
transforms = [self.aug.convert_to_numpy(self.aug.sample_parameters()) for i in range(num_transforms)]
return np.stack(transforms, axis=0)
def get_random_transform(self):
if self.transform_list is None:
return self.aug.sample_parameters()
else:
params = self.transform_list[np.random.randint(low=0, high=len(self.transform_list))]
return self.aug.convert_from_numpy(params)
def __getitem__(self, index):
pre_im, label = self.dataset[index]
pre_im = self.pretransform(pre_im)
params = self.get_random_transform()
return self.posttransform(self.aug.transform(pre_im, **params)), label
def __len__(self):
return len(self.dataset)
def fixed_transform(self, index, transform_index):
assert self.transform_list is not None, "Must have a fixed transform list to generate fixed transforms."
im, label = self.dataset[index]
im = self.pretransform(im)
params = self.aug.convert_from_numpy(self.transform_list[transform_index])
im = self.aug.transform(im, **params)
return self.posttransform(im), label
def serialize(self, indices=None):
'''
Returns a new dataset that applies every fixed transform, in order,
to each dataset index in order.
'''
class SerialDataset(torch.utils.data.Dataset):
def __init__(self, dataset, indices=None):
self.dataset = dataset
self.indices = indices
def __getitem__(self, index):
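# Consecutive indices run through every fixed transform of one image before moving
# on to the next image (or the next entry of `indices`, if given).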
im_idx = index // len(self.dataset.transform_list)
im_idx = self.indices[im_idx] if self.indices is not None else im_idx
param_idx = index % len(self.dataset.transform_list)
return self.dataset.fixed_transform(im_idx, param_idx)
def __len__(self):
if self.indices is not None:
return len(self.indices) * len(self.dataset.transform_list)
else:
return len(self.dataset) * len(self.dataset.transform_list)
return SerialDataset(self, indices)
class ImageNetCorruption(ImageNetBase):
'''
Corruptions differ in three ways: they sample at a fixed maximum intensity
instead of randomly between a low value and that maximum, they generate
fixed transforms in a balanced order (and can report the corruption name and
severity for a given transform index), and they carry the metadata needed for
the frost corruption.
'''
def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None,
aug_string=None, frost_path=None, include_extra=True, rgb_to_bgr=True):
self.include_extra = include_extra
if aug_string is not None:
self.aug_list = parse_aug_string(aug_string, im_size, max_intensity=True, frost_path=frost_path)
else:
augs = get_augs_by_tag(['imagenet_c'], [] if include_extra else ['extra'])
severities = [1,2,3,4,5]
self.aug_list = [a(severity=s, im_size=im_size, max_intensity=True, frost_path=frost_path)\
for a in augs for s in severities]
augmentation = compositions.RandomSample(
augmentation_list=self.aug_list
)
super(ImageNetCorruption, self).__init__(data_path, split, im_size, train_aug, num_transforms,
augmentation, rgb_to_bgr)
def build_transform_list(self, num_transforms):
assert num_transforms % len(self.aug_list) == 0,\
"The total number of augs needs to divide into the total number of transforms."
transform_list = None
for i in range(num_transforms):
transform_idx = i // (num_transforms // len(self.aug_list))
transform_params = self.aug_list[transform_idx].sample_parameters()
curr_record = self.aug.convert_to_numpy({
'idx' : transform_idx,
'transform_params' : transform_params
}).reshape(1,-1)
transform_list = np.concatenate((transform_list, curr_record), axis=0)\
if transform_list is not None else curr_record
return transform_list
def get_corruption(self, transform_index):
aug_type_index = transform_index // (len(self.transform_list) // len(self.aug_list))
return self.aug_list[aug_type_index].name, self.aug_list[aug_type_index].severity
class ImageNetPatchGaussian(ImageNetBase):
def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None, patch_width=250, patch_sigma=1.0, max_width=False, rgb_to_bgr=True):
if patch_width is not None:
aug = patch_gaussian.PatchGaussian(
severity=None,
im_size=im_size,
max_intensity=max_width,
sigma=patch_sigma,
width=patch_width
)
else:
aug = patch_gaussian.Gaussian(
severity = patch_sigma * 10,
im_size=im_size,
max_intensity=max_width
)
super(ImageNetPatchGaussian, self).__init__(data_path, split, im_size, train_aug, num_transforms,
aug, rgb_to_bgr=rgb_to_bgr)
class ImageNetAutoAugment(ImageNetBase):
def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None, subpolicy_list=None, rgb_to_bgr=True):
def stom(low, high, sev):
return sev / 10 * (high - low) + low
size = im_size
init = lambda transform : transform(0, size)
tn = 150/331 * im_size
if subpolicy_list is None:
subpolicy_list = [
[(init(pil.Posterize), 0.4, int(stom(4,8,8)), None), (init(pil.Rotate), 0.6, stom(0,30,9),0)],
[(init(pil.Solarize), 0.6, stom(256,0,5), None), (init(pil.AutoContrast), 0.6, None,None)],
[(init(pil.Equalize), 0.8, None, None), (init(pil.Equalize), 0.6, None, None)],
[(init(pil.Posterize), 0.6, int(stom(4,8,7)), None), (init(pil.Posterize), 0.6, int(stom(4,8,6)),None)],
[(init(pil.Equalize), 0.4, None, None), (init(pil.Solarize), 0.2, stom(256,0,4),None)],
[(init(pil.Equalize), 0.4, None, None), (init(pil.Rotate), 0.8, stom(0,30,8),0)],
[(init(pil.Solarize), 0.6, stom(256,0,3), None), (init(pil.Equalize), 0.6, None, None)],
[(init(pil.Posterize), 0.8, int(stom(4,8,5)), None), (init(pil.Equalize), 1.0, None, None)],
[(init(pil.Rotate), 0.2, stom(0,30,3), 0), (init(pil.Solarize), 0.6, stom(256,0,8),None)],
[(init(pil.Equalize), 0.6, None, None), (init(pil.Posterize), 0.4, int(stom(4,8,6)),None)],
[(init(pil.Rotate), 0.8, stom(0,30,8), 0), (init(pil.ColorBalance), 0.4, stom(0,0.9,0),1)],
[(init(pil.Rotate), 0.4, stom(0,30,9), 0), (init(pil.Equalize), 0.6, None, None)],
[(init(pil.Equalize), 0.0, None, None), (init(pil.Equalize), 0.8, None, None)],
[(init(pil.Invert), 0.6, None, None), (init(pil.Equalize), 1.0, None, None)],
[(init(pil.ColorBalance), 0.6, stom(0,0.9,4), 1), (init(pil.Contrast), 1.0, stom(0,0.9,8),1)],
[(init(pil.Rotate), 0.8, stom(0,30,8), 0), (init(pil.ColorBalance), 1.0, stom(0,0.9,2),1)],
[(init(pil.ColorBalance), 0.8, stom(0,0.9,8), 1), (init(pil.Solarize), 0.8, stom(256,0,7),None)],
[(init(pil.Sharpness), 0.4, stom(0,0.9,7), 1), (init(pil.Invert), 0.6, None, None)],
[(init(pil.ShearX), 0.6, stom(0,0.9,5), 1), (init(pil.Equalize), 1.0, None, None)],
[(init(pil.ColorBalance), 0.4, stom(0,0.9,0), 1), (init(pil.Equalize), 0.6, None, None)],
[(init(pil.Equalize), 0.4, None, None), (init(pil.Solarize), 0.2, stom(256,0,4),None)],
[(init(pil.Solarize), 0.6, stom(256,0,5), None), (init(pil.AutoContrast), 0.6, None, None)],
[(init(pil.Invert), 0.6, None, None), (init(pil.Equalize), 1.0, None, None)],
[(init(pil.ColorBalance), 0.6, stom(0,0.9,4), 1), (init(pil.Contrast), 1.0, stom(0,0.9,8),1)],
[(init(pil.Equalize), 0.8, None, None), (init(pil.Equalize), 0.6, None, None)],
]
aug = compositions.AutoAugment(subpolicy_list)
super(ImageNetAutoAugment, self).__init__(data_path, split, im_size, train_aug, num_transforms,
aug, rgb_to_bgr=rgb_to_bgr)
class ImageNetAugmix(ImageNetBase):
def __init__(self, data_path, split, im_size, train_aug=None, num_transforms=None,
aug_string=None, width=3, depth=3, random_depth=True, prob_coeff=1.0,
severity=3, rgb_to_bgr=True):
self.aug_string = aug_string
self.width = width
self.depth = depth
self.prob_coeff = prob_coeff
self.random_depth = random_depth
self.severity = severity
if aug_string is not None:
augs = parse_aug_string(aug_string, im_size)
else:
augs = get_augs_by_tag(['augmix'])
augs = [a(severity=severity, im_size=im_size) for a in augs]
augmentation = compositions.Augmix(
augmentation_list=augs,
width=width,
max_depth=depth,
random_depth=random_depth,
prob_coeff=prob_coeff
)
super(ImageNetAugmix, self).__init__(data_path, split, im_size, train_aug, num_transforms,
augmentation, rgb_to_bgr=rgb_to_bgr)
class Cifar10AugmixJSD(torch.utils.data.Dataset):
def __init__(self, data_path, split, im_size, train_aug=True,
augmix_width=3, augmix_depth=3, augmix_random_depth=True,
augmix_prob_coeff=1.0, augmix_severity=3,
jsd_num=3):
self.jsd_num = jsd_num
self.split = split
self.train = True if split=='train' else False
train_transform = [tv.transforms.RandomHorizontalFlip(),
tv.transforms.RandomCrop(im_size, padding=4)]\
if (self.train and train_aug) else []
self.pretransform = tv.transforms.Compose(train_transform + [PilToNumpy()])
self.posttransform = tv.transforms.Compose([NumpyToTensor(), tv.transforms.Normalize(CIFAR_MEAN, CIFAR_STD)])
aug_list = [
pil.ShearX(augmix_severity, im_size=im_size),
pil.ShearY(augmix_severity, im_size=im_size),
pil.TranslateX(augmix_severity, im_size=im_size),
pil.TranslateY(augmix_severity, im_size=im_size),
pil.Rotate(augmix_severity, im_size=im_size),
pil.Equalize(augmix_severity, im_size=im_size),
pil.AutoContrast(augmix_severity, im_size=im_size),
pil.Solarize(augmix_severity, im_size=im_size),
pil.Posterize(augmix_severity, im_size=im_size)
]
self.aug = compositions.Augmix(
augmentation_list=aug_list,
width=augmix_width,
max_depth=augmix_depth,
random_depth=augmix_random_depth,
prob_coeff=augmix_prob_coeff
)
self.dataset = tv.datasets.CIFAR10(data_path, self.train, transform=None, download=False)
def __getitem__(self, index):
im, label = self.dataset[index]
im = self.pretransform(im)
im_one = self.posttransform(im)
ims = [self.posttransform(self.aug(im)) for i in range(self.jsd_num-1)]
c, h, w = im_one.size()
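# Pack the clean view and the jsd_num-1 AugMix views along the channel dimension;
# the JSD training loop unpacks them again.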
out = torch.stack([im_one] + ims, dim=0).view(c * self.jsd_num, h, w)
return out, label
def __len__(self):
return len(self.dataset)
class ImageNetAugmixJSD(torch.utils.data.Dataset):
def __init__(self, data_path, split, im_size, RGB_to_BGR=True, mixture_width=3, mixture_depth=-1, aug_severity=1, aug_prob_coeff=1, jsd_num=3):
self.split = split
self.train = True if split=='train' else False
self.im_size = im_size
self.RGB_to_BGR = RGB_to_BGR
self.train_transform = tv.transforms.Compose(
[tv.transforms.RandomResizedCrop(im_size, scale=(0.08,1.0)),
tv.transforms.RandomHorizontalFlip()])
self.test_transform = tv.transforms.Compose(
[tv.transforms.Resize(256),
tv.transforms.CenterCrop(im_size)])
self.preprocess = tv.transforms.Compose(
[tv.transforms.ToTensor(),
tv.transforms.Normalize(IM_MEAN, IM_STD)])
data_path = os.path.join(data_path, split)
self.transform = self.train_transform if self.train else self.test_transform
self.dataset = tv.datasets.ImageFolder(data_path, None)
self.width = mixture_width
self.depth = mixture_depth
self.severity = aug_severity
self.prob_coeff = aug_prob_coeff
self.im_size = im_size
self.num = jsd_num
self.augmentations = [
self.rotate,
self.shear_x,
self.shear_y,
self.translate_x,
self.translate_y,
self.autocontrast,
self.posterize,
self.equalize,
self.solarize,
]
def _prepare_im(self, im):
im = self.preprocess(im)
if self.RGB_to_BGR:
im = im[[2,1,0],:,:]
return im
def __len__(self):
return len(self.dataset)
def __getitem__(self, index):
im, label = self.dataset[index]
im = self.transform(im)
ims = [self._prepare_im(im)] + [self.augment(im) for i in range(1,self.num)]
im = np.concatenate(ims, axis=0)
return im, label
def augment(self, im):
ws = np.float32(
np.random.dirichlet([self.prob_coeff] * self.width))
m = np.float32(np.random.beta(self.prob_coeff, self.prob_coeff))
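# AugMix: combine `width` randomly sampled augmentation chains with Dirichlet weights ws,
# then blend the mixture with the clean image using the Beta-sampled weight m.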
mix = torch.zeros_like(self._prepare_im(im))
for i in range(self.width):
image_aug = im.copy()
depth = self.depth if self.depth > 0 else np.random.randint(1, 4)
for _ in range(depth):
op = np.random.choice(self.augmentations)
image_aug = op(image_aug, self.severity)
# Preprocessing commutes since all coefficients are convex
mix += ws[i] * self._prepare_im(image_aug)
mixed = (1 - m) * self._prepare_im(im) + m * mix
return mixed
def autocontrast(self, pil_img, _):
return ImageOps.autocontrast(pil_img)
def equalize(self, pil_img, _):
return ImageOps.equalize(pil_img)
def posterize(self, pil_img, level):
level = int_parameter(sample_level(level), 4)
return ImageOps.posterize(pil_img, 4 - level)
def rotate(self, pil_img, level):
degrees = int_parameter(sample_level(level), 30)
if np.random.uniform() > 0.5:
degrees = -degrees
return pil_img.rotate(degrees, resample=Image.BILINEAR)
def solarize(self, pil_img, level):
level = int_parameter(sample_level(level), 256)
return ImageOps.solarize(pil_img, 256 - level)
def shear_x(self, pil_img, level):
level = float_parameter(sample_level(level), 0.3)
if np.random.uniform() > 0.5:
level = -level
return pil_img.transform((self.im_size, self.im_size),
Image.AFFINE, (1, level, 0, 0, 1, 0),
resample=Image.BILINEAR)
def shear_y(self, pil_img, level):
level = float_parameter(sample_level(level), 0.3)
if np.random.uniform() > 0.5:
level = -level
return pil_img.transform((self.im_size, self.im_size),
Image.AFFINE, (1, 0, 0, level, 1, 0),
resample=Image.BILINEAR)
def translate_x(self, pil_img, level):
level = int_parameter(sample_level(level), self.im_size / 3)
if np.random.random() > 0.5:
level = -level
return pil_img.transform((self.im_size, self.im_size),
Image.AFFINE, (1, 0, level, 0, 1, 0),
resample=Image.BILINEAR)
def translate_y(self, pil_img, level):
level = int_parameter(sample_level(level), self.im_size / 3)
if np.random.random() > 0.5:
level = -level
return pil_img.transform((self.im_size, self.im_size),
Image.AFFINE, (1, 0, 0, 0, 1, level),
resample=Image.BILINEAR)
class ImageNetSIN(torch.utils.data.Dataset):
def __init__(self, in_data_path, sin_data_path, split, im_size, train_aug=None, rgb_to_bgr=True):
assert split in ['train', 'val'], "Unknown split {}".format(split)
self.train = True if split=='train' else False
self.train_aug = self.train if train_aug is None else train_aug
in_data_path = os.path.join(in_data_path, split)
sin_data_path = os.path.join(sin_data_path, split)
if self.train_aug:
train_transform = [
tv.transforms.RandomResizedCrop(im_size, scale=(0.08,1.0)),
tv.transforms.RandomHorizontalFlip(),
]
else:
train_transform = [
tv.transforms.Resize(256),
tv.transforms.CenterCrop(im_size)
]
def RGB_to_BGR(image):
return image[[2,1,0],:,:]
self.pretransform = tv.transforms.Compose(train_transform + [PilToNumpy()])
self.posttransform = tv.transforms.Compose([
NumpyToTensor(),
tv.transforms.Normalize(IM_MEAN, IM_STD)] +
([RGB_to_BGR] if rgb_to_bgr else []) #PyCls imagenet models are trained in BGR input order
)
self.transform = tv.transforms.Compose([
self.pretransform,
self.posttransform
])
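# The training pool is the concatenation of the standard ImageNet and Stylized-ImageNet (SIN)
# folders for the requested split.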
self.dataset = torch.utils.data.ConcatDataset([
tv.datasets.ImageFolder(in_data_path, self.transform),
tv.datasets.ImageFolder(sin_data_path, self.transform)
])
def __getitem__(self, idx):
return self.dataset[idx]
def __len__(self):
return len(self.dataset)
| augmentation-corruption-fbr_main | experiments/overlap/datasets.py |
| augmentation-corruption-fbr_main | experiments/overlap/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import logging
import os
import time
import datetime
import torch.nn as nn
import torch.nn.functional as F
log = logging.getLogger(__name__)
def eta_str(eta_td):
"""Converts an eta timedelta to a fixed-width string format."""
days = eta_td.days
hrs, rem = divmod(eta_td.seconds, 3600)
mins, secs = divmod(rem, 60)
return '{0:02},{1:02}:{2:02}:{3:02}'.format(days, hrs, mins, secs)
def train_net(model, optimizer, train_dataset,
batch_size,
max_epoch,
loader_params,
lr_policy,
checkpoint_folder='checkpoints',
name=None,
save_period=1,
weights=None,
num_gpus=1,
is_leader=True,
jsd_num=3,
jsd_alpha=12.0):
chpk_pre = 'model_epoch_'
if name is not None:
chpk_pre = name + "_" + chpk_pre
chpk_post = '.pyth'
if os.path.exists(checkpoint_folder):
checkpoints = [c for c in os.listdir(checkpoint_folder) if chpk_post in c and chpk_pre == "_".join(c.split("_")[:-1]) +"_"]
else:
checkpoints = []
if weights:
checkpoint = torch.load(weights, map_location='cpu')
log.info("Pretrained weights provided. Loading model from {} and skipping training.".format(weights))
if num_gpus > 1:
model.module.load_state_dict(checkpoint['model_state'])
else:
model.load_state_dict(checkpoint['model_state'])
return model
elif checkpoints:
last_checkpoint_name = os.path.join(checkpoint_folder, sorted(checkpoints)[-1])
checkpoint = torch.load(last_checkpoint_name, map_location='cpu')
log.info("Loading model from {}".format(last_checkpoint_name))
if num_gpus > 1:
model.module.load_state_dict(checkpoint['model_state'])
else:
model.load_state_dict(checkpoint['model_state'])
optimizer.load_state_dict(checkpoint['optimizer_state'])
start_epoch = checkpoint['epoch'] + 1
else:
start_epoch = 1
if train_dataset is None:
return model
sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\
if num_gpus > 1 else None
loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=True if sampler is None else False,
sampler=sampler,
num_workers=loader_params.num_workers,
pin_memory=loader_params.pin_memory,
drop_last=True
)
for i in range(start_epoch, max_epoch+1):
log.info("Starting epoch {}/{}".format(i, max_epoch))
time_start = time.time()
if sampler:
sampler.set_epoch(i)
train_epoch(model, optimizer, loader, lr_policy, i, num_gpus, jsd_num=jsd_num, jsd_alpha=jsd_alpha)
time_stop = time.time()
seconds_taken = (time_stop - time_start)
eta_td = datetime.timedelta(seconds=int(seconds_taken*(max_epoch-i)))
log.info("Seconds taken: {:.2f}, Time remaining: {}".format(seconds_taken, eta_str(eta_td)))
if (i % save_period == 0 or i == max_epoch) and is_leader:
if num_gpus > 1:
m = model.module
else:
m = model
checkpoint = {
'epoch' : i,
'model_state' : m.state_dict(),
'optimizer_state' : optimizer.state_dict()
}
checkpoint_file = "{:s}{:04d}{:s}".format(chpk_pre, i, chpk_post)
if not os.path.exists(checkpoint_folder):
os.mkdir(checkpoint_folder)
checkpoint_file = os.path.join(checkpoint_folder, checkpoint_file)
log.info("Saving model to {}".format(checkpoint_file))
torch.save(checkpoint, checkpoint_file)
class JSDLoss(nn.Module):
def __init__(self, alpha=12.0, num=3):
super(JSDLoss, self).__init__()
self.cross_entropy = nn.CrossEntropyLoss()
self.alpha = alpha
self.num = num
def forward(self, preds, labels):
if not self.training:
return self.cross_entropy(preds, labels)
bs, nc = preds.size()
preds = preds.view(bs//self.num, self.num, nc)
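# Cross-entropy on the clean (first) view plus a Jensen-Shannon consistency term:
# KL divergence of each view's softmax against the clamped, logged mean distribution.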
clean_loss = self.cross_entropy(preds[:,0,:],labels)
p_preds = F.softmax(preds, dim=2)
p_mixture = torch.clamp(torch.mean(p_preds, dim=1, keepdim=True), 1e-7, 1).log()
p_mixture = p_mixture.repeat(1,self.num,1)
jsd_loss = F.kl_div(p_mixture, p_preds, reduction='batchmean') / self.num
return clean_loss + self.alpha * jsd_loss
def train_epoch(model, optimizer, loader, lr_policy, epoch, num_gpus=1, jsd_num=3, jsd_alpha=12.0):
lr = lr_policy(epoch-1)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
model.train()
loss_fun = JSDLoss(alpha=jsd_alpha,num=jsd_num).cuda()
loss_fun.train()
avg_loss = 0.0
num_correct = 0
num_total = 0
num_batches = 0
for cur_iter, (inputs, labels) in enumerate(loader):
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
bs, c, h, w = inputs.size()
inputs = inputs.view(bs*jsd_num, c//jsd_num, h, w) # Unpack jsd images
preds = model(inputs)
loss = loss_fun(preds, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
preds = preds.view(bs, jsd_num, -1)
preds = preds[:,0,:]
correct = torch.sum(torch.argmax(preds, dim=1)==labels)
if num_gpus > 1:
torch.distributed.all_reduce(correct)
torch.distributed.all_reduce(loss)
avg_loss += loss.item()
num_correct += correct.item()
num_total += labels.size(0) * num_gpus
num_batches += num_gpus
avg_loss /= num_batches
err = 100 * (1 - num_correct / num_total)
log.info("Avg loss: {:.3f}, Avg err: {:.3f}".format(avg_loss, err))
| augmentation-corruption-fbr_main | experiments/overlap/train_net_jsd.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import numpy as np
from hydra.utils import instantiate
from .train_net import train_net
class Network(object):
def __init__(self, model_cfg, optim_cfg, dataset_cfg, data_loader, num_gpus=1, is_leader=True):
cur_device = torch.cuda.current_device()
self.model = instantiate(model_cfg).cuda(device=cur_device)
if num_gpus > 1:
self.model = torch.nn.parallel.DistributedDataParallel(
module=self.model,
device_ids=[cur_device],
output_device=cur_device
)
self.optimizer = instantiate(optim_cfg, self.model.parameters())
if optim_cfg.max_epoch > 0:
self.dataset = instantiate(dataset_cfg)
else:
self.dataset = None
self.batch_size = dataset_cfg.batch_size
self.max_epoch = optim_cfg.max_epoch
self.loader_params = data_loader
self.lr_policy = instantiate(optim_cfg.lr_policy)
self.save_period = dataset_cfg.checkpoint_period
self.weights = dataset_cfg.weights
self.num_gpus = num_gpus
self.is_leader = is_leader
def train(self):
train_net(self.model,
self.optimizer,
self.dataset,
self.batch_size,
self.max_epoch,
self.loader_params,
self.lr_policy,
save_period=self.save_period,
name='ft',
weights=self.weights,
num_gpus=self.num_gpus,
is_leader=self.is_leader
)
self.model.eval()
def extract(self, x):
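# Run a forward pass for its side effect of populating model.features
# (the penultimate-layer activations), which are returned below.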
preds = self.model(x)
if self.num_gpus > 1:
return self.model.module.features
else:
return self.model.features
| augmentation-corruption-fbr_main | experiments/overlap/feature_extractor.py |
# This source code is adapted from code licensed under the MIT license
# found in third_party/wideresnet_license from the root directory of
# this source tree.
"""WideResNet implementation (https://arxiv.org/abs/1605.07146)."""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
"""Basic ResNet block."""
def __init__(self, in_planes, out_planes, stride, drop_rate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(
out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)
self.drop_rate = drop_rate
self.is_in_equal_out = (in_planes == out_planes)
self.conv_shortcut = (not self.is_in_equal_out) and nn.Conv2d(
in_planes,
out_planes,
kernel_size=1,
stride=stride,
padding=0,
bias=False) or None
def forward(self, x):
if not self.is_in_equal_out:
x = self.relu1(self.bn1(x))
else:
out = self.relu1(self.bn1(x))
if self.is_in_equal_out:
out = self.relu2(self.bn2(self.conv1(out)))
else:
out = self.relu2(self.bn2(self.conv1(x)))
if self.drop_rate > 0:
out = F.dropout(out, p=self.drop_rate, training=self.training)
out = self.conv2(out)
if not self.is_in_equal_out:
return torch.add(self.conv_shortcut(x), out)
else:
return torch.add(x, out)
class NetworkBlock(nn.Module):
"""Layer container for blocks."""
def __init__(self,
nb_layers,
in_planes,
out_planes,
block,
stride,
drop_rate=0.0):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers,
stride, drop_rate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride,
drop_rate):
layers = []
for i in range(nb_layers):
layers.append(
block(i == 0 and in_planes or out_planes, out_planes,
i == 0 and stride or 1, drop_rate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet(nn.Module):
"""WideResNet class."""
def __init__(self, depth, num_classes, widen_factor=1, drop_rate=0.0):
super(WideResNet, self).__init__()
self.depth = depth
self.widen_factor = widen_factor
n_channels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
assert (depth - 4) % 6 == 0
n = (depth - 4) // 6
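# Depth must satisfy depth = 6n + 4; n is the number of BasicBlocks per stage.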
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(
3, n_channels[0], kernel_size=3, stride=1, padding=1, bias=False)
# 1st block
self.block1 = NetworkBlock(n, n_channels[0], n_channels[1], block, 1,
drop_rate)
# 2nd block
self.block2 = NetworkBlock(n, n_channels[1], n_channels[2], block, 2,
drop_rate)
# 3rd block
self.block3 = NetworkBlock(n, n_channels[2], n_channels[3], block, 2,
drop_rate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(n_channels[3])
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(n_channels[3], num_classes)
self.n_channels = n_channels[3]
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
out = self.conv1(x)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.n_channels)
self.features = out #Expose penultimate layer for access as features
return self.fc(out)
# Stage depths for ImageNet models
_IN_STAGE_DS = {
18: (2, 2, 2, 2),
50: (3, 4, 6, 3),
101: (3, 4, 23, 3),
152: (3, 8, 36, 3),
}
| augmentation-corruption-fbr_main | experiments/overlap/wideresnet.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import logging
import os
import time
import datetime
log = logging.getLogger(__name__)
def eta_str(eta_td):
"""Converts an eta timedelta to a fixed-width string format."""
days = eta_td.days
hrs, rem = divmod(eta_td.seconds, 3600)
mins, secs = divmod(rem, 60)
return '{0:02},{1:02}:{2:02}:{3:02}'.format(days, hrs, mins, secs)
def train_net(model, optimizer, train_dataset,
batch_size,
max_epoch,
loader_params,
lr_policy,
checkpoint_folder='checkpoints',
name=None,
save_period=1,
weights=None,
num_gpus=1,
is_leader=True):
chpk_pre = 'model_epoch_'
if name is not None:
chpk_pre = name + "_" + chpk_pre
chpk_post = '.pyth'
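# Resume from the most recent matching checkpoint in checkpoint_folder, unless explicit
# pretrained weights are given, in which case they are loaded and training is skipped.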
if os.path.exists(checkpoint_folder):
checkpoints = [c for c in os.listdir(checkpoint_folder) if chpk_post in c and chpk_pre == "_".join(c.split("_")[:-1]) +"_"]
else:
checkpoints = []
if weights:
checkpoint = torch.load(weights, map_location='cpu')
log.info("Pretrained weights provided. Loading model from {} and skipping training.".format(weights))
if num_gpus > 1:
model.module.load_state_dict(checkpoint['model_state'])
else:
model.load_state_dict(checkpoint['model_state'])
return model
elif checkpoints:
last_checkpoint_name = os.path.join(checkpoint_folder, sorted(checkpoints)[-1])
checkpoint = torch.load(last_checkpoint_name, map_location='cpu')
log.info("Loading model from {}".format(last_checkpoint_name))
if num_gpus > 1:
model.module.load_state_dict(checkpoint['model_state'])
else:
model.load_state_dict(checkpoint['model_state'])
optimizer.load_state_dict(checkpoint['optimizer_state'])
start_epoch = checkpoint['epoch'] + 1
else:
start_epoch = 1
if train_dataset is None:
return model
sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\
if num_gpus > 1 else None
loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=True if sampler is None else False,
sampler=sampler,
num_workers=loader_params.num_workers,
pin_memory=loader_params.pin_memory,
drop_last=True
)
for i in range(start_epoch, max_epoch+1):
log.info("Starting epoch {}/{}".format(i, max_epoch))
time_start = time.time()
if sampler:
sampler.set_epoch(i)
train_epoch(model, optimizer, loader, lr_policy, i, num_gpus)
time_stop = time.time()
seconds_taken = (time_stop - time_start)
eta_td = datetime.timedelta(seconds=int(seconds_taken*(max_epoch-i)))
log.info("Seconds taken: {:.2f}, Time remaining: {}".format(seconds_taken, eta_str(eta_td)))
if (i % save_period == 0 or i == max_epoch) and is_leader:
if num_gpus > 1:
m = model.module
else:
m = model
checkpoint = {
'epoch' : i,
'model_state' : m.state_dict(),
'optimizer_state' : optimizer.state_dict()
}
checkpoint_file = "{:s}{:04d}{:s}".format(chpk_pre, i, chpk_post)
if not os.path.exists(checkpoint_folder):
os.mkdir(checkpoint_folder)
checkpoint_file = os.path.join(checkpoint_folder, checkpoint_file)
log.info("Saving model to {}".format(checkpoint_file))
torch.save(checkpoint, checkpoint_file)
def train_epoch(model, optimizer, loader, lr_policy, epoch, num_gpus=1):
lr = lr_policy(epoch-1)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
model.train()
loss_fun = torch.nn.CrossEntropyLoss().cuda()
avg_loss = 0.0
num_correct = 0
num_total = 0
num_batches = 0
for cur_iter, (inputs, labels) in enumerate(loader):
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
preds = model(inputs)
loss = loss_fun(preds, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
correct = torch.sum(torch.argmax(preds, dim=1)==labels)
if num_gpus > 1:
torch.distributed.all_reduce(correct)
torch.distributed.all_reduce(loss)
avg_loss += loss.item()
num_correct += correct.item()
num_total += labels.size(0) * num_gpus
num_batches += num_gpus
avg_loss /= num_batches
err = 100 * (1 - num_correct / num_total)
log.info("Avg loss: {:.3f}, Avg err: {:.3f}".format(avg_loss, err))
| augmentation-corruption-fbr_main | experiments/overlap/train_net.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import simplejson
import decimal
import logging
log = logging.getLogger(__name__)
_TAG = 'json_stats: '
def log_json_stats(stats):
"""Logs json stats."""
# Decimal + string workaround for having fixed len float vals in logs
stats = {
k: decimal.Decimal('{:.6f}'.format(v)) if isinstance(v, float) else v
for k, v in stats.items()
}
json_stats = simplejson.dumps(stats, sort_keys=True, use_decimal=True)
log.info('{:s}{:s}'.format(_TAG, json_stats))
def load_json_stats(log_file):
"""Loads json_stats from a single log file."""
with open(log_file, 'r') as f:
lines = f.readlines()
json_lines = [l[l.find(_TAG) + len(_TAG):] for l in lines if _TAG in l]
json_stats = [simplejson.loads(l) for l in json_lines]
return json_stats
def parse_json_stats(log, row_type, key):
"""Extract values corresponding to row_type/key out of log."""
vals = [row[key] for row in log if row['_type'] == row_type and key in row]
if key == 'iter' or key == 'epoch':
vals = [int(val.split('/')[0]) for val in vals]
return vals
| augmentation-corruption-fbr_main | experiments/overlap/utils/logging.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
class Cosine(object):
def __init__(self, base_lr, max_epoch):
self.base_lr = base_lr
self.max_epoch = max_epoch
def __call__(self, cur_epoch):
return 0.5 * self.base_lr * (1.0 + np.cos(np.pi * cur_epoch / self.max_epoch))
class Steps(object):
def __init__(self, base_lr, lr_mult, steps):
self.base_lr = base_lr
self.lr_mult = lr_mult
self.steps = steps
def __call__(self, cur_epoch):
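# Piecewise-constant schedule: the index of the last boundary in `steps` that
# cur_epoch has reached selects the power of lr_mult.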
ind = [i for i, s in enumerate(self.steps) if cur_epoch >= s][-1]
return self.base_lr * self.lr_mult ** ind
| augmentation-corruption-fbr_main | experiments/overlap/utils/lr_policy.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .base import Augmentation
from scipy.ndimage import gaussian_filter
from .utils.severity import float_parameter, int_parameter, sample_level
from .utils.image import bilinear_interpolation, smoothstep
import numpy as np
class CausticRefraction(Augmentation):
tags = ['distortion']
name = 'caustic_refraction'
def sample_parameters(self):
time = np.random.uniform(low=0.5, high=2.0)
size = np.random.uniform(low=0.75, high=1.25) * self.im_size
#size = self.im_size
eta = 4.0
lens_scale = float_parameter(sample_level(self.severity, self.max_intensity), 0.5*self.im_size)
lighting_amount = float_parameter(sample_level(self.severity, self.max_intensity), 2.0)
softening = 1
return { 'time' : time, 'size' : size, 'eta' : eta, 'lens_scale' : lens_scale, 'lighting_amount': lighting_amount, 'softening' : softening}
def transform(self, image, time, size, eta, lens_scale, lighting_amount, softening):
def caustic_noise_kernel(point, time, size):
point = point / size
p = (point % 1) * 6.28318530718 - 250
i = p.copy()
c = 1.0
inten = 0.005
for n in range(5):
t = time * (1.0 - (3.5 / (n+1)))
i = p + np.array([np.cos(t-i[0])+np.sin(t+i[1]),np.sin(t-i[1])+np.cos(t+i[0])])
length = np.sqrt((p[0] / (np.sin(i[0]+t)/inten))**2 + (p[1] / (np.cos(i[1]+t)/inten))**2)
c += 1.0/length
c /= 5.0
c = 1.17 - c ** 1.4
color = np.clip(np.abs(c) ** 8.0, 0, 1)
return np.array([color, color, color])
def refract(incident, normal, eta):
if np.abs(np.dot(incident, normal)) >= 1.0 - 1e-3:
return incident
angle = np.arccos(np.dot(incident, normal))
out_angle = np.arcsin(np.sin(angle) / eta)
out_unrotated = np.array([np.cos(out_angle), np.sin(out_angle), 0.0])
spectator_dim = np.cross(incident, normal)
spectator_dim /= np.linalg.norm(spectator_dim)
orthogonal_dim = np.cross(normal, spectator_dim)
rotation_matrix = np.stack((normal, orthogonal_dim, spectator_dim), axis=0)
return np.matmul(np.linalg.inv(rotation_matrix), out_unrotated)
def luma_at_offset(image, origin, offset):
pixel_value = image[origin[0]+offset[0], origin[1]+offset[1], :]\
if origin[0]+offset[0] >= 0 and origin[0]+offset[0] < image.shape[0]\
and origin[1]+offset[1] >= 0 and origin[1]+offset[1] < image.shape[1]\
else np.array([0.0,0.0,0])
return np.dot(pixel_value, np.array([0.2126, 0.7152, 0.0722]))
def luma_based_refract(point, image, caustics, eta, lens_scale, lighting_amount):
north_luma = luma_at_offset(caustics, point, np.array([0,-1]))
south_luma = luma_at_offset(caustics, point, np.array([0, 1]))
west_luma = luma_at_offset(caustics, point, np.array([-1, 0]))
east_luma = luma_at_offset(caustics, point, np.array([1,0]))
lens_normal = np.array([east_luma - west_luma, south_luma - north_luma, 1.0])
lens_normal = lens_normal / np.linalg.norm(lens_normal)
refract_vector = refract(np.array([0.0, 0.0, 1.0]), lens_normal, eta) * lens_scale
refract_vector = np.round(refract_vector, 3)
#print(refract_vector)
out_pixel = bilinear_interpolation(image, point+refract_vector[0:2])
out_pixel += (north_luma - south_luma) * lighting_amount
out_pixel += (east_luma - west_luma) * lighting_amount
return np.clip(out_pixel, 0, 1)
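# Generate a procedural caustic pattern, soften it with a Gaussian filter, then refract
# each pixel through the caustic's luma gradient and add directional lighting.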
noise = np.array([[caustic_noise_kernel(np.array([y,x]), time, size)\
for x in range(self.im_size)] for y in range(self.im_size)])
noise = gaussian_filter(noise, sigma=softening)
image = image.astype(np.float32) / 255
out = np.array([[luma_based_refract(np.array([y,x]), image, noise, eta, lens_scale, lighting_amount)\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip((out * 255).astype(np.uint8), 0, 255)
class PinchAndTwirl(Augmentation):
tags = ['distortion']
name = 'pinch_and_twirl'
def sample_parameters(self):
centers = [np.random.randint(low=0, high=self.im_size, size=2) for i in range(5)]
radius = self.im_size // 4
#amounts = np.random.uniform(low=0.2, high=1.0, size=5)
#angles = np.random.uniform(low=-np.pi, high=np.pi, size=5)
angles = [float_parameter(sample_level(self.severity, self.max_intensity), np.pi/4)-float_parameter(sample_level(self.severity, True), np.pi/8)\
for i in range(5)]
amounts = [float_parameter(sample_level(self.severity, self.max_intensity), 0.4) + 0.1\
for i in range(5)]
return {'centers' : centers, 'radius' : radius, 'amounts' : amounts, 'angles' : angles}
def transform(self, image, centers, radius, amounts, angles):
def warp_kernel(point, center, radius, amount, angle):
dx = point[0] - center[0]
dy = point[1] - center[1]
dist = np.linalg.norm(point - center)
if dist > radius or np.round(dist, 3) == 0.0:
return point
d = dist / radius
t = np.sin(np.pi * 0.5 * d) ** (- amount)
dx *= t
dy *= t
e = 1 - d
a = angle * (e ** 2)
out = center + np.array([dx*np.cos(a) - dy*np.sin(a), dx*np.sin(a) + dy*np.cos(a)])
return out
image = image.astype(np.float32)
for center, angle, amount in zip(centers, angles, amounts):
image = np.array([[bilinear_interpolation(image, warp_kernel(np.array([y,x]), center, radius, amount, angle))\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(image, 0, 255).astype(np.uint8)
class PinchAndTwirlV2(Augmentation):
tags = ['distortion']
name = 'pinch_and_twirl_v2'
def sample_parameters(self):
num_per_axis = 5 if self.im_size==224 else 3
#angles = np.array([float_parameter(sample_level(self.severity, self.max_intensity), np.pi)-float_parameter(sample_level(self.severity, True), np.pi/2)\
# for i in range(num_per_axis ** 2)]).reshape(num_per_axis, num_per_axis)
#if self.im_size == 224:
angles = np.array([np.random.choice([1,-1]) * float_parameter(sample_level(self.severity, self.max_intensity), np.pi/2) for i in range(num_per_axis ** 2)]).reshape(num_per_axis, num_per_axis)
#else:
# angles = np.array([np.random.choice([1,-1]) * (float_parameter(sample_level(self.severity, self.max_intensity), np.pi/4)+np.pi/4) for i in range(num_per_axis ** 2)]).reshape(num_per_axis, num_per_axis)
amount = float_parameter(sample_level(self.severity, self.max_intensity), 0.4) + 0.1
return {'num_per_axis' : num_per_axis, 'angles' : angles, 'amount' : amount}
def transform(self, image, num_per_axis, angles, amount):
def warp_kernel(point, center, radius, amount, angle):
dx = point[0] - center[0]
dy = point[1] - center[1]
dist = np.linalg.norm(point - center)
if dist > radius or np.round(dist, 3) == 0.0:
return point
d = dist / radius
t = np.sin(np.pi * 0.5 * d) ** (- amount)
dx *= t
dy *= t
e = 1 - d
a = angle * (e ** 2)
out = center + np.array([dx*np.cos(a) - dy*np.sin(a), dx*np.sin(a) + dy*np.cos(a)])
return out
out = image.copy().astype(np.float32)
grid_size = self.im_size // num_per_axis
radius = grid_size / 2
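# Partition the image into a num_per_axis x num_per_axis grid and apply an independent
# pinch-and-twirl warp inside each cell.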
for i in range(num_per_axis):
for j in range(num_per_axis):
l, r = i * grid_size, (i+1) * grid_size
u, d = j * grid_size, (j+1) * grid_size
center = np.array([u+radius, l+radius])
out[u:d,l:r,:] = np.array([[bilinear_interpolation(out, warp_kernel(np.array([y,x]), center, radius, amount, angles[i,j]))\
for x in np.arange(l,r)] for y in np.arange(u,d)])
return np.clip(out, 0, 255).astype(np.uint8)
class FishEye(Augmentation):
tags = ['distortion']
name = 'fish_eye'
def sample_parameters(self):
centers = [np.random.randint(low=0, high=self.im_size, size=2) for i in range(5)]
etas = [float_parameter(sample_level(self.severity, self.max_intensity), 1.0)+1.0\
for i in range(5)]
radii = [np.random.uniform(low=0.1, high=0.3) * self.im_size for i in range(5)]
return {'centers' : centers, 'radii' : radii, 'etas': etas}
def transform(self, image, centers, radii, etas):
def warp_kernel(point, center, a, b, eta):
dx = point[0] - center[0]
dy = point[1] - center[1]
x2 = dx**2
y2 = dy**2
a2 = a**2
b2 = b**2
if (y2 >= (b2 - b2*x2/a2)):
return point
r = 1.0 / eta
z = np.sqrt((1.0 - x2/a2 - y2/b2) * (a*b))
z2 = z**2
x_angle = np.arccos(dx / np.sqrt(x2+z2))
angle_1 = np.pi/2 - x_angle
angle_2 = np.arcsin(np.sin(angle_1)*r)
angle_2 = np.pi/2 - x_angle - angle_2
out_x = point[0] - np.tan(angle_2)*z
#print(np.tan(angle_2)*z)
y_angle = np.arccos(dy / np.sqrt(y2+z2))
angle_1 = np.pi/2 - y_angle
angle_2 = np.arcsin(np.sin(angle_1)*r)
angle_2 = np.pi/2 - y_angle - angle_2
out_y = point[1] - np.tan(angle_2)*z
return np.array([out_x, out_y])
for center, radius, eta in zip(centers, radii, etas):
image = np.array([[bilinear_interpolation(image, warp_kernel(np.array([y,x]), center, radius, radius, eta))\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(image, 0, 255).astype(np.uint8)
class FishEyeV2(Augmentation):
tags = ['distortion']
name = 'fish_eye_v2'
def sample_parameters(self):
seed = np.random.randint(low=0, high=2**32)
#density = float_parameter(sample_level(self.severity, self.max_intensity), 0.01)
density = 0.01 * 224**2 / (self.im_size**2)
#eta = 2
eta = float_parameter(sample_level(self.severity, self.max_intensity), 2.0) + 1.0
radius = max(0.05 * self.im_size, 3)
return {'seed' : seed, 'density' : density, 'eta': eta, 'radius' : radius}
def transform(self, image, density, eta, radius, seed):
def warp_kernel(point, center, a, b, eta):
dx = point[0] - center[0]
dy = point[1] - center[1]
x2 = dx**2
y2 = dy**2
a2 = a**2
b2 = b**2
if (y2 >= (b2 - b2*x2/a2)):
return point
r = 1.0 / eta
z = np.sqrt((1.0 - x2/a2 - y2/b2) * (a*b))
z2 = z**2
x_angle = np.arccos(dx / np.sqrt(x2+z2))
angle_1 = np.pi/2 - x_angle
angle_2 = np.arcsin(np.sin(angle_1)*r)
angle_2 = np.pi/2 - x_angle - angle_2
out_x = point[0] - np.tan(angle_2)*z
#print(np.tan(angle_2)*z)
y_angle = np.arccos(dy / np.sqrt(y2+z2))
angle_1 = np.pi/2 - y_angle
angle_2 = np.arcsin(np.sin(angle_1)*r)
angle_2 = np.pi/2 - y_angle - angle_2
out_y = point[1] - np.tan(angle_2)*z
return np.array([out_x, out_y])
random_state = np.random.RandomState(seed=seed)
num = int(density * self.im_size**2)
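# Scatter `num` small elliptical fish-eye lenses at random centers and re-sample the
# patch under each one with bilinear interpolation.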
out = image.copy().astype(np.float32)
for i in range(num):
center = random_state.uniform(low=0, high=self.im_size, size=2)
l = max(np.floor(center[1]-radius).astype(np.int), 0)
r = min(np.ceil(center[1]+radius).astype(np.int), self.im_size)
u = max(np.floor(center[0]-radius).astype(np.int), 0)
d = min(np.ceil(center[0]+radius).astype(np.int), self.im_size)
out[u:d,l:r,:] = np.array([[bilinear_interpolation(out, warp_kernel(np.array([y,x]), center, radius, radius, eta)) for x in np.arange(l,r)] for y in np.arange(u,d)])
return np.clip(out, 0, 255).astype(np.uint8)
class WaterDrop(Augmentation):
tags = ['distortion']
name = 'water_drop'
def sample_parameters(self):
center = np.array([self.im_size //2, self.im_size//2])
center = np.random.uniform(low=0.25, high=0.75, size=2) * self.im_size
radius = self.im_size//2
amplitude = float_parameter(sample_level(self.severity, self.max_intensity), 0.25)
wavelength = np.random.uniform(low=0.05, high=0.2) * self.im_size
phase = np.random.uniform(low=0.0, high=2*np.pi)
return {'center': center, 'radius' : radius, 'amplitude' : amplitude, 'wavelength' : wavelength, 'phase': phase}
def transform(self, image, center, radius, amplitude, wavelength, phase):
def warp_kernel(point, center, radius, amplitude, wavelength, phase):
dx, dy = point - center
dist = np.linalg.norm(point-center)
if dist > radius:
return point
amount = amplitude * np.sin(dist / wavelength * np.pi * 2 - phase)
if dist != 0.0:
amount *= wavelength / dist
return point + amount * (point - center)
image = np.array([[bilinear_interpolation(image, warp_kernel(np.array([y,x]), center, radius, amplitude, wavelength, phase))\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(image, 0, 255).astype(np.uint8)
class Ripple(Augmentation):
tags = ['distortion']
name = 'ripple'
def sample_parameters(self):
amplitudes = np.array([float_parameter(sample_level(self.severity, self.max_intensity), 0.025)\
for i in range(2)]) * self.im_size
wavelengths = np.random.uniform(low=0.1, high=0.3, size=2) * self.im_size
phases = np.random.uniform(low=0, high=2*np.pi, size=2)
return {'amplitudes' : amplitudes, 'wavelengths' : wavelengths, 'phases' : phases}
def transform(self, image, wavelengths, phases, amplitudes):
def warp_kernel(point, wavelengths, phases, amplitudes):
return point + amplitudes * np.sin(2 * np.pi * point / wavelengths + phases)
image = np.array([[bilinear_interpolation(image, warp_kernel(np.array([y,x]), wavelengths, phases, amplitudes))\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(image, 0, 255).astype(np.uint8)
class ColorHalfTone(Augmentation):
tags = ['distortion']
name = 'color_half_tone'
def sample_parameters(self):
#angles = np.array([108, 162, 90]) * np.pi/180
angles = np.random.uniform(low=0, high=2*np.pi, size=3)
dot_area = float_parameter(sample_level(self.severity, self.max_intensity), 9*np.pi)
dot_radius = np.sqrt(dot_area/np.pi)
return {'angles' : angles, 'dot_radius' : dot_radius}
def transform(self, image, angles, dot_radius):
grid_size = 2 * dot_radius * 1.414
mx = [0, -1, 1, 0, 0]
my = [0, 0, 0, -1, 1]
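# For each channel, snap pixels to a dot grid rotated by that channel's angle; dot size
# shrinks with local intensity, producing a color half-tone pattern.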
out = np.zeros_like(image)
for y in range(self.im_size):
for c in range(3):
angle = angles[c]
cos = np.cos(angle)
sin = np.sin(angle)
for x in range(self.im_size):
tx = cos * x + sin * y
ty = - sin * x + cos * y
tx = tx - (tx - grid_size/2) % grid_size + grid_size/2
ty = ty - (ty - grid_size/2) % grid_size + grid_size/2
f = 1
for i in range(5):
ttx = tx + mx[i]*grid_size
tty = ty + my[i]*grid_size
ntx = cos * ttx - sin * tty
nty = sin * ttx + cos * tty
nx = np.clip(int(ntx), 0, self.im_size-1)
ny = np.clip(int(nty), 0, self.im_size-1)
l = image[nx, ny, c] / 255
l = 1 - l**2
l *= grid_size/2 * 1.414
dx = x-ntx
dy = y-nty
r = np.linalg.norm(np.array([dx,dy]))
f2 = 1-smoothstep(r, r+1, l)
f = min(f, f2)
out[x, y, c] = f
return np.clip(255 * out, 0, 255).astype(np.uint8)
| augmentation-corruption-fbr_main | experiments/overlap/augmentations/distortion.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .base import Augmentation
from .utils.image import bilinear_interpolation
from .utils.severity import float_parameter, int_parameter, sample_level
from scipy.ndimage import shift, zoom, grey_erosion, grey_dilation
import numpy as np
from PIL import Image
from scipy.ndimage import rotate
class Scatter(Augmentation):
tags = ['blur', 'filterpedia', 'scatter']
name = 'scatter'
def sample_parameters(self):
seed = np.random.uniform(low=0.0, high=10.0)
radius = float_parameter(sample_level(self.severity,self.max_intensity), self.im_size/10)
return {'seed' : seed, 'radius' : radius}
def transform(self, image, seed, radius):
def noise(x, y, seed):
i, j = np.sin(x * seed), np.cos(y * seed)
return (np.sin(12.9898*i + 78.233*j) * 43758.5453) % 1
def warp_kernel(x, y, seed, radius):
x_offset = radius * (-1.0 + noise(x, y, seed) * 2)
y_offset = radius * (-1.0 + noise(y, x, seed) * 2)
x_new = min(max(0, x+x_offset), self.im_size-1)
y_new = min(max(0, y+y_offset), self.im_size-1)
return y_new, x_new
out = np.array([[bilinear_interpolation(image, warp_kernel(x, y, seed, radius))\
for x in range(self.im_size)] for y in range(self.im_size)])
return out.astype(np.uint8)
class ChromaticAbberation(Augmentation):
tags = ['blur', 'color', 'chromatic_abberation']
name = 'chromatic_abberation'
def sample_parameters(self):
# shifts = np.array([int_parameter(sample_level(self.severity,self.max_intensity), self.im_size / 6)\
# for i in range(6)]).reshape(3,2)
angles = np.random.uniform(low=0, high=2*np.pi, size=3)
dists = np.array([float_parameter(sample_level(self.severity, self.max_intensity), self.im_size / 10)\
for i in range(3)])
shifts = np.array([[np.cos(a)*d, np.sin(a)*d] for a, d in zip(angles, dists)])
# flip = np.random.choice([-1,1], size=(3,2))
# shifts = shifts * flip
return { 'rgb_shifts' : shifts}
def transform(self, image, rgb_shifts):
# max_pad = np.max(np.abs(rgb_shifts))
# image_padded = np.pad(image, [(max_pad, max_pad), (max_pad, max_pad), (0,0)])
out = image.copy()
for i in range(3):
out[:,:,i] = shift(image[:,:,i], rgb_shifts[i], prefilter=False)
#h, w, _ = image.shape
#for i in range(3):
# out[:,:,i] = image_padded[max_pad+rgb_shifts[i,0]:max_pad+h+rgb_shifts[i,0],max_pad+rgb_shifts[i,1]:max_pad+w+rgb_shifts[i,1],i]
return out
def convert_to_numpy(self, params):
return params['rgb_shifts'].flatten()
def convert_from_numpy(self, numpy_record):
return {'rgb_shifts' : numpy_record.reshape(3,2).astype(np.int)}
class TransverseChromaticAbberation(Augmentation):
tags = ['blur', 'color', 'pil', 'transverse_chromatic_abberation']
name = 'transverse_chromatic_abberation'
def sample_parameters(self):
scales = np.array([float_parameter(sample_level(self.severity,self.max_intensity), 0.5)\
for i in range(3)])
scale = float_parameter(sample_level(self.severity, self.max_intensity), 0.5)
scales = np.array([1.0, 1.0+scale/2, 1.0+scale])
scales = scales[np.random.permutation(3)]
#zerod = np.random.randint(low=0, high=3)
#scales[zerod] = 0.0
#flip = np.random.choice([-1, 1], size=3)
#scales = flip * scales
#scales = 2 ** scales
return { 'scales' : scales }
def transform(self, image, scales):
out = image.copy()
for c in range(3):
zoomed = zoom(image[:,:,c], scales[c], prefilter=False)
edge = (zoomed.shape[0]-self.im_size)//2
out[:,:,c] = zoomed[edge:edge+self.im_size, edge:edge+self.im_size]
return out.astype(np.uint8)
'''
image = Image.fromarray(image)
channel_list = []
for i, channel in enumerate(image.getbands()):
im = image.getchannel(channel)
affine = np.array([[scales[i], 0, (1-scales[i])*self.im_size/2], [0, scales[i], (1-scales[i])*self.im_size/2]])
im = im.transform((self.im_size, self.im_size), Image.AFFINE, affine.flatten())
channel_list.append(im)
out = Image.merge("RGB", channel_list)
return np.array(out).astype(np.uint8)
'''
def convert_to_numpy(self, params):
return params['scales'].flatten()
def convert_from_numpy(self, numpy_record):
return {'scales' : numpy_record}
class HomogeneousColorBlur(Augmentation):
tags = ['blur', 'filterpedia', 'homogenous_color_blur', 'slow', 'impractically_slow']
name = 'homogeneous_color_blur'
def sample_parameters(self):
radius = int_parameter(sample_level(self.severity,self.max_intensity), self.im_size/10)
threshold = np.random.uniform(low=0.2, high=0.21)
return { 'radius' : radius, 'threshold' : threshold }
def transform(self, image, radius, threshold):
def kernel(point, image, radius, threshold):
this_val = image[point[0],point[1],:]
acc = np.zeros(3)
n = 0
for x in np.arange(-radius, radius+1):
for y in np.arange(-radius, radius+1):
x_pos = point[0]+x
y_pos = point[1]+y
if x_pos < 0 or x_pos >= self.im_size or y_pos < 0 or y_pos >= self.im_size:
continue
offset_val = image[x_pos,y_pos,:]
dist_mul = 1 if radius >= np.sqrt(x**2+y**2) else 0
color_mul = 1 if 255*threshold >= np.sqrt(np.sum((this_val-offset_val)**2)) else 0
acc += offset_val * dist_mul * color_mul
n += dist_mul * color_mul
return acc / n
out = np.array([[kernel(np.array([y,x]), image, radius, threshold)\
for x in range(self.im_size)] for y in range(self.im_size)])
return out.astype(np.uint8)
class Erosion(Augmentation):
tags = ['blur']
name = 'erosion'
def sample_parameters(self):
r2 = float_parameter(sample_level(self.severity, self.max_intensity), (5**2-1.5**2)) + 1.5**2
radius = np.sqrt(r2)
return {'radius' : radius}
def transform(self, image, radius):
mask = np.zeros((np.ceil(2*radius).astype(np.uint8), np.ceil(2*radius).astype(np.uint8)))
center = np.array([radius, radius])
for x in range(mask.shape[0]):
for y in range(mask.shape[1]):
mask[x,y] = 1 if np.linalg.norm(np.array([x,y])-center) <= radius else 0
if np.max(mask) == 0:
return image
out = image.copy()
for c in range(3):
out[:,:,c] = grey_erosion(out[:,:,c], footprint=mask)
return out
class Dilation(Augmentation):
tags = ['blur']
name = 'dilation'
def sample_parameters(self):
r2 = float_parameter(sample_level(self.severity, self.max_intensity), (5**2-1.5**2)) + 1.5**2
radius = np.sqrt(r2)
return {'radius' : radius}
def transform(self, image, radius):
mask = np.zeros((np.ceil(2*radius).astype(np.uint8), np.ceil(2*radius).astype(np.uint8)))
center = np.array([radius, radius])
for x in range(mask.shape[0]):
for y in range(mask.shape[1]):
mask[x,y] = 1 if np.linalg.norm(np.array([x,y])-center) <= radius else 0
if np.max(mask) == 0:
return image
out = image.copy()
for c in range(3):
out[:,:,c] = grey_dilation(out[:,:,c], footprint=mask)
return out
class CircularMotionBlur(Augmentation):
tags = ['blur']
name = 'circular_motion_blur'
def sample_parameters(self):
amount = float_parameter(sample_level(self.severity, self.max_intensity),15)
return {'amount' : amount}
def transform(self, image, amount):
num = 21
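# Blend 21 rotated copies of the image spanning [-amount, +amount] degrees,
# weighted by a Gaussian over the rotation offset.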
factors = []
rotated = []
image = image.astype(np.float32) / 255
for i in range(num):
angle = (2*i/(num-1) - 1) * amount
rotated.append(rotate(image, angle, reshape=False))
factors.append(np.exp(- 2*(2*i/(num-1)-1)**2))
out = np.zeros_like(image)
for i, f in zip(rotated, factors):
out += f * i
out /= sum(factors)
return np.clip(out*255, 0, 255).astype(np.uint8)
| augmentation-corruption-fbr_main | experiments/overlap/augmentations/blurs.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .base import Augmentation
from math import floor, ceil
import numpy as np
class Gaussian(Augmentation):
name = 'pg_gaussian'
tags = ['float_return']
def sample_parameters(self):
seed = np.random.randint(low=0, high=2**32)
sigma = np.random.uniform(low=0, high=self.severity/10)\
if not self.max_intensity else self.severity/10
return {'seed': seed, 'sigma': sigma}
def transform(self, image, seed, sigma):
random_state = np.random.RandomState(seed=seed)
noise = random_state.randn(self.im_size, self.im_size, 3)
image = image.astype(np.float32) / 255
image = np.clip(image+sigma*noise, 0, 1)
return image * 255
class PatchGaussian(Augmentation):
name = 'patch_gaussian'
tags = ['float_return', 'additional_parameters']
def __init__(self, severity, im_size, record=False, max_intensity=False, sigma=1.0, width=None):
super(PatchGaussian, self).__init__(severity, im_size, record, max_intensity)
self.sigma = sigma
self.width = width if width is not None else self.im_size
def sample_parameters(self):
seed = np.random.randint(low=0, high=2**32)
crop_pos = np.random.randint(low=0, high=self.im_size, size=2)
width = np.random.uniform(low=0, high=self.width)\
if not self.max_intensity else self.width
sigma = np.random.uniform(low=0, high=self.sigma)
return {'seed': seed, 'crop_pos': crop_pos, 'sigma': sigma, 'width': width}
def transform(self, image, seed, crop_pos, sigma, width):
random_state = np.random.RandomState(seed=seed)
noise = random_state.randn(self.im_size, self.im_size, 3)
noise *= sigma
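# Apply the scaled Gaussian noise only inside a square patch of side `width`
# centered at crop_pos (clipped to the image bounds).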
mask = np.zeros((self.im_size, self.im_size))
l = int(max(0, crop_pos[0]-floor(width/2)))
r = int(min(self.im_size, crop_pos[0]+ceil(width/2)))
u = int(max(0, crop_pos[1]-floor(width/2)))
d = int(min(self.im_size, crop_pos[1]+ceil(width/2)))
mask[l:r,u:d] = 1.0
mask = mask.reshape(self.im_size, self.im_size, 1)
image = image.astype(np.float32) / 255
image = image + mask * noise
image = np.clip(image, 0, 1)
return image * 255
| augmentation-corruption-fbr_main | experiments/overlap/augmentations/patch_gaussian.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .base import Augmentation
import numpy as np
from .utils.severity import int_parameter, float_parameter, sample_level
from .utils.image import smoothstep
from skimage.color import rgb2hsv, hsv2rgb
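# Bleach-bypass film effect: blends the image with a high-contrast, desaturated
# version driven by its luma, controlled by 'amount'.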
class BleachBypass(Augmentation):
tags = ['color', 'filterpedia', 'bleach_bypass']
name = 'bleach_bypass'
def sample_parameters(self):
amount = float_parameter(sample_level(self.severity,self.max_intensity), 1.0)
return { 'amount' : amount }
def transform(self, image, amount):
vals = np.array([0.2126, 0.7152, 0.0722]).reshape(1,1,3)
luma = np.sum(image*vals, axis=2, keepdims=True)/255
l = np.clip(10.0 * (luma - 0.45), 0, 1.0)
result1 = 2 * image * luma / 255
result2 = 1.0 - 2.0 * (1.0 - luma) * (1.0 - image /255)
out = ((1-l) * result1 + l * result2) * 255
return ((1-amount) * image + amount * out).astype(np.uint8)
class Technicolor(Augmentation):
tags = ['color', 'filterpedia', 'technicolor']
name = 'technicolor'
def sample_parameters(self):
amount = float_parameter(sample_level(self.severity,self.max_intensity), 1.0)
return { 'amount' : amount }
def transform(self, image, amount):
redmatte = 1.0 - (image[:,:,0]/255 - ((image[:,:,1]/2+image[:,:,2]/2))/255)
greenmatte = 1.0 - (image[:,:,1]/255 - ((image[:,:,0]/2+image[:,:,2]/2))/255)
bluematte = 1.0 - (image[:,:,2]/255 - ((image[:,:,0]/2+image[:,:,1]/2))/255)
red = greenmatte * bluematte * image[:,:,0].astype(np.float32)
green = redmatte * bluematte * image[:,:,1].astype(np.float32)
blue = redmatte * greenmatte * image[:,:,2].astype(np.float32)
new_color = np.stack((red, green, blue), axis=2)
return ((1-amount) * image + amount * new_color).astype(np.uint8)
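# False-color effect: maps image luma onto a gradient through five random colors,
# then blends the result with the original image by 'amount'.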
class Pseudocolor(Augmentation):
tags = ['color', 'filterpedia', 'pseudocolor']
name = 'pseudocolor'
def sample_parameters(self):
smoothness = np.random.uniform(low=0.25, high=0.75)
color0 = np.random.randint(low=0, high=255, size=3).astype(np.uint8)
color1 = np.random.randint(low=0, high=255, size=3).astype(np.uint8)
color2 = np.random.randint(low=0, high=255, size=3).astype(np.uint8)
color3 = np.random.randint(low=0, high=255, size=3).astype(np.uint8)
color4 = np.random.randint(low=0, high=255, size=3).astype(np.uint8)
amount = float_parameter(sample_level(self.severity,self.max_intensity), 0.5)
return { 'smoothness' : smoothness, 'color0' : color0, 'color1': color1,
'color2': color2, 'color3' : color3, 'color4' : color4, 'amount' : amount }
def transform(self, image, color0, color1, color2, color3, color4, smoothness, amount):
color0 = color0.astype(np.uint8)
color1 = color1.astype(np.uint8)
color2 = color2.astype(np.uint8)
color3 = color3.astype(np.uint8)
color4 = color4.astype(np.uint8)
def get_color(color0, color1, edge0, edge1, luma, smoothness):
smooth_color = color0 + ((color1 - color0) * smoothstep(edge0, edge1, luma))
a = 4.0 * (luma - edge0)
linear_color = (1 - a) * color0 + a * color1
return (1 - smoothness) * linear_color + smoothness * smooth_color
vals = np.array([0.2126, 0.7152, 0.0722]).reshape(1,1,3)
luma = np.sum(image.astype(np.float32)*vals, axis=2, keepdims=True)/255
c1 = get_color(color0, color1, 0.0, 0.25, luma, smoothness)
c2 = get_color(color1, color2, 0.25, 0.50, luma, smoothness)
c3 = get_color(color2, color3, 0.5, 0.75, luma, smoothness)
c4 = get_color(color3, color4, 0.75, 1.0, luma, smoothness)
out = (luma < 0.25) * c1 + ((luma >= 0.25)&(luma < 0.5)) * c2\
+ ((luma >= 0.5)&(luma < 0.75)) * c3 + (luma >= 0.75) * c4
return np.clip((1 - amount) * image + amount * out, 0, 255).astype(np.uint8)
def convert_to_numpy(self, params):
colors = []
for i in range(5):
colors.extend(params['color'+str(i)].tolist())
return np.array([params['smoothness']] + colors + [params['amount']])
def convert_from_numpy(self, numpy_record):
params = {'smoothness' : numpy_record[0], 'amount' : numpy_record[16]}
for i in range(5):
params['color'+str(i)] = numpy_record[1+3*i:1+3*(i+1)]
return params
class HueShift(Augmentation):
tags = ['color']
name = 'hue_shift'
def sample_parameters(self):
amount = float_parameter(sample_level(self.severity, self.max_intensity), 0.5)
if np.random.uniform() < 0.5:
amount *= -1
return {'amount' : amount}
def transform(self, image, amount):
hsv_image = rgb2hsv(image.astype(np.float32)/255)
hsv_image[:,:,0] += (amount % 1.0)
return np.clip(hsv2rgb(hsv_image)*255, 0, 255).astype(np.uint8)
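# Quantizes colors to a small number of levels and diffuses the quantization error
# to neighboring pixels (Floyd-Steinberg-style error-diffusion dithering).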
class ColorDither(Augmentation):
tags = ['color']
name = 'color_dither'
def sample_parameters(self):
#factor = float_parameter(sample_level(self.severity, self.max_intensity),6.0)+1.0
#levels = int(256 / (2**factor))
levels = int_parameter(sample_level(self.severity, self.max_intensity),10)
levels = 14-levels
return {'levels' : levels}
def transform(self, image, levels):
index = 0
color_map = [int(255 * i / (levels -1)) for i in range(levels)]
div = [int(levels*i / 256) for i in range(256)]
out = np.zeros_like(image)
image_copy = image.copy()
m = np.array([[0,0,0],[0,0,7],[3,5,1]])
for y in range(self.im_size):
reverse = ((y % 2) == 1)  # serpentine scan: reverse traversal direction on odd rows
if reverse:
index = y*self.im_size + self.im_size - 1
direction = -1
else:
index = y*self.im_size
direction = 1
for x in range(self.im_size):
curr_val = image_copy[index//self.im_size, index%self.im_size,:]
new_val = np.array([color_map[div[c]] for c in curr_val])
out[index//self.im_size, index%self.im_size,:] = new_val
e = curr_val - new_val
for i in [-1,0,1]:
iy = y+i
if iy > 0 and iy < self.im_size:
for j in [-1,0,1]:
jx = x+j
if jx > 0 and jx < self.im_size:
if reverse:
w = m[(i+1),-j+1]
else:
w = m[(i+1),j+1]
if w != 0:
k = index - j if reverse else index + j
curr_val = image_copy[k//self.im_size, k%self.im_size,:].astype(np.float32)
curr_val = np.clip(curr_val + e * w/np.sum(m),0,255).astype(np.uint8)
image_copy[k//self.im_size,k%self.im_size,:] = curr_val
index += direction
return np.clip(out, 0, 255).astype(np.uint8)
| augmentation-corruption-fbr_main | experiments/overlap/augmentations/color.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import identity
from . import base
from . import pil
from . import obscure
from . import additive_noise
from . import color
from . import compositions
from . import blurs
from . import imagenetc
from . import utils
from . import distortion
from . import standard_augmentations
from . import patch_gaussian
| augmentation-corruption-fbr_main | experiments/overlap/augmentations/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .base import Augmentation
from .utils.severity import float_parameter, int_parameter, sample_level
from .utils.image import smoothstep
from .utils.noise import PerlinNoiseGenerator
from scipy.fftpack import ifft2
import numpy as np
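# Adds a single sinusoidal plane wave (random frequency and phase) as greyscale noise.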
class SingleFrequencyGreyscale(Augmentation):
tags = ['additive_noise', 'single_frequency_greyscale']
name = 'single_frequency_greyscale'
def sample_parameters(self):
freq_mag = np.random.uniform(low=-np.pi, high=np.pi)
freq_2 = np.random.uniform(low=-abs(freq_mag), high=abs(freq_mag))
freq = np.array([freq_mag, freq_2])[np.random.permutation(2)]
phase = np.random.uniform(low=0, high=2*np.pi)
intensity = float_parameter(sample_level(self.severity,self.max_intensity), 196)
return { 'freq' : freq, 'phase' : phase, 'intensity' : intensity}
def transform(self, image, freq, phase, intensity):
noise = np.array([[np.sin(x * freq[0] + y * freq[1] + phase)\
for x in range(self.im_size)] for y in range(self.im_size)])
noise = np.stack((noise, noise, noise), axis=2)
return np.clip(image + intensity * noise, 0, 255).astype(np.uint8)
def convert_to_numpy(self, params):
return np.array(params['freq'].tolist() + [params['phase'], params['intensity']])
def convert_from_numpy(self, numpy_record):
return {'freq' : numpy_record[0:2],
'phase' : numpy_record[2],
'intensity' : numpy_record[3]
}
class SingleFrequencyColor(Augmentation):
tags = ['additive_noise', 'color', 'single_frequency_color']
name = 'single_frequency_color'
def sample_parameters(self):
freq = np.random.uniform(low=0, high=np.pi, size=2)
phase = np.random.uniform(low=0, high=2*np.pi)
intensity = [float_parameter(sample_level(self.severity,self.max_intensity), 196) for i in range(3)]
return { 'freq' : freq, 'phase' : phase, 'intensity' : intensity}
def transform(self, image, freq, phase, intensity):
noise = np.array([[np.sin(x * freq[0] + y * freq[1] + phase)\
for x in range(self.im_size)] for y in range(self.im_size)])
noise = np.stack((intensity[0] * noise, intensity[1] * noise, intensity[2] * noise), axis=2)
return np.clip(image + noise, 0, 255).astype(np.uint8)
def convert_to_numpy(self, params):
return np.array(params['freq'].tolist() + [params['phase']] + params['intensity'])
def convert_from_numpy(self, numpy_record):
return {'freq' : numpy_record[0:2],
'phase' : numpy_record[2],
'intensity' : numpy_record[3:6].tolist()
}
class CocentricSineWaves(Augmentation):
tags = ['additive_noise', 'filterpedia', 'color', 'cocentric_sine_waves']
name = 'cocentric_sine_waves'
def sample_parameters(self):
offset = np.random.uniform(low=0, high=self.im_size, size=2)
freq = np.random.uniform(low=0, high=10)
amplitude = np.random.uniform(low=0, high=self.im_size/10)
ring_width = np.random.uniform(low=0, high=self.im_size/10)
intensity = [float_parameter(sample_level(self.severity,self.max_intensity), 128) for i in range(3)]
return { 'offset' : offset,
'freq' : freq,
'amplitude' : amplitude,
'ring_width' : ring_width,
'intensity' : intensity
}
def transform(self, image, offset, freq, amplitude, ring_width, intensity):
def calc_intensity(x, y, x0, y0, freq, amplitude, ring_width):
angle = np.arctan2(x-x0, y-y0) * freq
distance = ((np.sqrt((x-x0)**2 + (y-y0)**2) + np.sin(angle) * amplitude) % ring_width) / ring_width
distance -= 1/2
return distance
noise = np.array([[calc_intensity(x, y, offset[0], offset[1], freq, amplitude, ring_width)\
for x in range(self.im_size)] for y in range(self.im_size)])
noise = np.stack((intensity[0] * noise, intensity[1] * noise, intensity[2] * noise), axis=2)
return np.clip(image + noise, 0, 255).astype(np.uint8)
def convert_to_numpy(self, params):
return np.array(params['offset'].tolist() + [params['freq'], params['amplitude'], params['ring_width']] + params['intensity'])
def convert_from_numpy(self, numpy_record):
return {'offset' : numpy_record[0:2].tolist(),
'freq' : numpy_record[2],
'amplitude' : numpy_record[3],
'ring_width' : numpy_record[4],
'intensity' : numpy_record[5:8].tolist()
}
class PlasmaNoise(Augmentation):
tags = ['additive_noise', 'color', 'filterpedia', 'plasma_noise', 'slow']
name = 'plasma_noise'
def sample_parameters(self):
time = np.random.uniform(low=0.0, high=6*np.pi)
iterations = np.random.randint(low=4, high=7)
sharpness = np.random.uniform(low=0.5, high=1.0)
scale = np.random.uniform(low=0.075, high=0.2) * self.im_size
intensity = float_parameter(sample_level(self.severity,self.max_intensity),64)
return {'time' : time, 'iterations' : iterations, 'sharpness' : sharpness,
'scale' : scale, 'intensity' : intensity}
def transform(self, image, time, iterations, sharpness, scale, intensity):
def kernel(x, y, rand, iters, sharp, scale):
x /= scale
y /= scale
i = np.array([1.0, 1.0, 1.0, 0.0])
for s in range(iters):
r = np.array([np.cos(y * i[0] - i[3] + rand / i[1]), np.sin(x * i[0] - i[3] + rand / i[1])]) / i[2]
r += np.array([-r[1],r[0]]) * 0.3
x += r[0]
y += r[1]
i *= np.array([1.93, 1.15, (2.25 - sharp), rand * i[1]])
r = np.sin(x - rand)
b = np.sin(y + rand)
g = np.sin((x + y + np.sin(rand))*0.5)
return [r,g,b]
noise = np.array([[kernel(x,y, time, iterations, sharpness, scale)\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip((1-intensity/255) * image + intensity * noise, 0, 255).astype(np.uint8)
class VoronoiNoise(Augmentation):
tags = ['additive_noise', 'filterpedia', 'voronoi_noise', 'slow']
name = 'voronoi_noise'
def sample_parameters(self):
seed = np.random.uniform(low=0, high=10)
density = np.random.uniform(low=0.5, high=0.9)
size = np.random.uniform(low=0.05, high=0.2) * self.im_size
intensity = float_parameter(sample_level(self.severity,self.max_intensity),255)
if np.random.uniform() > 0.5:
intensity = -intensity
return {'seed' : seed, 'density' : density, 'size' : size, 'intensity' : intensity}
def transform(self, image, size, seed, density, intensity):
def voronoi_hash(v, time):
m = np.array([[13.85, 47.77], [99.41, 88.48]])
w = np.matmul(m,v)
return (np.sin(w) * np.sqrt(w) * time * 0.0025) % 1
def kernel(x, y, size, seed, density):
v = np.array([[x],[y]]) / size + 1
g = v // 1
f = v % 1
dist = 1.0
for i in [-1,0,1]:
for j in [-1,0,1]:
p = np.array([[i],[j]])
curr_dist = np.linalg.norm((p + voronoi_hash(g+p, seed) - f).flatten())
dist = min(curr_dist, dist)
r = smoothstep(0, 1, dist * density) - 0.5
return r
noise = np.array([[kernel(x,y, size, seed, density)\
for x in range(self.im_size)] for y in range(self.im_size)])
noise = np.stack((noise, noise, noise), axis=2)
return np.clip(image + intensity * noise, 0, 255).astype(np.uint8)
class CausticNoise(Augmentation):
tags = ['additive_noise', 'filterpedia']
name = 'caustic_noise'
def sample_parameters(self):
time = np.random.uniform(low=0.5, high=2.0)
size = np.random.uniform(low=0.75, high=1.25) * self.im_size
#size = self.im_size
intensity = float_parameter(sample_level(self.severity,self.max_intensity), 255)
return { 'time' : time, 'size' : size, 'intensity' : intensity}
def transform(self, image, time, size, intensity):
def kernel(point, time, size):
point = point / size
p = (point % 1) * 6.28318530718 - 250
i = p.copy()
c = 1.0
inten = 0.005
for n in range(5):
t = time * (1.0 - (3.5 / (n+1)))
i = p + np.array([np.cos(t-i[0])+np.sin(t+i[1]),np.sin(t-i[1])+np.cos(t+i[0])])
length = np.sqrt((p[0] / (np.sin(i[0]+t)/inten))**2 + (p[1] / (np.cos(i[1]+t)/inten))**2)
c += 1.0/length
c /= 5.0
c = 1.17 - c ** 1.4
color = np.clip(np.abs(c) ** 8.0, 0, 1)
return np.array([color, color, color])
noise = np.array([[kernel(np.array([y,x]), time, size)\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(image + intensity * noise, 0, 255).astype(np.uint8)
#return np.clip(255 * noise, 0, 255).astype(np.uint8)
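# Draws several star-burst 'sparkle' artifacts with randomly jittered ray lengths
# at random image locations.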
class Sparkles(Augmentation):
tags = ['additive_noise']
name = 'sparkles'
def sample_parameters(self):
centers = np.random.uniform(low=0, high=self.im_size, size=(5, 2))
radii = np.array([float_parameter(sample_level(self.severity, self.max_intensity), 0.1)\
for i in range(5)]) * self.im_size
#radii = np.array([0.1 for i in range(5)]) * self.im_size
#amounts = np.array([float_parameter(sample_level(self.severity, self.max_intensity), 50)\
# for i in range(5)])
amounts = np.array([50 for i in range(5)])
color = np.array([255, 255, 255])
randomness = 25
seed = np.random.randint(low=0, high=2**32)
nrays = np.random.randint(low=50, high=200, size=5)
return {'centers' : centers, 'radii' : radii, 'color' : color, 'randomness' : randomness,
'seed' : seed, 'nrays' : nrays, 'amounts' : amounts
}
def transform(self, image, centers, radii, nrays, amounts, color, randomness, seed):
def kernel(point, value, center, radius, ray_lengths, amount, color):
rays = len(ray_lengths)
dp = point - center
dist = np.linalg.norm(dp)
angle = np.arctan2(dp[1], dp[0])
d = (angle + np.pi) / (2 * np.pi) * rays
i = int(d)
f = d - i
if radius != 0:
length = ray_lengths[i % rays] + f * (ray_lengths[(i+1) % rays] - ray_lengths[i % rays])
g = length**2 / (dist**2 + 1e-4)
g = g ** ((100 - amount) / 50.0)
f -= 0.5
f = 1 - f**2
f *= g
f = np.clip(f, 0, 1)
return value + f * (color - value)
random_state = np.random.RandomState(seed=seed)
for center, rays, amount, radius in zip(centers, nrays, amounts, radii):
ray_lengths = [max(1,radius + randomness / 100.0 * radius * random_state.randn())\
for i in range(rays)]
image = np.array([[kernel(np.array([y,x]), image[y,x,:].astype(np.float32), center, radius, ray_lengths, amount, color)\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(image, 0, 255).astype(np.uint8)
class InverseSparkles(Augmentation):
tags = ['additive_noise']
name = 'inverse_sparkles'
def sample_parameters(self):
center = np.random.uniform(low=0.25, high=0.75, size=2) * self.im_size
#radius = self.im_size // 4
#radius = float_parameter(sample_level(self.severity, self.max_intensity), 0.5)
#radius = (0.75 - radius) * self.im_size
radius = 0.25 * self.im_size
#amount = 25
amount = 100
amount = float_parameter(sample_level(self.severity, self.max_intensity), 65)
amount = 100 - amount
color = np.array([255, 255, 255])
randomness = 25
seed = np.random.randint(low=0, high=2**32)
rays = np.random.randint(low=50, high=200)
return {'center' : center, 'radius' : radius, 'color' : color, 'randomness' : randomness,
'seed' : seed, 'rays' : rays, 'amount' : amount
}
def transform(self, image, center, radius, rays, amount, color, randomness, seed):
def kernel(point, value, center, radius, ray_lengths, amount, color):
rays = len(ray_lengths)
dp = point - center
dist = np.linalg.norm(dp)
angle = np.arctan2(dp[1], dp[0])
d = (angle + np.pi) / (2 * np.pi) * rays
i = int(d)
f = d - i
if radius != 0:
length = ray_lengths[i % rays] + f * (ray_lengths[(i+1) % rays] - ray_lengths[i % rays])
g = length**2 / (dist**2 + 1e-4)
g = g ** ((100 - amount) / 50.0)
f -= 0.5
f = 1 - f**2
f *= g
f = np.clip(f, 0, 1)
return color + f * (value - color)
random_state = np.random.RandomState(seed=seed)
ray_lengths = [radius + randomness / 100.0 * radius * random_state.randn()\
for i in range(rays)]
out = np.array([[kernel(np.array([y,x]), image[y,x,:].astype(np.float32), center, radius, ray_lengths, amount, color)\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(out, 0, 255).astype(np.uint8)
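# Blends the image with greyscale Perlin/turbulence noise from a seeded PerlinNoiseGenerator.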
class PerlinNoise(Augmentation):
tags = ['additive_noise']
name = 'perlin_noise'
def sample_parameters(self):
m = np.array([[1,0],[0,1]]) / (32 * self.im_size / 224)
turbulence = 16.0
gain = 0.5
bias = 0.5
alpha = float_parameter(sample_level(self.severity, self.max_intensity), 0.50)
seed = np.random.randint(low=0, high=2**32)
return {'m': m, 'turbulence' : turbulence, 'seed': seed,
'gain': gain, 'bias': bias, 'alpha': alpha}
def transform(self, image, m, turbulence, seed, gain, bias, alpha):
random_state = np.random.RandomState(seed=seed)
noise = PerlinNoiseGenerator(random_state)
def kernel(point, m, turbulence, gain, bias):
npoint = np.matmul(point, m)
f = noise.turbulence(npoint[0], npoint[1], turbulence)\
if turbulence != 1.0 else noise.noise(npoint[0], npoint[1])
f = gain * f + bias
return np.clip(np.array([f,f,f]),0,1.0)
noise = np.array([[kernel(np.array([y,x]),m,turbulence,gain, bias) for x in range(self.im_size)]\
for y in range(self.im_size)])
out = (1 - alpha) * image.astype(np.float32) + 255 * alpha * noise
return np.clip(out, 0, 255).astype(np.uint8)
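# Additive noise whose Fourier power grows with spatial frequency (blue-noise spectrum),
# synthesized by inverse FFT of random phases.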
class BlueNoise(Augmentation):
tags = ['additive_noise']
name = 'blue_noise'
def sample_parameters(self):
seed = np.random.randint(low=0, high=2**32)
intensity = float_parameter(sample_level(self.severity, self.max_intensity), 196)
return {'seed' : seed, 'intensity' : intensity}
def gen_noise(self, random_state):
center = self.im_size / 2
power = np.array([[np.linalg.norm(np.array([x,y])-center)\
for x in range(self.im_size)] for y in range(self.im_size)])
#power = power / self.im_size
phases = random_state.uniform(low=0, high=2*np.pi, size=(self.im_size, self.im_size//2))
if self.im_size % 2 == 0:
phases = np.concatenate((phases, phases[::-1,::-1]), axis=1)
else:
center_freq = random_state.uniform(low=0, high=2*np.pi, size=(self.im_size//2, 1))
center_freq = np.concatenate((center_freq, np.array([[0.0]]), center_freq[::-1,:]), axis=0)
phases = np.concatenate((phases, center_freq, phases[::-1,::-1]), axis=1)
fourier_space_noise = power * (np.cos(phases) + np.sin(phases) * 1j)
fourier_space_noise = np.roll(fourier_space_noise, self.im_size//2, axis=0)
fourier_space_noise = np.roll(fourier_space_noise, self.im_size//2, axis=1)
noise = np.real(ifft2(fourier_space_noise))
noise = noise / np.std(noise)
return noise
def transform(self, image, seed, intensity):
random_state = np.random.RandomState(seed=seed)
noise = np.stack([self.gen_noise(random_state) for i in range(3)],axis=2)
#luma_noise = noise.reshape(self.im_size, self.im_size, 1) * np.array([[[0.2126, 0.7152, 0.0722]]])
#return np.clip(image + intensity * luma_noise, 0, 255).astype(np.uint8)
return np.clip(image + intensity * noise, 0, 255).astype(np.uint8)
class BrownishNoise(Augmentation):
tags = ['additive_noise']
name = 'brownish_noise'
def sample_parameters(self):
seed = np.random.randint(low=0, high=2**32)
intensity = float_parameter(sample_level(self.severity, self.max_intensity), 64)
return {'seed' : seed, 'intensity' : intensity}
def gen_noise(self, random_state):
center = self.im_size / 2
power = np.array([[1/(np.linalg.norm(np.array([x,y])-center)**2+1)\
for x in range(self.im_size)] for y in range(self.im_size)])
#power = power / self.im_size
phases = random_state.uniform(low=0, high=2*np.pi, size=(self.im_size, self.im_size//2))
if self.im_size % 2 == 0:
phases = np.concatenate((phases, phases[::-1,::-1]), axis=1)
else:
center_freq = random_state.uniform(low=0, high=2*np.pi, size=(self.im_size//2, 1))
center_freq = np.concatenate((center_freq, np.array([[0.0]]), center_freq[::-1,:]), axis=0)
phases = np.concatenate((phases, center_freq, phases[::-1,::-1]), axis=1)
fourier_space_noise = power * (np.cos(phases) + np.sin(phases) * 1j)
fourier_space_noise = np.roll(fourier_space_noise, self.im_size//2, axis=0)
fourier_space_noise = np.roll(fourier_space_noise, self.im_size//2, axis=1)
noise = np.real(ifft2(fourier_space_noise))
noise = noise / np.std(noise)
return noise
def transform(self, image, seed, intensity):
random_state = np.random.RandomState(seed=seed)
noise = np.stack([self.gen_noise(random_state) for i in range(3)],axis=2)
#luma_noise = noise.reshape(self.im_size, self.im_size, 1) * np.array([[[0.2126, 0.7152, 0.0722]]])
#return np.clip(image + intensity * luma_noise, 0, 255).astype(np.uint8)
return np.clip(image + intensity * noise, 0, 255).astype(np.uint8)
| augmentation-corruption-fbr_main | experiments/overlap/augmentations/additive_noise.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .base import Augmentation
import numpy as np
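# Standard CIFAR-10 training augmentation: 4-pixel zero padding, random crop back to
# im_size, and a random horizontal flip.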
class Cifar10CropAndFlip(Augmentation):
def sample_parameters(self):
crop_pos = np.random.randint(low=-4, high=5, size=2)
flip = (np.random.uniform() < 0.5)
return {'crop_pos': crop_pos, 'flip': flip}
def transform(self, image, crop_pos, flip):
image = np.pad(image, ((4,4),(4,4),(0,0)))
pos = crop_pos+4
image = image[pos[0]:pos[0]+self.im_size,pos[1]:pos[1]+self.im_size,:]
if flip:
image = np.ascontiguousarray(image[:,::-1,:])
return image.astype(np.uint8)
| augmentation-corruption-fbr_main | experiments/overlap/augmentations/standard_augmentations.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .base import Augmentation
from .utils.severity import float_parameter, int_parameter, sample_level
from PIL import Image, ImageOps, ImageEnhance
import numpy as np
class AutoContrast(Augmentation):
tags = ['autoaugment', 'augmix', 'pil', 'color', 'autocontrast']
name = 'autocontrast'
def sample_parameters(self):
return {}
def transform(self, image):
im = ImageOps.autocontrast(Image.fromarray(image))
return np.array(im)
class Equalize(Augmentation):
tags = ['autoaugment', 'augmix', 'pil', 'color', 'equalize']
name = 'equalize'
def sample_parameters(self):
return {}
def transform(self, image):
im = ImageOps.equalize(Image.fromarray(image))
return np.array(im)
class Posterize(Augmentation):
tags = ['autoaugment', 'augmix', 'pil', 'color', 'posterize']
name = 'posterize'
def sample_parameters(self):
bits = 4 - int_parameter(sample_level(self.severity,self.max_intensity), 4)
return {'bits' : bits}
def transform(self, image, bits):
im = ImageOps.posterize(Image.fromarray(image), int(bits))
return np.array(im)
class Solarize(Augmentation):
tags = ['autoaugment', 'augmix', 'pil', 'color', 'solarize']
name = 'solarize'
def sample_parameters(self):
threshold = 256 - int_parameter(sample_level(self.severity,self.max_intensity), 256)
return {'threshold' : threshold}
def transform(self, image, threshold):
im = ImageOps.solarize(Image.fromarray(image), threshold)
return np.array(im)
class Affine(Augmentation):
tags = ['pil', 'spatial', 'affine']
name = 'affine'
def sample_parameters(self):
offset_x = float_parameter(sample_level(self.severity,self.max_intensity), 0.3)
if np.random.uniform() > 0.5:
offset_x = -offset_x
offset_y = float_parameter(sample_level(self.severity,self.max_intensity), 0.3)
if np.random.uniform() > 0.5:
offset_y = -offset_y
shift_x = float_parameter(sample_level(self.severity,self.max_intensity), self.im_size / 3)
if np.random.uniform() > 0.5:
shift_x = -shift_x
shift_y = float_parameter(sample_level(self.severity,self.max_intensity), self.im_size / 3)
if np.random.uniform() > 0.5:
shift_y = -shift_y
factor_x = float_parameter(sample_level(self.severity,self.max_intensity), 0.5)
if np.random.uniform() > 0.5:
factor_x = -factor_x
factor_x = 2 ** factor_x
factor_y = float_parameter(sample_level(self.severity,self.max_intensity), 0.5)
if np.random.uniform() > 0.5:
factor_y = -factor_y
factor_y = 2 ** factor_y
affine_matrix = np.array([[factor_x, offset_x, shift_x],[offset_y, factor_y, shift_y]])
return {'affine_matrix' : affine_matrix}
def transform(self, image, affine_matrix):
im = Image.fromarray(image)
im = im.transform(
(self.im_size, self.im_size),
Image.AFFINE,
affine_matrix.flatten(),
resample=Image.BILINEAR
)
return np.array(im)
def convert_to_numpy(self, params):
return params['affine_matrix'].flatten()
def convert_from_numpy(self, numpy_record):
return {'affine_matrix' : numpy_record.reshape(2,3)}
class ShearX(Affine):
tags = ['autoaugment', 'augmix', 'pil', 'spatial', 'shear_x']
name = 'shear_x'
def sample_parameters(self):
offset = float_parameter(sample_level(self.severity,self.max_intensity), 0.3)
if np.random.uniform() > 0.5:
offset = -offset
return {'offset' : offset}
def transform(self, image, offset):
affine_matrix = np.array([[1, offset, 0],[0, 1, 0]])
return super().transform(image, affine_matrix)
def convert_to_numpy(self, params):
return np.array([params['offset']])
def convert_from_numpy(self, numpy_record):
return {'offset' : numpy_record[0]}
class ShearY(Affine):
tags = ['autoaugment', 'augmix', 'pil', 'spatial', 'shear_y']
name = 'shear_y'
def sample_parameters(self):
offset = float_parameter(sample_level(self.severity,self.max_intensity), 0.3)
if np.random.uniform() > 0.5:
offset = -offset
return {'offset' : offset}
def transform(self, image, offset):
affine_matrix = np.array([[1, 0, 0],[offset, 1, 0]])
return super().transform(image, affine_matrix)
def convert_to_numpy(self, params):
return np.array([params['offset']])
def convert_from_numpy(self, numpy_record):
return {'offset' : numpy_record[0]}
class TranslateX(Affine):
tags = ['autoaugment', 'augmix', 'pil', 'spatial', 'translate_x']
name = 'translate_x'
def sample_parameters(self):
offset = float_parameter(sample_level(self.severity,self.max_intensity), self.im_size / 3)
if np.random.uniform() > 0.5:
offset = -offset
return {'offset' : offset}
def transform(self, image, offset):
affine_matrix = np.array([[1, 0, offset],[0, 1, 0]])
return super().transform(image, affine_matrix)
def convert_to_numpy(self, params):
return np.array([params['offset']])
def convert_from_numpy(self, numpy_record):
return {'offset' : numpy_record[0]}
class TranslateY(Affine):
tags = ['autoaugment', 'augmix', 'pil', 'spatial', 'translate_y']
name = 'translate_y'
def sample_parameters(self):
offset = float_parameter(sample_level(self.severity,self.max_intensity), self.im_size / 3)
if np.random.uniform() > 0.5:
offset = -offset
return {'offset' : offset}
def transform(self, image, offset):
affine_matrix = np.array([[1, 0, 0],[0, 1, offset]])
return super().transform(image, affine_matrix)
def convert_to_numpy(self, params):
return np.array([params['offset']])
def convert_from_numpy(self, numpy_record):
return {'offset' : numpy_record[0]}
class Rotate(Augmentation):
tags = ['autoaugment', 'augmix', 'pil', 'spatial', 'rotate']
name = 'rotate'
def sample_parameters(self):
degrees = float_parameter(sample_level(self.severity,self.max_intensity), 30)
if np.random.uniform() > 0.5:
degrees = -degrees
return {'degrees' : degrees}
def transform(self, image, degrees):
im = Image.fromarray(image)
im = im.rotate(degrees, resample=Image.BILINEAR)
return np.array(im)
class Invert(Augmentation):
tags = ['autoaugment', 'pil', 'color', 'invert']
name = 'invert'
def sample_parameters(self):
return {}
def transform(self, image):
im = ImageOps.invert(Image.fromarray(image))
return np.array(im)
class ColorBalance(Augmentation):
tags = ['autoaugment', 'pil', 'color', 'color_balance']
name = 'color_balance'
def sample_parameters(self):
shift = float_parameter(sample_level(self.severity, self.max_intensity), 1.0)
factor = 1.0 + np.random.choice([-1,1]) * shift
return { 'factor' : factor}
def transform(self, image, factor):
enhancer = ImageEnhance.Color(Image.fromarray(image))
return np.array(enhancer.enhance(factor))
class Sharpness(Augmentation):
tags = ['autoaugment', 'pil', 'blur', 'sharpness']
name = 'sharpness'
def sample_parameters(self):
shift = float_parameter(sample_level(self.severity, self.max_intensity), 1.0)
factor = 1.0 + np.random.choice([-1,1]) * shift
return { 'factor' : factor}
def transform(self, image, factor):
enhancer = ImageEnhance.Sharpness(Image.fromarray(image))
return np.array(enhancer.enhance(factor))
class Contrast(Augmentation):
tags = ['autoaugment', 'pil', 'color', 'imagenet_c_overlap', 'contrast']
name = 'contrast_pil'
def sample_parameters(self):
shift = float_parameter(sample_level(self.severity, self.max_intensity), 1.0)
factor = 1.0 + np.random.choice([-1,1]) * shift
return { 'factor' : factor}
def transform(self, image, factor):
enhancer = ImageEnhance.Contrast(Image.fromarray(image))
return np.array(enhancer.enhance(factor))
class Brightness(Augmentation):
tags = ['autoaugment', 'pil', 'color', 'imagenet_c_overlap', 'brightness']
name = 'brightness_pil'
def sample_parameters(self):
shift = float_parameter(sample_level(self.severity, self.max_intensity), 1.0)
factor = 1.0 + np.random.choice([-1,1]) * shift
return { 'factor' : factor}
def transform(self, image, factor):
enhancer = ImageEnhance.Brightness(Image.fromarray(image))
return np.array(enhancer.enhance(factor))
class ScaleX(Affine):
tags = ['pil', 'spatial', 'scale_x']
name = 'scale_x'
def sample_parameters(self):
factor = float_parameter(sample_level(self.severity,self.max_intensity), 0.5)
if np.random.uniform() > 0.5:
factor = -factor
factor = 2 ** factor
return {'factor' : factor}
def transform(self, image, factor):
affine_matrix = np.array([[factor, 0, (1-factor)*self.im_size/2],[0, 1, 0]])
return super().transform(image, affine_matrix)
def convert_to_numpy(self, params):
return np.array([params['factor']])
def convert_from_numpy(self, numpy_record):
return {'factor' : numpy_record[0]}
class ScaleY(Affine):
tags = ['pil', 'spatial', 'scale_y']
name = 'scale_y'
def sample_parameters(self):
factor = float_parameter(sample_level(self.severity,self.max_intensity), 0.5)
if np.random.uniform() > 0.5:
factor = -factor
factor = 2 ** factor
return {'factor' : factor}
def transform(self, image, factor):
affine_matrix = np.array([[1, 0, 0],[0, factor, (1-factor)*self.im_size/2]])
return super().transform(image, affine_matrix)
def convert_to_numpy(self, params):
return np.array([params['factor']])
def convert_from_numpy(self, numpy_record):
return {'factor' : numpy_record[0]}
class ScaleFixedAspectRatio(Affine):
tags = ['pil', 'spatial', 'scale_fixed_aspect_ratio']
name = 'scale_fixed_aspect_ratio'
def sample_parameters(self):
factor = float_parameter(sample_level(self.severity,self.max_intensity), 0.5)
if np.random.uniform() > 0.5:
factor = -factor
factor = 2 ** factor
return {'factor' : factor}
def transform(self, image, factor):
affine_matrix = np.array([[factor, 0, (1-factor)*self.im_size/2],[0, factor, (1-factor)*self.im_size/2]])
return super().transform(image, affine_matrix)
def convert_to_numpy(self, params):
return np.array([params['factor']])
def convert_from_numpy(self, numpy_record):
return {'factor' : numpy_record[0]}
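# Warps the image by moving its four corners to randomly shifted points (PIL QUAD transform).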
class Quadrilateral(Augmentation):
tags = ['pil', 'spatial', 'quadrilateral']
name = 'quadrilateral'
def sample_parameters(self):
points = np.array([
[0,0],
[0, self.im_size],
[self.im_size, self.im_size],
[self.im_size, 0]
]).astype(np.float32)
shift = float_parameter(self.severity, self.im_size / 3) * np.random.uniform(low=-1,high=1, size=(4,2))
points += shift
return {'points' : points}
def transform(self, image, points):
im = Image.fromarray(image)
im = im.transform(
(self.im_size, self.im_size),
Image.QUAD,
points.flatten(),
resample=Image.BILINEAR
)
return np.array(im)
def convert_to_numpy(self, params):
return params['points'].flatten()
def convert_from_numpy(self, numpy_record):
return {'points' : numpy_record.reshape(4,2)}
class QuadrilateralNoBars(Augmentation):
tags = ['pil', 'spatial', 'quadrilateral_no_bars']
name = 'quadrilateral_no_bars'
def sample_parameters(self):
points = np.array([
[0,0],
[0, self.im_size],
[self.im_size, self.im_size],
[self.im_size, 0]
]).astype(np.float32)
shift = float_parameter(self.severity, self.im_size / 3) * np.random.uniform(low=-1,high=1, size=(4,2))
points += shift
return {'points' : points}
def transform(self, image, points):
im = Image.fromarray(image)
im = im.transform(
(self.im_size, self.im_size),
Image.QUAD,
points.flatten(),
resample=Image.BILINEAR
)
im = np.array(im).astype(np.float32)
mask = Image.fromarray(np.ones_like(image).astype(np.uint8)*255)
mask = mask.transform(
(self.im_size, self.im_size),
Image.QUAD,
points.flatten(),
resample=Image.BILINEAR
)
mask = np.array(mask).astype(np.float32) / 255
im = mask * im + (1-mask) * image
return im.astype(np.uint8)
def convert_to_numpy(self, params):
return params['points'].flatten()
def convert_from_numpy(self, numpy_record):
return {'points' : numpy_record.reshape(4,2)}
class KeystoneH(Quadrilateral):
tags = ['pil', 'spatial', 'keystone_h']
name = 'keystone_h'
def sample_parameters(self):
shift = float_parameter(sample_level(self.severity,self.max_intensity), self.im_size / 3)
if np.random.uniform() > 0.5:
shift = - shift
return {'shift' : shift}
def transform(self, image, shift):
points = np.array([
[0,shift],
[0, self.im_size-shift],
[self.im_size, self.im_size+shift],
[self.im_size, -shift],
])
return super().transform(image, points)
def convert_to_numpy(self, params):
return np.array([params['shift']])
def convert_from_numpy(self, numpy_record):
return {'shift' : numpy_record[0]}
class KeystoneV(Quadrilateral):
tags = ['pil', 'spatial', 'keystone_v']
name = 'keystone_v'
def sample_parameters(self):
shift = float_parameter(sample_level(self.severity,self.max_intensity), self.im_size / 3)
if np.random.uniform() > 0.5:
shift = - shift
return {'shift' : shift}
def transform(self, image, shift):
points = np.array([
[shift,0],
[-shift, self.im_size],
[self.im_size+shift, self.im_size],
[self.im_size-shift, 0]
])
return super().transform(image, points)
def convert_to_numpy(self, params):
return np.array([params['shift']])
def convert_from_numpy(self, numpy_record):
return {'shift' : numpy_record[0]}
class Perspective(Augmentation):
tags = ['pil', 'spatial', 'perspective']
name = 'perspective'
def sample_parameters(self):
offset_x = float_parameter(sample_level(self.severity,self.max_intensity), 0.1)
if np.random.uniform() > 0.5:
offset_x = -offset_x
offset_y = float_parameter(sample_level(self.severity,self.max_intensity), 0.1)
if np.random.uniform() > 0.5:
offset_y = -offset_y
shift_x = float_parameter(sample_level(self.severity,self.max_intensity), self.im_size / 10)
#shift_x = 0.0
if np.random.uniform() > 0.5:
shift_x = -shift_x
shift_y = float_parameter(sample_level(self.severity,self.max_intensity), self.im_size / 10)
#shift_y = 0.0
if np.random.uniform() > 0.5:
shift_y = -shift_y
factor_x = float_parameter(sample_level(self.severity,self.max_intensity), 0.15)
if np.random.uniform() > 0.5:
factor_x = -factor_x
factor_x = 2 ** factor_x
factor_y = float_parameter(sample_level(self.severity,self.max_intensity), 0.15)
if np.random.uniform() > 0.5:
factor_y = -factor_y
factor_y = 2 ** factor_y
denom_x = float_parameter(sample_level(self.severity,self.max_intensity), 0.2 / self.im_size)
if np.random.uniform() > 0.5:
denom_x = -denom_x
denom_y = float_parameter(sample_level(self.severity,self.max_intensity), 0.2 / self.im_size)
if np.random.uniform() > 0.5:
denom_y = -denom_y
perspective_params = np.array([factor_x, offset_x, shift_x,offset_y, factor_y, shift_y, denom_x, denom_y])
return {'perspective_params' : perspective_params}
def transform(self, image, perspective_params):
im = Image.fromarray(image)
im = im.transform(
(self.im_size, self.im_size),
Image.PERSPECTIVE,
perspective_params,
resample=Image.BILINEAR
)
return np.array(im)
def convert_to_numpy(self, params):
return params['perspective_params']
def convert_from_numpy(self, numpy_record):
return {'perspective_params' : numpy_record}
class PerspectiveNoBars(Augmentation):
tags = ['pil', 'spatial', 'perspective_no_bars']
name = 'perspective_no_bars'
def sample_parameters(self):
offset_x = float_parameter(sample_level(self.severity,self.max_intensity), 0.1)
if np.random.uniform() > 0.5:
offset_x = -offset_x
offset_y = float_parameter(sample_level(self.severity,self.max_intensity), 0.1)
if np.random.uniform() > 0.5:
offset_y = -offset_y
shift_x = float_parameter(sample_level(self.severity,self.max_intensity), self.im_size / 10)
#shift_x = 0.0
if np.random.uniform() > 0.5:
shift_x = -shift_x
shift_y = float_parameter(sample_level(self.severity,self.max_intensity), self.im_size / 10)
#shift_y = 0.0
if np.random.uniform() > 0.5:
shift_y = -shift_y
factor_x = float_parameter(sample_level(self.severity,self.max_intensity), 0.15)
if np.random.uniform() > 0.5:
factor_x = -factor_x
factor_x = 2 ** factor_x
factor_y = float_parameter(sample_level(self.severity,self.max_intensity), 0.15)
if np.random.uniform() > 0.5:
factor_y = -factor_y
factor_y = 2 ** factor_y
denom_x = float_parameter(sample_level(self.severity,self.max_intensity), 0.2 / self.im_size)
if np.random.uniform() > 0.5:
denom_x = -denom_x
denom_y = float_parameter(sample_level(self.severity,self.max_intensity), 0.2 / self.im_size)
if np.random.uniform() > 0.5:
denom_y = -denom_y
perspective_params = np.array([factor_x, offset_x, shift_x,offset_y, factor_y, shift_y, denom_x, denom_y])
return {'perspective_params' : perspective_params}
def transform(self, image, perspective_params):
im = Image.fromarray(image)
im = im.transform(
(self.im_size, self.im_size),
Image.PERSPECTIVE,
perspective_params,
resample=Image.BILINEAR
)
im = np.array(im).astype(np.float32)
mask = Image.fromarray(np.ones_like(image).astype(np.uint8)*255)
mask = mask.transform(
(self.im_size, self.im_size),
Image.PERSPECTIVE,
perspective_params,
resample=Image.BILINEAR
)
mask = np.array(mask).astype(np.float32) / 255
im = mask * im + (1-mask) * image
return im.astype(np.uint8)
def convert_to_numpy(self, params):
return params['perspective_params']
def convert_from_numpy(self, numpy_record):
return {'perspective_params' : numpy_record}
| augmentation-corruption-fbr_main | experiments/overlap/augmentations/pil.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from math import floor, ceil
import numpy as np
from .base import Augmentation
from .utils.severity import int_parameter, sample_level, float_parameter
from skimage.draw import line_aa
from scipy.fftpack import ifft2
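# Cutout: replaces a randomly placed square region of the image with grey (128, 128, 128).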
class CutOut(Augmentation):
tags = ['autoaugment', 'cutout']
name = 'cutout'
def sample_parameters(self):
center = np.random.randint(low=0, high=self.im_size, size=2)
size = int_parameter(sample_level(self.severity, self.max_intensity), 15)+1
return {'center' : center, 'size' : size}
def transform(self, image, center, size):
out = image.copy()
lu = np.clip(center-floor(size/2), 0, self.im_size)
rd = np.clip(center+ceil(size/2), 0, self.im_size)
out[lu[0]:rd[0],lu[1]:rd[1],:] = [128,128,128]
return out
def convert_to_numpy(self, params):
return np.array(params['center'].tolist() + [params['size']])
def convert_from_numpy(self, numpy_record):
return {'center' : numpy_record[0:2].astype(np.int), 'size' : numpy_record[2]}
'''
class CheckerBoardCutOut(Augmentation):
tags = ['checkerboard_cutout']
name = 'checkerboard_cutout'
def sample_parameters(self):
if self.max_intensity:
size = max(1, int(self.severity))
else:
size = np.random.randint(low=1, high=max(1,int(self.severity))+1)
offset = np.random.randint(low=0, high=size+1, size=2)
return { 'offset' : offset, 'size' : size}
def transform(self, image, offset, size):
out = image.copy()
num = self.im_size // size + 2
for i in range(num):
for j in range(num):
if (i+j) % 2 == 0:
continue
l = np.clip((i-1)*size+offset[0],0,self.im_size)
r = np.clip((i)*size+offset[0],0,self.im_size)
u = np.clip((j-1)*size+offset[1],0,self.im_size)
d = np.clip((j)*size+offset[1],0,self.im_size)
out[l:r,u:d,:] = [128,128,128]
return out
def convert_to_numpy(self, params):
return np.array(params['offset'].tolist() + [params['size']])
def convert_from_numpy(self, numpy_record):
return {'offset' : numpy_record[0:2].astype(np.int), 'size' : numpy_record[2].astype(np.int)}
'''
'''
class CheckerBoardCutOut(Augmentation):
tags = ['obscure']
name = 'checkerboard_cutout'
def sample_parameters(self):
angle = np.random.uniform(low=0, high=2*np.pi)
#scales = np.array([0.5, 0.5]) * self.im_size
scales = np.array([float_parameter(sample_level(self.severity, self.max_intensity), 1.0)\
for i in range(2)])
scales = np.maximum((1.1 - scales) * 0.25 * self.im_size, 1)
return {'angle' : angle, 'scales' : scales}
def transform(self, image, scales, angle):
def mask_kernel(point, scales, angle):
nx = (np.cos(angle) * point[0] + np.sin(angle) * point[1]) / scales[0]
ny = (-np.sin(angle) * point[0] + np.cos(angle) * point[1]) / scales[1]
return int(nx % 2) != int(ny % 2)
out = np.array([[image[y,x,:] if mask_kernel([y,x], scales, angle) else np.array([128,128,128])\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(out, 0, 255).astype(np.uint8)
'''
class CheckerBoardCutOut(Augmentation):
tags = ['obscure']
name = 'checkerboard_cutout'
def sample_parameters(self):
angle = np.random.uniform(low=0, high=2*np.pi)
scales = np.maximum(np.random.uniform(low=0.1, high=0.25) * self.im_size, 1)
scales = (scales, scales)
fraction = float_parameter(sample_level(self.severity, self.max_intensity), 1.0)
seed = np.random.randint(low=0, high=2**32)
return {'angle' : angle, 'scales' : scales, 'fraction' : fraction, 'seed' : seed}
def transform(self, image, scales, angle, fraction, seed):
random_state = np.random.RandomState(seed=seed)
grid = random_state.uniform(size=(int(4*self.im_size//scales[0]), int(4*self.im_size//scales[1]))) < fraction
def mask_kernel(point, scales, angle, grid):
nx = (np.cos(angle) * point[0] + np.sin(angle) * point[1]) / scales[0]
ny = (-np.sin(angle) * point[0] + np.cos(angle) * point[1]) / scales[1]
return (int(nx % 2) != int(ny % 2)) or not grid[int(nx),int(ny)]
out = np.array([[image[y,x,:] if mask_kernel([y,x], scales, angle, grid) else np.array([128,128,128])\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(out, 0, 255).astype(np.uint8)
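# Occludes the image with grey anti-aliased line segments; the number of lines scales
# with severity and orientations jitter around a random base angle.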
class Lines(Augmentation):
tags = ['obscure']
name = 'lines'
def sample_parameters(self):
length = 1.0
density = float_parameter(sample_level(self.severity, self.max_intensity), 1.0)
angle = np.random.uniform(low=0.0, high=2*np.pi)
angle_variation = np.random.uniform(low=0.1, high=1.0)
seed = np.random.randint(low=0, high=2**32)
return {'length' : length, 'density' : density, 'angle' : angle, 'angle_variation' : angle_variation, 'seed' : seed}
def transform(self, image, length, density, angle, angle_variation, seed):
num_lines = int(density * self.im_size)
l = length * self.im_size
random_state = np.random.RandomState(seed=seed)
out = image.copy()
for i in range(num_lines):
x = self.im_size * random_state.uniform()
y = self.im_size * random_state.uniform()
a = angle + 2 * np.pi * angle_variation * (random_state.uniform() - 0.5)
s = np.sin(a) * l
c = np.cos(a) * l
#x1 = max(min(int(x-c), self.im_size-1), 0)
#x2 = max(min(int(x+c), self.im_size-1), 0)
#y1 = max(min(int(y-s), self.im_size-1), 0)
#y2 = max(min(int(y+s), self.im_size-1), 0)
x1 = int(x-c)
x2 = int(x+c)
y1 = int(y-s)
y2 = int(y+s)
rxc, ryc, rval = line_aa(x1, y1, x2, y2)
xc, yc, val = [], [], []
for rx, ry, rv in zip(rxc, ryc, rval):
if rx >= 0 and ry >= 0 and rx < self.im_size and ry < self.im_size:
xc.append(rx)
yc.append(ry)
val.append(rv)
xc, yc, val = np.array(xc, dtype=np.int), np.array(yc, dtype=np.int), np.array(val)
out[xc, yc, :] = (1.0 - val.reshape(-1,1)) * out[xc, yc, :].astype(np.float32) + val.reshape(-1,1)*128
return out.astype(np.uint8)
class RandomSample(Augmentation):
tags = ['obscure']
name = 'random_sample'
def sample_parameters(self):
seed = np.random.randint(low=0, high=2**32)
density = 1.0 - float_parameter(sample_level(self.severity, self.max_intensity), 0.8)
return {'density' : density, 'seed' : seed}
def transform(self, image, density, seed):
random_state = np.random.RandomState(seed=seed)
num = int(density * self.im_size ** 2)
out = np.zeros_like(image)
#for i in range(num):
# point = np.random.randint(low=0, high=self.im_size, size=2)
# out[point[0], point[1], :] = image[point[0], point[1], :]
indices = random_state.choice(np.arange(self.im_size**2), size=num, replace=False)
for idx in indices:
out[idx//self.im_size, idx % self.im_size, :] = image[idx//self.im_size, idx % self.im_size, :]
return out
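# Keeps only the pixels where a blue-noise field exceeds a severity-dependent threshold,
# zeroing out everything else.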
class BlueNoiseSample(Augmentation):
tags = ['obscure']
name = 'blue_noise_sample'
def sample_parameters(self):
seed = np.random.randint(low=0, high=2**32)
threshold = float_parameter(sample_level(self.severity, self.max_intensity), 3.0) - 2.5
return {'seed' : seed, 'threshold' : threshold}
def transform(self, image, seed, threshold):
random_state = np.random.RandomState(seed=seed)
center = self.im_size / 2
power = np.array([[np.linalg.norm(np.array([x,y])-center)\
for x in range(self.im_size)] for y in range(self.im_size)])
#power = power / self.im_size
phases = random_state.uniform(low=0, high=2*np.pi, size=(self.im_size, self.im_size//2))
if self.im_size % 2 == 0:
phases = np.concatenate((phases, phases[::-1,::-1]), axis=1)
else:
center_freq = random_state.uniform(low=0, high=2*np.pi, size=(self.im_size//2, 1))
center_freq = np.concatenate((center_freq, np.array([[0.0]]), center_freq[::-1,:]), axis=0)
phases = np.concatenate((phases, center_freq, phases[::-1,::-1]), axis=1)
fourier_space_noise = power * (np.cos(phases) + np.sin(phases) * 1j)
fourier_space_noise = np.roll(fourier_space_noise, self.im_size//2, axis=0)
fourier_space_noise = np.roll(fourier_space_noise, self.im_size//2, axis=1)
noise = np.real(ifft2(fourier_space_noise))
noise = noise / np.std(noise)
mask = noise > threshold
out = image * mask.reshape(self.im_size, self.im_size, 1)
return np.clip(out, 0, 255).astype(np.uint8)
| augmentation-corruption-fbr_main | experiments/overlap/augmentations/obscure.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .base import Augmentation
class Identity(Augmentation):
tags = ["identity"]
name = 'identity'
def __init__(self, severity=None, record=False, **kwargs):
super(Identity, self).__init__(severity, record, **kwargs)
def sample_parameters(self):
return {}
def transform(self, image):
return image
| augmentation-corruption-fbr_main | experiments/overlap/augmentations/identity.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .base import Augmentation
from collections import namedtuple
import numpy as np
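# AugMix-style compositor: mixes several randomly sampled augmentation chains with
# Dirichlet weights, then blends the mixture with the original image using a
# Beta-sampled coefficient m.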
class Augmix(Augmentation):
tags = ['compositor', 'augmix_compose']
def __init__(self, severity=None, im_size=None, augmentation_list=[], width=3, max_depth=3, prob_coeff=1.0, random_depth=True, record=False, float_output=True):
super(Augmix, self).__init__(severity, im_size, record)
self.width = width
self.depth = max_depth
self.random_depth = random_depth
self.prob_coeff = prob_coeff
self.augs = augmentation_list
self.float_output = float_output
self.record_length = max([len(a.convert_to_numpy(a.sample_parameters())) for a in self.augs])\
if self.augs else 0
def transform(self, image, m, ws, aug_record):
if not self.augs:
return image
mix = np.zeros_like(image).astype(np.float32)
for i in range(self.width):
image_aug = image.copy()
for j in range(self.depth):
pos = self.depth * i + j
if aug_record[pos].idx < 0:
continue
op = self.augs[aug_record[pos].idx].transform
image_aug = op(image_aug, **(aug_record[pos].params))
mix += ws[i] * image_aug.astype(np.float32)
mixed = (1 - m) * image.astype(np.float32) + m * mix
if self.float_output:
return mixed
return mixed.astype(np.uint8)
def sample_parameters(self):
ws = np.float32(np.random.dirichlet([self.prob_coeff] * self.width))
m = np.float32(np.random.beta(self.prob_coeff, self.prob_coeff))
if not self.augs:
return { 'm' : m, 'ws' : ws, 'aug_record': []}
aug_idxs = np.random.randint(low=0, high=len(self.augs), size=self.width*self.depth)
if self.random_depth:
for i in range(self.width):
inverse_depth = np.random.randint(1,self.depth+1)
aug_idxs[self.depth*i+inverse_depth:self.depth*(i+1)] = -1
aug_params = [self.augs[i].sample_parameters() if i != -1 else {} for i in aug_idxs]
AugRecord = namedtuple('AugRecord', ('idx', 'params'))
return { 'm' : m, 'ws' : ws, 'aug_record' : [AugRecord(idx, params) for idx, params in zip(aug_idxs, aug_params)]}
def convert_from_numpy(self, record):
out = {}
out['m'] = record[0]
out['ws'] = record[1:self.width+1]
if not self.augs:
out['aug_record'] = {}
return out
idxs = record[self.width+1:self.width+1+self.width*self.depth]
params = []
for i,idx in enumerate(idxs):
offset = self.width+1+self.width*self.depth + i * self.record_length
if idx < 0:
params.append({})
continue
sub_params = self.augs[int(idx)].convert_from_numpy(record[offset:offset+self.record_length])
params.append(sub_params)
AugRecord = namedtuple('AugRecord', ('idx', 'params'))
out['aug_record'] = [AugRecord(int(idx), params) for idx, params in zip(idxs, params)]
return out
def convert_to_numpy(self, record):
out = np.zeros(1+self.width+(self.width*self.depth*(self.record_length+1)))
if not self.augs:
return out
out[0] = record['m']
out[1:self.width+1] = record['ws']
sub_record = record['aug_record']
out[self.width+1:self.width+1+self.width*self.depth] = [i.idx for i in sub_record]
param_list = []
for a in record['aug_record']:
if a.idx >= 0:
curr_params = self.augs[a.idx].convert_to_numpy(a.params)
if len(curr_params) < self.record_length:
curr_params = np.concatenate((curr_params, np.zeros(self.record_length-len(curr_params))))
else:
curr_params = np.zeros(self.record_length)
param_list.append(curr_params)
params = np.concatenate(param_list)
out[self.width+1+self.width*self.depth:] = params
return out
class AutoAugmentOld(Augmentation):
tags = ['compositor', 'autoaugment_compose']
def __init__(self, subpolicies, severity=None, im_size=None, record=False):
super(AutoAugmentOld, self).__init__(severity, im_size, record)
self.subpolicies = subpolicies
self.record_length = 1+2*max([len(policy) for policy in self.subpolicies])
def sample_parameters(self):
policy_idx = np.random.randint(low=0, high=len(self.subpolicies))
selected = np.random.uniform(low=0.0, high=1.0, size=len(self.subpolicies[policy_idx]))
thresholds = np.array([transform_tuple[1] for transform_tuple in self.subpolicies[policy_idx]])
selected = (selected < thresholds).tolist()
flipped = [(np.random.choice([1,-1]) if (selected[i]==True and p[2] is not None and p[2]<0) else 1) for i,p in enumerate(self.subpolicies[policy_idx])]
return { 'policy_idx' : policy_idx, 'selections' : selected, 'flipped' : flipped }
def transform(self, image, policy_idx, selections, flipped):
policy = self.subpolicies[policy_idx]
for i, transform_tuple in enumerate(policy):
if selections[i]:
transform = transform_tuple[0]
magnitude = transform_tuple[2]
if magnitude is not None:
image = transform.transform(image, magnitude * flipped[i])
else:
image = transform.transform(image)
return image
def convert_to_numpy(self, params):
out = np.zeros(self.record_length)
out[0] = params['policy_idx']
curr_len = len(self.subpolicies[params['policy_idx']])
out[1:curr_len+1] = params['selections']
out[1+curr_len:1+2*curr_len] = params['flipped']
return out
def convert_from_numpy(self, numpy_record):
params = {}
params['policy_idx'] = int(numpy_record[0])
curr_len = len(self.subpolicies[params['policy_idx']])
params['selections'] = [True if int(x)==1 else False for x in numpy_record[1:1+curr_len]]
params['flipped'] = [int(x) for x in numpy_record[1+curr_len:1+2*curr_len]]
return params
class AutoAugment(Augmentation):
tags = ['compositor', 'autoaugment_compose']
def __init__(self, subpolicies, severity=None, im_size=None, record=False):
super(AutoAugment, self).__init__(severity, im_size, record)
self.subpolicies = subpolicies
self.record_length = 1+2*max([len(policy) for policy in self.subpolicies])
def sample_parameters(self):
policy_idx = np.random.randint(low=0, high=len(self.subpolicies))
selected = np.random.uniform(low=0.0, high=1.0, size=len(self.subpolicies[policy_idx]))
thresholds = np.array([transform_tuple[1] for transform_tuple in self.subpolicies[policy_idx]])
selected = (selected < thresholds).tolist()
flipped = [(np.random.choice([1,-1]) if (selected[i]==True and p[3] is not None) else 1) for i,p in enumerate(self.subpolicies[policy_idx])]
return { 'policy_idx' : policy_idx, 'selections' : selected, 'flipped' : flipped }
def transform(self, image, policy_idx, selections, flipped):
policy = self.subpolicies[policy_idx]
for i, transform_tuple in enumerate(policy):
if selections[i]:
transform = transform_tuple[0]
magnitude = transform_tuple[2]
if magnitude is not None:
magnitude = (transform_tuple[3] if transform_tuple[3] is not None else 0) + magnitude * flipped[i]
image = transform.transform(image, magnitude)
else:
image = transform.transform(image)
return image
def convert_to_numpy(self, params):
out = np.zeros(self.record_length)
out[0] = params['policy_idx']
curr_len = len(self.subpolicies[params['policy_idx']])
out[1:curr_len+1] = params['selections']
out[1+curr_len:1+2*curr_len] = params['flipped']
return out
def convert_from_numpy(self, numpy_record):
params = {}
params['policy_idx'] = int(numpy_record[0])
curr_len = len(self.subpolicies[params['policy_idx']])
params['selections'] = [True if int(x)==1 else False for x in numpy_record[1:1+curr_len]]
params['flipped'] = [int(x) for x in numpy_record[1+curr_len:1+2*curr_len]]
return params
class RandomSample(Augmentation):
def __init__(self, augmentation_list, weights=None, severity=None, im_size=None, record=False):
super(RandomSample, self).__init__(severity=severity, im_size=im_size, record=record)
self.transforms = augmentation_list
self.weights = weights
assert weights is None or (len(weights)==len(augmentation_list)),\
"Must have equal number of weights as transforms."
assert weights is None or (np.sum(weights)==1.0),\
"Weights must sum to one."
self.record_length = max([len(a.convert_to_numpy(a.sample_parameters())) for a in self.transforms])\
if self.transforms else 0
def sample_parameters(self):
idx = np.random.choice(np.arange(len(self.transforms)), p=self.weights)
transform_params = self.transforms[idx].sample_parameters()
return {'idx': idx, 'transform_params': transform_params}
def transform(self, image, idx, transform_params):
return self.transforms[idx].transform(image, **transform_params)
def convert_from_numpy(self, record):
idx = int(record[0])
transform_params = self.transforms[idx].convert_from_numpy(record[1:])
return {'idx' : idx, 'transform_params': transform_params}
def convert_to_numpy(self, record):
numpy_record = np.zeros(1+self.record_length)
numpy_record[0] = record['idx']
numpy_params = self.transforms[record['idx']].convert_to_numpy(record['transform_params'])
numpy_record[1:1+len(numpy_params)] = numpy_params
return numpy_record
class ComposeSerially(Augmentation):
def __init__(self, augmentation_list, severity=None, im_size=None, record=False):
        super(ComposeSerially, self).__init__(severity=severity, im_size=im_size, record=record)
        self.augmentation_list = augmentation_list
self.record_lengths = [len(a.convert_to_numpy(a.sample_parameters())) for a in augmentation_list]
def sample_parameters(self):
params = {'param_list' : [a.sample_parameters() for a in self.augmentation_list]}
return params
def transform(self, image, param_list):
for a, p in zip(self.augmentation_list, param_list):
image = a.transform(image, **p)
return image
def convert_to_numpy(self, params):
record = None
for a, p in zip(self.augmentation_list, params['param_list']):
record = np.concatenate((record, a.convert_to_numpy(p)), axis=0)\
if record is not None else a.convert_to_numpy(p)
return record
def convert_from_numpy(self, numpy_record):
offset = 0
        params = {'param_list' : []}
        for a, d in zip(self.augmentation_list, self.record_lengths):
            params['param_list'].append(a.convert_from_numpy(numpy_record[offset:offset+d]))
offset += d
return params
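# Composition sketch (the augmentation instances are placeholders): RandomSample draws one
# augmentation per call while ComposeSerially applies every augmentation in order; both
# forward per-call parameters so recorded corruptions can be replayed, e.g.
#   pipeline = ComposeSerially([RandomSample([aug_a, aug_b]), aug_c])
#   params = pipeline.sample_parameters()
#   corrupted = pipeline.transform(image, **params)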
| augmentation-corruption-fbr_main | experiments/overlap/augmentations/compositions.py |
# This source code is adapted from code licensed under the license at
# third_party/imagenetc_license from the root directory of the repository
# Originally available: github.com/hendrycks/robustness
# Modifications Copyright (c) Facebook, Inc. and its affiliates,
# licensed under the MIT license found in the LICENSE file in the root
# directory of this source tree.
from .base import Augmentation
import pickle
import torch
import torch.utils.data
# Per-channel mean and SD values in BGR order
_MEAN = [125.3, 123.0, 113.9]
_SD = [63.0, 62.1, 66.7]
import os
from PIL import Image
import os.path
import time
import torch
import torchvision.datasets as dset
import torchvision.transforms as trn
import torch.utils.data as data
import numpy as np
from PIL import Image
# /////////////// Distortion Helpers ///////////////
import skimage as sk
from skimage.filters import gaussian
from io import BytesIO
from wand.image import Image as WandImage
from wand.api import library as wandlibrary
import wand.color as WandColor
import ctypes
from PIL import Image as PILImage
import cv2
from scipy.ndimage import zoom as scizoom
from scipy.ndimage.interpolation import map_coordinates
import warnings
warnings.simplefilter("ignore", UserWarning)
def disk(radius, alias_blur=0.1, dtype=np.float32):
if radius <= 8:
L = np.arange(-8, 8 + 1)
ksize = (3, 3)
else:
L = np.arange(-radius, radius + 1)
ksize = (5, 5)
X, Y = np.meshgrid(L, L)
aliased_disk = np.array((X ** 2 + Y ** 2) <= radius ** 2, dtype=dtype)
aliased_disk /= np.sum(aliased_disk)
# supersample disk to antialias
return cv2.GaussianBlur(aliased_disk, ksize=ksize, sigmaX=alias_blur)
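# Sketch of how the disk kernel is consumed (see defocus_blur below): each colour channel
# is convolved with this anti-aliased circular kernel, e.g.
#   kernel = disk(radius=3, alias_blur=0.1)        # radius/alias_blur here are illustrative
#   blurred = cv2.filter2D(channel, -1, kernel)    # channel: float HxW array in [0, 1]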
# Tell Python about the C method
wandlibrary.MagickMotionBlurImage.argtypes = (ctypes.c_void_p, # wand
ctypes.c_double, # radius
ctypes.c_double, # sigma
ctypes.c_double) # angle
# Extend wand.image.Image class to include method signature
class MotionImage(WandImage):
def motion_blur(self, radius=0.0, sigma=0.0, angle=0.0):
wandlibrary.MagickMotionBlurImage(self.wand, radius, sigma, angle)
# modification of https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py
def plasma_fractal(seed, mapsize, wibbledecay=3):
"""
Generate a heightmap using diamond-square algorithm.
Return square 2d array, side length 'mapsize', of floats in range 0-255.
'mapsize' must be a power of two.
"""
assert (mapsize & (mapsize - 1) == 0)
maparray = np.empty((mapsize, mapsize), dtype=np.float_)
maparray[0, 0] = 0
stepsize = mapsize
wibble = 100
random_state = np.random.RandomState(seed=seed)
def wibbledmean(array):
return array / 4 + wibble * random_state.uniform(-wibble, wibble, array.shape)
def fillsquares():
"""For each square of points stepsize apart,
calculate middle value as mean of points + wibble"""
cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)
squareaccum += np.roll(squareaccum, shift=-1, axis=1)
maparray[stepsize // 2:mapsize:stepsize,
stepsize // 2:mapsize:stepsize] = wibbledmean(squareaccum)
def filldiamonds():
"""For each diamond of points stepsize apart,
calculate middle value as mean of points + wibble"""
mapsize = maparray.shape[0]
drgrid = maparray[stepsize // 2:mapsize:stepsize, stepsize // 2:mapsize:stepsize]
ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
ldrsum = drgrid + np.roll(drgrid, 1, axis=0)
lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)
ltsum = ldrsum + lulsum
maparray[0:mapsize:stepsize, stepsize // 2:mapsize:stepsize] = wibbledmean(ltsum)
tdrsum = drgrid + np.roll(drgrid, 1, axis=1)
tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)
ttsum = tdrsum + tulsum
maparray[stepsize // 2:mapsize:stepsize, 0:mapsize:stepsize] = wibbledmean(ttsum)
while stepsize >= 2:
fillsquares()
filldiamonds()
stepsize //= 2
wibble /= wibbledecay
maparray -= maparray.min()
return maparray / maparray.max()
def clipped_zoom(img, zoom_factor):
h = img.shape[0]
# ceil crop height(= crop width)
ch = int(np.ceil(h / zoom_factor))
top = (h - ch) // 2
img = scizoom(img[top:top + ch, top:top + ch], (zoom_factor, zoom_factor, 1), order=1)
# trim off any extra pixels
trim_top = (img.shape[0] - h) // 2
return img[trim_top:trim_top + h, trim_top:trim_top + h]
# /////////////// End Distortion Helpers ///////////////
# /////////////// Distortions ///////////////
def gaussian_noise(x, im_size, seed, severity=1):
if im_size == 32:
c = [0.04, 0.06, .08, .09, .10][int(severity) - 1]
else:
c = [.08, .12, 0.18, 0.26, 0.38][int(severity) - 1]
random_state = np.random.RandomState(seed=seed)
x = np.array(x) / 255.
return np.clip(x + random_state.normal(size=x.shape, scale=c), 0, 1) * 255
def shot_noise(x, im_size, seed, severity=1):
if im_size == 32:
c = [500, 250, 100, 75, 50][int(severity) - 1]
else:
c = [60, 25, 12, 5, 3][int(severity) - 1]
random_state = np.random.RandomState(seed=seed)
x = np.array(x) / 255.
return np.clip(random_state.poisson(x * c) / c, 0, 1) * 255
def impulse_noise(x, im_size, seed, severity=1):
if im_size == 32:
c = [.01, .02, .03, .05, .07][int(severity) - 1]
else:
c = [.03, .06, .09, 0.17, 0.27][int(severity) - 1]
x = sk.util.random_noise(np.array(x) / 255., mode='s&p', amount=c, seed=seed)
return np.clip(x, 0, 1) * 255
def speckle_noise(x, im_size, seed, severity=1):
if im_size == 32:
c = [.06, .1, .12, .16, .2][int(severity) - 1]
else:
c = [.15, .2, 0.35, 0.45, 0.6][int(severity) - 1]
random_state = np.random.RandomState(seed=seed)
x = np.array(x) / 255.
return np.clip(x + x * random_state.normal(size=x.shape, scale=c), 0, 1) * 255
def gaussian_blur(x, im_size, severity=1):
if im_size == 32:
c = [.4, .6, 0.7, .8, 1][int(severity) - 1]
else:
c = [1, 2, 3, 4, 6][int(severity) - 1]
x = gaussian(np.array(x) / 255., sigma=c, multichannel=True)
return np.clip(x, 0, 1) * 255
def glass_blur(x, im_size, seed, severity=1):
# sigma, max_delta, iterations
if im_size == 32:
c = [(0.05,1,1), (0.25,1,1), (0.4,1,1), (0.25,1,2), (0.4,1,2)][int(severity) - 1]
else:
c = [(0.7, 1, 2), (0.9, 2, 1), (1, 2, 3), (1.1, 3, 2), (1.5, 4, 2)][int(severity) - 1]
random_state = np.random.RandomState(seed=seed)
x = np.uint8(gaussian(np.array(x) / 255., sigma=c[0], multichannel=True) * 255)
# locally shuffle pixels
for i in range(c[2]):
for h in range(im_size - c[1], c[1], -1):
for w in range(im_size - c[1], c[1], -1):
dx, dy = random_state.randint(-c[1], c[1], size=(2,))
h_prime, w_prime = h + dy, w + dx
# swap
x[h, w], x[h_prime, w_prime] = x[h_prime, w_prime], x[h, w]
return np.clip(gaussian(x / 255., sigma=c[0], multichannel=True), 0, 1) * 255
def defocus_blur(x, im_size, severity=1):
if im_size == 32:
c = [(0.3, 0.4), (0.4, 0.5), (0.5, 0.6), (1, 0.2), (1.5, 0.1)][int(severity) - 1]
else:
c = [(3, 0.1), (4, 0.5), (6, 0.5), (8, 0.5), (10, 0.5)][int(severity) - 1]
x = np.array(x) / 255.
kernel = disk(radius=c[0], alias_blur=c[1])
channels = []
for d in range(3):
channels.append(cv2.filter2D(x[:, :, d], -1, kernel))
channels = np.array(channels).transpose((1, 2, 0)) # 3x32x32 -> 32x32x3
return np.clip(channels, 0, 1) * 255
def motion_blur(x, im_size, angle, severity=1):
if im_size == 32:
c = [(6,1), (6,1.5), (6,2), (8,2), (9,2.5)][int(severity) - 1]
else:
c = [(10, 3), (15, 5), (15, 8), (15, 12), (20, 15)][int(severity) - 1]
output = BytesIO()
x = Image.fromarray(x)
x.save(output, format='PNG')
x = MotionImage(blob=output.getvalue())
x.motion_blur(radius=c[0], sigma=c[1], angle=angle)
x = cv2.imdecode(np.fromstring(x.make_blob(), np.uint8),
cv2.IMREAD_UNCHANGED)
if x.shape != (im_size, im_size):
return np.clip(x[..., [2, 1, 0]], 0, 255) # BGR to RGB
else: # greyscale to RGB
return np.clip(np.array([x, x, x]).transpose((1, 2, 0)), 0, 255)
def zoom_blur(x, im_size, severity=1):
if im_size == 32:
c = [np.arange(1, 1.06, 0.01), np.arange(1, 1.11, 0.01), np.arange(1, 1.16, 0.01),
np.arange(1, 1.21, 0.01), np.arange(1, 1.26, 0.01)][int(severity) - 1]
else:
c = [np.arange(1, 1.11, 0.01),
np.arange(1, 1.16, 0.01),
np.arange(1, 1.21, 0.02),
np.arange(1, 1.26, 0.02),
np.arange(1, 1.31, 0.03)][int(severity) - 1]
x = (np.array(x) / 255.).astype(np.float32)
out = np.zeros_like(x)
for zoom_factor in c:
out += clipped_zoom(x, zoom_factor)
x = (x + out) / (len(c) + 1)
return np.clip(x, 0, 1) * 255
def fog(x, im_size, seed, severity=1):
if im_size == 32:
c = [(.2,3), (.5,3), (0.75,2.5), (1,2), (1.5,1.75)][int(severity) - 1]
mapsize = 32
else:
c = [(1.5, 2), (2, 2), (2.5, 1.7), (2.5, 1.5), (3, 1.4)][int(severity) - 1]
mapsize = 256
x = np.array(x) / 255.
max_val = x.max()
x += c[0] * plasma_fractal(wibbledecay=c[1], seed=seed, mapsize=mapsize)[:im_size, :im_size][..., np.newaxis]
return np.clip(x * max_val / (max_val + c[0]), 0, 1) * 255
def frost(x, im_size, frost_path, image_idx, crop_pos, severity=1):
if im_size == 32:
c = [(1, 0.2), (1, 0.3), (0.9, 0.4), (0.85, 0.4), (0.75, 0.45)][int(severity) - 1]
else:
c = [(1, 0.4),
(0.8, 0.6),
(0.7, 0.7),
(0.65, 0.7),
(0.6, 0.75)][int(severity) - 1]
idx = image_idx
filename = ['./frost1.png', './frost2.png', './frost3.png', './frost4.jpg', './frost5.jpg', './frost6.jpg'][idx]
filename = os.path.join(frost_path, filename)
frost = cv2.imread(filename)
if im_size == 32:
frost = cv2.resize(frost, (0, 0), fx=0.2, fy=0.2)
# randomly crop and convert to rgb
#x_start, y_start = np.random.randint(0, frost.shape[0] - 32), np.random.randint(0, frost.shape[1] - 32)
x_start, y_start = crop_pos[0], crop_pos[1]
frost = frost[x_start:x_start + im_size, y_start:y_start + im_size][..., [2, 1, 0]]
return np.clip(c[0] * np.array(x) + c[1] * frost, 0, 255)
def snow(x, im_size, seed, severity=1):
if im_size == 32:
c = [(0.1,0.2,1,0.6,8,3,0.95),
(0.1,0.2,1,0.5,10,4,0.9),
(0.15,0.3,1.75,0.55,10,4,0.9),
(0.25,0.3,2.25,0.6,12,6,0.85),
(0.3,0.3,1.25,0.65,14,12,0.8)][int(severity) - 1]
else:
c = [(0.1, 0.3, 3, 0.5, 10, 4, 0.8),
(0.2, 0.3, 2, 0.5, 12, 4, 0.7),
(0.55, 0.3, 4, 0.9, 12, 8, 0.7),
(0.55, 0.3, 4.5, 0.85, 12, 8, 0.65),
(0.55, 0.3, 2.5, 0.85, 12, 12, 0.55)][int(severity) - 1]
random_state = np.random.RandomState(seed=seed)
x = np.array(x, dtype=np.float32) / 255.
snow_layer = random_state.normal(size=x.shape[:2], loc=c[0], scale=c[1]) # [:2] for monochrome
snow_layer = clipped_zoom(snow_layer[..., np.newaxis], c[2])
snow_layer[snow_layer < c[3]] = 0
snow_layer = PILImage.fromarray((np.clip(snow_layer.squeeze(), 0, 1) * 255).astype(np.uint8), mode='L')
output = BytesIO()
snow_layer.save(output, format='PNG')
snow_layer = MotionImage(blob=output.getvalue())
snow_layer.motion_blur(radius=c[4], sigma=c[5], angle=random_state.uniform(-135, -45))
snow_layer = cv2.imdecode(np.fromstring(snow_layer.make_blob(), np.uint8),
cv2.IMREAD_UNCHANGED) / 255.
snow_layer = snow_layer[..., np.newaxis]
x = c[6] * x + (1 - c[6]) * np.maximum(x, cv2.cvtColor(x, cv2.COLOR_RGB2GRAY).reshape(im_size, im_size, 1) * 1.5 + 0.5)
return np.clip(x + snow_layer + np.rot90(snow_layer, k=2), 0, 1) * 255
def spatter(x, im_size, seed, severity=1):
if im_size == 32:
c = [(0.62,0.1,0.7,0.7,0.5,0),
(0.65,0.1,0.8,0.7,0.5,0),
(0.65,0.3,1,0.69,0.5,0),
(0.65,0.1,0.7,0.69,0.6,1),
(0.65,0.1,0.5,0.68,0.6,1)][int(severity) - 1]
else:
c = [(0.65, 0.3, 4, 0.69, 0.6, 0),
(0.65, 0.3, 3, 0.68, 0.6, 0),
(0.65, 0.3, 2, 0.68, 0.5, 0),
(0.65, 0.3, 1, 0.65, 1.5, 1),
(0.67, 0.4, 1, 0.65, 1.5, 1)][int(severity) - 1]
x = np.array(x, dtype=np.float32) / 255.
random_state = np.random.RandomState(seed=seed)
liquid_layer = random_state.normal(size=x.shape[:2], loc=c[0], scale=c[1])
liquid_layer = gaussian(liquid_layer, sigma=c[2])
liquid_layer[liquid_layer < c[3]] = 0
if c[5] == 0:
liquid_layer = (liquid_layer * 255).astype(np.uint8)
dist = 255 - cv2.Canny(liquid_layer, 50, 150)
dist = cv2.distanceTransform(dist, cv2.DIST_L2, 5)
_, dist = cv2.threshold(dist, 20, 20, cv2.THRESH_TRUNC)
dist = cv2.blur(dist, (3, 3)).astype(np.uint8)
dist = cv2.equalizeHist(dist)
# ker = np.array([[-1,-2,-3],[-2,0,0],[-3,0,1]], dtype=np.float32)
# ker -= np.mean(ker)
ker = np.array([[-2, -1, 0], [-1, 1, 1], [0, 1, 2]])
dist = cv2.filter2D(dist, cv2.CV_8U, ker)
dist = cv2.blur(dist, (3, 3)).astype(np.float32)
m = cv2.cvtColor(liquid_layer * dist, cv2.COLOR_GRAY2BGRA)
m /= np.max(m, axis=(0, 1))
m *= c[4]
# water is pale turqouise
color = np.concatenate((175 / 255. * np.ones_like(m[..., :1]),
238 / 255. * np.ones_like(m[..., :1]),
238 / 255. * np.ones_like(m[..., :1])), axis=2)
color = cv2.cvtColor(color, cv2.COLOR_BGR2BGRA)
x = cv2.cvtColor(x, cv2.COLOR_BGR2BGRA)
return cv2.cvtColor(np.clip(x + m * color, 0, 1), cv2.COLOR_BGRA2BGR) * 255
else:
m = np.where(liquid_layer > c[3], 1, 0)
m = gaussian(m.astype(np.float32), sigma=c[4])
m[m < 0.8] = 0
# m = np.abs(m) ** (1/c[4])
# mud brown
color = np.concatenate((63 / 255. * np.ones_like(x[..., :1]),
42 / 255. * np.ones_like(x[..., :1]),
20 / 255. * np.ones_like(x[..., :1])), axis=2)
color *= m[..., np.newaxis]
x *= (1 - m[..., np.newaxis])
return np.clip(x + color, 0, 1) * 255
def contrast(x, im_size, severity=1):
if im_size == 32:
c = [.75, .5, .4, .3, 0.15][int(severity) - 1]
else:
c = [0.4, .3, .2, .1, .05][int(severity) - 1]
x = np.array(x) / 255.
means = np.mean(x, axis=(0, 1), keepdims=True)
return np.clip((x - means) * c + means, 0, 1) * 255
def brightness(x, im_size, severity=1):
if im_size == 32:
c = [.05, .1, .15, .2, .3][int(severity) - 1]
else:
c = [.1, .2, .3, .4, .5][int(severity) - 1]
x = np.array(x) / 255.
x = sk.color.rgb2hsv(x)
x[:, :, 2] = np.clip(x[:, :, 2] + c, 0, 1)
x = sk.color.hsv2rgb(x)
return np.clip(x, 0, 1) * 255
def saturate(x, im_size, severity=1):
if im_size == 32:
c = [(0.3, 0), (0.1, 0), (1.5, 0), (2, 0.1), (2.5, 0.2)][int(severity) - 1]
else:
c = [(0.3, 0), (0.1, 0), (2, 0), (5, 0.1), (20, 0.2)][int(severity) - 1]
x = np.array(x) / 255.
x = sk.color.rgb2hsv(x)
x[:, :, 1] = np.clip(x[:, :, 1] * c[0] + c[1], 0, 1)
x = sk.color.hsv2rgb(x)
return np.clip(x, 0, 1) * 255
def jpeg_compression(x, im_size, severity=1):
if im_size == 32:
c = [80, 65, 58, 50, 40][int(severity) - 1]
else:
c = [25, 18, 15, 10, 7][int(severity) - 1]
x = Image.fromarray(x)
output = BytesIO()
x.save(output, 'JPEG', quality=c)
x = PILImage.open(output)
return x
def pixelate(x, im_size, severity=1):
if im_size == 32:
c = [0.95, 0.9, 0.85, 0.75, 0.65][int(severity) - 1]
else:
c = [0.6, 0.5, 0.4, 0.3, 0.25][int(severity) - 1]
x = Image.fromarray(x)
x = x.resize((int(im_size * c), int(im_size * c)), PILImage.BOX)
x = x.resize((im_size, im_size), PILImage.BOX)
return x
# mod of https://gist.github.com/erniejunior/601cdf56d2b424757de5
def elastic_transform(image, im_size, seed, severity=1):
IMSIZE = im_size
if im_size == 32:
c = [(IMSIZE*0, IMSIZE*0, IMSIZE*0.08),
(IMSIZE*0.05, IMSIZE*0.2, IMSIZE*0.07),
(IMSIZE*0.08, IMSIZE*0.06, IMSIZE*0.06),
(IMSIZE*0.1, IMSIZE*0.04, IMSIZE*0.05),
(IMSIZE*0.1, IMSIZE*0.03, IMSIZE*0.03)][int(severity) - 1]
else:
c = [(244 * 2, 244 * 0.7, 244 * 0.1), # 244 should have been 224, but ultimately nothing is incorrect
(244 * 2, 244 * 0.08, 244 * 0.2),
(244 * 0.05, 244 * 0.01, 244 * 0.02),
(244 * 0.07, 244 * 0.01, 244 * 0.02),
(244 * 0.12, 244 * 0.01, 244 * 0.02)][int(severity) - 1]
random_state = np.random.RandomState(seed=seed)
image = np.array(image, dtype=np.float32) / 255.
shape = image.shape
shape_size = shape[:2]
# random affine
center_square = np.float32(shape_size) // 2
square_size = min(shape_size) // 3
pts1 = np.float32([center_square + square_size,
[center_square[0] + square_size, center_square[1] - square_size],
center_square - square_size])
pts2 = pts1 + random_state.uniform(-c[2], c[2], size=pts1.shape).astype(np.float32)
M = cv2.getAffineTransform(pts1, pts2)
image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)
dx = (gaussian(random_state.uniform(-1, 1, size=shape[:2]),
c[1], mode='reflect', truncate=3) * c[0]).astype(np.float32)
dy = (gaussian(random_state.uniform(-1, 1, size=shape[:2]),
c[1], mode='reflect', truncate=3) * c[0]).astype(np.float32)
dx, dy = dx[..., np.newaxis], dy[..., np.newaxis]
x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1)), np.reshape(z, (-1, 1))
return np.clip(map_coordinates(image, indices, order=1, mode='reflect').reshape(shape), 0, 1) * 255
class GaussianNoise(Augmentation):
tags = ['imagenet_c', 'noise']
name = 'gaussian_noise'
def sample_parameters(self):
seed = np.random.randint(0,2**32)
return {'seed': seed}
def transform(self, image, seed):
return np.uint8(gaussian_noise(image, self.im_size, seed, severity=self.severity))
class ShotNoise(Augmentation):
tags = ['imagenet_c', 'noise']
name = 'shot_noise'
def sample_parameters(self):
seed = np.random.randint(0,2**32)
return {'seed': seed}
def transform(self, image, seed):
return np.uint8(shot_noise(image, self.im_size, seed, severity=self.severity))
class ImpulseNoise(Augmentation):
tags = ['imagenet_c', 'noise']
name = 'impulse_noise'
def sample_parameters(self):
seed = np.random.randint(0,2**32)
return {'seed': seed}
def transform(self, image, seed):
return np.uint8(impulse_noise(image, self.im_size, seed, severity=self.severity))
class SpeckleNoise(Augmentation):
tags = ['imagenet_c', 'extra']
name = 'speckle_noise'
def sample_parameters(self):
seed = np.random.randint(0,2**32)
return {'seed': seed}
def transform(self, image, seed):
return np.uint8(speckle_noise(image, self.im_size, seed, severity=self.severity))
class ElasticTransform(Augmentation):
tags = ['imagenet_c', 'digital']
name = 'elastic_transform'
def sample_parameters(self):
seed = np.random.randint(0,2**32)
return {'seed': seed}
def transform(self, image, seed):
return np.uint8(elastic_transform(image, self.im_size, seed, severity=self.severity))
class GlassBlur(Augmentation):
tags = ['imagenet_c', 'blur']
name = 'glass_blur'
def sample_parameters(self):
seed = np.random.randint(0,2**32)
return {'seed': seed}
def transform(self, image, seed):
return np.uint8(glass_blur(image, self.im_size, seed, severity=self.severity))
class Snow(Augmentation):
tags = ['imagenet_c', 'weather']
name = 'snow'
def sample_parameters(self):
seed = np.random.randint(0,2**32)
return {'seed': seed}
def transform(self, image, seed):
return np.uint8(snow(image, self.im_size, seed, severity=self.severity))
class Spatter(Augmentation):
tags = ['imagenet_c', 'extra']
name = 'spatter'
def sample_parameters(self):
seed = np.random.randint(0,2**32)
return {'seed': seed}
def transform(self, image, seed):
return np.uint8(spatter(image, self.im_size, seed, severity=self.severity))
class Fog(Augmentation):
tags = ['imagenet_c', 'blur']
name = 'fog'
def sample_parameters(self):
seed = np.random.randint(0,2**32)
return {'seed': seed}
def transform(self, image, seed):
return np.uint8(fog(image, self.im_size, seed, severity=self.severity))
class ZoomBlur(Augmentation):
tags = ['imagenet_c', 'blur']
name = 'zoom_blur'
def sample_parameters(self):
return {}
def transform(self, image):
return np.uint8(zoom_blur(image, self.im_size, severity=self.severity))
class Pixelate(Augmentation):
tags = ['imagenet_c', 'digital']
name = 'pixelate'
def sample_parameters(self):
return {}
def transform(self, image):
return np.uint8(pixelate(image, self.im_size, severity=self.severity))
class JPEGCompression(Augmentation):
tags = ['imagenet_c', 'digital']
name = 'jpeg_compression'
def sample_parameters(self):
return {}
def transform(self, image):
return np.uint8(jpeg_compression(image, self.im_size, severity=self.severity))
class Contrast(Augmentation):
tags = ['imagenet_c', 'digital']
name = 'contrast'
def sample_parameters(self):
return {}
def transform(self, image):
return np.uint8(contrast(image, self.im_size, severity=self.severity))
class Brightness(Augmentation):
tags = ['imagenet_c', 'weather']
name = 'brightness'
def sample_parameters(self):
return {}
def transform(self, image):
return np.uint8(brightness(image, self.im_size, severity=self.severity))
class MotionBlur(Augmentation):
tags = ['imagenet_c', 'blur']
name = 'motion_blur'
def sample_parameters(self):
angle = np.random.uniform(-45,45)
return {'angle' : angle}
def transform(self, image, angle):
return np.uint8(motion_blur(image, self.im_size, angle=angle, severity=self.severity))
class GaussianBlur(Augmentation):
tags = ['imagenet_c', 'extra']
name = 'gaussian_blur'
def sample_parameters(self):
return {}
def transform(self, image):
return np.uint8(gaussian_blur(image, self.im_size, severity=self.severity))
class Frost(Augmentation):
tags = ['imagenet_c', 'path_required', 'weather']
name = 'frost'
def __init__(self, severity, im_size, record=False, max_intensity=False, frost_path=None):
super().__init__(severity, im_size, record, max_intensity)
self.frost_path = frost_path
def sample_parameters(self):
image_idx = np.random.randint(5)
filename = ['./frost1.png', './frost2.png', './frost3.png', './frost4.jpg', './frost5.jpg', './frost6.jpg'][image_idx]
filename = os.path.join(self.frost_path, filename)
frost = cv2.imread(filename)
if self.im_size == 32:
frost = cv2.resize(frost, (0, 0), fx=0.2, fy=0.2)
x_start, y_start = np.random.randint(0, frost.shape[0] - self.im_size), np.random.randint(0, frost.shape[1] - self.im_size)
return {'image_idx' : image_idx, 'crop_pos' : (x_start, y_start)}
def transform(self, image, image_idx, crop_pos):
return np.uint8(frost(image, self.im_size, frost_path=self.frost_path, image_idx=image_idx, crop_pos=crop_pos, severity=self.severity))
def convert_to_numpy(self, params):
return np.array([params['image_idx']] + list( params['crop_pos']))
def convert_from_numpy(self, numpy_record):
return {'image_idx' : int(numpy_record[0]), 'crop_pos' : tuple(numpy_record[1:].astype(np.int).tolist())}
class DefocusBlur(Augmentation):
tags = ['imagenet_c', 'blur']
name = 'defocus_blur'
def sample_parameters(self):
return {}
def transform(self, image):
return np.uint8(defocus_blur(image, self.im_size, severity=self.severity))
class Saturate(Augmentation):
tags = ['imagenet_c', 'extra']
name = 'saturate'
def sample_parameters(self):
return {}
def transform(self, image):
return np.uint8(saturate(image, self.im_size, severity=self.severity))
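# Usage sketch (values illustrative): each wrapper above binds a severity in [1, 5] and an
# image size (32 for CIFAR, larger for ImageNet-style inputs) and operates on HxWx3 uint8
# arrays, e.g.
#   corruption = Fog(severity=3, im_size=32)
#   corrupted = corruption(image)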
| augmentation-corruption-fbr_main | experiments/overlap/augmentations/imagenetc.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
import numpy as np
def is_iterable(obj):
try:
iter(obj)
except:
return False
else:
return True
class Augmentation(abc.ABC):
tags = ["abstract_base_class"]
def __init__(self, severity, im_size, record=False, max_intensity=False, **kwargs):
self.im_size = im_size
self.severity = severity
self.record = record
self.max_intensity = max_intensity
@abc.abstractmethod
def transform(self, image, **kwargs):
...
@abc.abstractmethod
def sample_parameters(self):
...
def __call__(self, image):
params = self.sample_parameters()
out = self.transform(image, **params)
if self.record:
return out, params
return out
def convert_to_numpy(self, params):
out = []
for k, v in params.items():
if isinstance(v, np.ndarray):
out.extend(v.flatten().tolist())
elif is_iterable(v):
out.extend([x for x in v])
else:
out.append(v)
return np.array(out)
def convert_from_numpy(self, numpy_record):
param_signature = self.sample_parameters()
#assert len(param_signature.keys())<=len(numpy_record), "Mismatched numpy_record."
offset = 0
for k, v in param_signature.items():
if isinstance(v, np.ndarray):
num = len(v.flatten())
data = numpy_record[offset:offset+num]
if v.dtype==np.int or v.dtype==np.uint:
data = np.round(data, 3)
data = data.astype(v.dtype)
param_signature[k] = data.reshape(v.shape)
offset += num
elif is_iterable(v):
data = []
for x in v:
                    if type(x) == int:
data.append(int(np.round(numpy_record[offset],3)))
else:
data.append(type(x)(numpy_record[offset]))
offset += 1
param_signature[k] = data
else:
                if type(v) == int:
param_signature[k] = int(np.round(numpy_record[offset],3))
else:
param_signature[k] = type(v)(numpy_record[offset])
offset += 1
return param_signature
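if __name__ == "__main__":
    # Minimal usage sketch (not part of the library): a toy subclass illustrating the
    # sample_parameters/transform contract and the numpy record round trip.
    class _UniformNoiseExample(Augmentation):
        tags = ['example']
        name = 'uniform_noise_example'
        def sample_parameters(self):
            return {'strength': float(np.random.uniform(0, self.severity))}
        def transform(self, image, strength):
            noise = np.random.uniform(-strength, strength, size=image.shape)
            return np.clip(image + noise, 0, 255).astype(np.uint8)
    example = _UniformNoiseExample(severity=3, im_size=32, record=True)
    corrupted, params = example(np.zeros((32, 32, 3), dtype=np.uint8))
    record = example.convert_to_numpy(params)
    print(params, example.convert_from_numpy(record))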
| augmentation-corruption-fbr_main | experiments/overlap/augmentations/base.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
def int_parameter(level, maxval):
return int(level * maxval / 10)
def float_parameter(level, maxval):
return float(level) * maxval / 10.
def sample_level(n, fixed=False):
if fixed:
return n
return np.random.uniform(low=0.1, high=n)
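if __name__ == "__main__":
    # Worked example: severity levels map linearly onto [0, maxval], so level 5 of 10
    # yields half of maxval; sample_level draws a random level in [0.1, n) unless fixed.
    print(int_parameter(5, 256))        # 128
    print(float_parameter(5, 1.0))      # 0.5
    print(sample_level(3))              # random value in [0.1, 3)
    print(sample_level(3, fixed=True))  # exactly 3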
| augmentation-corruption-fbr_main | experiments/overlap/augmentations/utils/severity.py |
| augmentation-corruption-fbr_main | experiments/overlap/augmentations/utils/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from ... import augmentations as aug
master_aug_list = [
aug.pil.AutoContrast,
aug.pil.Equalize,
aug.pil.Posterize,
aug.pil.Solarize,
aug.pil.Affine,
aug.pil.ShearX,
aug.pil.ShearY,
aug.pil.TranslateX,
aug.pil.TranslateY,
aug.pil.Rotate,
aug.pil.ScaleX,
aug.pil.ScaleY,
aug.pil.ScaleFixedAspectRatio,
aug.pil.Invert,
aug.pil.ColorBalance,
aug.pil.Sharpness,
aug.pil.Contrast,
aug.pil.Brightness,
aug.pil.Quadrilateral,
aug.pil.KeystoneH,
aug.pil.KeystoneV,
aug.pil.Perspective,
aug.pil.QuadrilateralNoBars,
aug.pil.PerspectiveNoBars,
aug.additive_noise.SingleFrequencyGreyscale,
aug.additive_noise.SingleFrequencyColor,
aug.additive_noise.CocentricSineWaves,
aug.additive_noise.PlasmaNoise,
aug.additive_noise.VoronoiNoise,
aug.additive_noise.CausticNoise,
aug.additive_noise.PerlinNoise,
aug.additive_noise.BlueNoise,
aug.additive_noise.BrownishNoise,
aug.blurs.Scatter,
aug.blurs.ChromaticAbberation,
aug.blurs.TransverseChromaticAbberation,
aug.blurs.HomogeneousColorBlur,
aug.blurs.Erosion,
aug.blurs.Dilation,
aug.blurs.CircularMotionBlur,
aug.color.BleachBypass,
aug.color.Technicolor,
aug.color.Pseudocolor,
aug.color.HueShift,
aug.color.ColorDither,
aug.obscure.CutOut,
aug.obscure.CheckerBoardCutOut,
aug.additive_noise.Sparkles,
aug.additive_noise.InverseSparkles,
aug.obscure.Lines,
aug.obscure.RandomSample,
aug.obscure.BlueNoiseSample,
aug.distortion.PinchAndTwirl,
aug.distortion.PinchAndTwirlV2,
aug.distortion.CausticRefraction,
aug.distortion.FishEyeV2,
aug.distortion.WaterDrop,
aug.distortion.Ripple,
aug.imagenetc.GaussianNoise,
aug.imagenetc.ShotNoise,
aug.imagenetc.ImpulseNoise,
aug.imagenetc.SpeckleNoise,
aug.imagenetc.MotionBlur,
aug.imagenetc.DefocusBlur,
aug.imagenetc.ZoomBlur,
aug.imagenetc.GlassBlur,
aug.imagenetc.GaussianBlur,
aug.imagenetc.Brightness,
aug.imagenetc.Fog,
aug.imagenetc.Frost,
aug.imagenetc.Snow,
aug.imagenetc.Spatter,
aug.imagenetc.Contrast,
aug.imagenetc.Pixelate,
aug.imagenetc.JPEGCompression,
aug.imagenetc.ElasticTransform,
aug.imagenetc.Saturate,
]
aug_dict = {a.name : a for a in master_aug_list}
def get_aug_by_name(name):
return aug_dict[name]
def get_augs_by_tag(inclusions, exclusions=[]):
augs = []
for a in master_aug_list:
skip = False
for e in exclusions:
if e in a.tags:
skip = True
if skip:
continue
include = False
for i in inclusions:
if i in a.tags:
include = True
break
if include:
augs.append(a)
return augs
def parse_aug_string(aug_string, im_size, max_intensity=False, record=False, **aug_kwargs):
augs = []
for s in aug_string.split("--"):
if not s:
continue
name, sev = s.split("-")
a = aug_dict[name]
augs.append(a(float(sev),im_size, max_intensity=max_intensity, **aug_kwargs))
return augs
def build_aug_string(augs):
aug_string = ''
for aug in augs:
if aug_string != '':
aug_string += "--"
aug_string = aug_string + aug.name + "-" + str(aug.severity)
if aug_string == '':
aug_string = '--'
return aug_string
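# Usage sketch (names and severities illustrative): an "aug string" encodes augmentations
# and severities as "<name>-<severity>" pairs joined by "--", e.g.
#   augs = parse_aug_string("gaussian_noise-3.0--pixelate-1.0", im_size=32)
#   build_aug_string(augs)   # -> "gaussian_noise-3.0--pixelate-1.0"
# get_augs_by_tag(['noise'], exclusions=['extra']) returns the classes whose tags include
# 'noise' and none of the excluded tags; severities are then chosen by the caller.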
| augmentation-corruption-fbr_main | experiments/overlap/augmentations/utils/aug_finder.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
class PerlinNoiseGenerator(object):
def __init__(self, random_state=None):
self.rand = np.random if random_state is None else random_state
B = 256
N = 16*256
def normalize(arr):
return arr / np.linalg.norm(arr)
self.p = np.arange(2*B+2)
        self.g = np.array([normalize((self.rand.randint(low=0, high=2**31, size=2) % (2*B) - B )/ B)\
                for i in range(2*B+2)])
for i in np.arange(B-1,-1,-1):
k = self.p[i]
j = self.rand.randint(low=0, high=2**31) % B
self.p[i] = self.p[j]
self.p[j] = k
for i in range(B+2):
self.p[B+i] = self.p[i]
self.g[B+i,:] = self.g[i,:]
self.B = B
self.N = N
    @staticmethod
    def s_curve(t):
        return t**2 * (3.0 - 2.0 * t)
def noise(self, x, y):
t = x + self.N
bx0 = int(t) % self.B
bx1 = (bx0+1) % self.B
rx0 = t % 1
rx1 = rx0 - 1.0
t = y + self.N
by0 = int(t) % self.B
by1 = (by0+1) % self.B
ry0 = t % 1
ry1 = ry0 - 1.0
i = self.p[bx0]
j = self.p[bx1]
b00 = self.p[i + by0]
b10 = self.p[j + by0]
b01 = self.p[i + by1]
b11 = self.p[j + by1]
sx = PerlinNoiseGenerator.s_curve(rx0)
sy = PerlinNoiseGenerator.s_curve(ry0)
u = rx0 * self.g[b00,0] + ry0 * self.g[b00,1]
v = rx1 * self.g[b10,0] + ry0 * self.g[b10,1]
a = u + sx * (v - u)
u = rx0 * self.g[b01,0] + ry1 * self.g[b01,1]
v = rx1 * self.g[b11,0] + ry1 * self.g[b11,1]
b = u + sx * (v - u)
return 1.5 * (a + sy * (b - a))
def turbulence(self, x, y, octaves):
t = 0.0
f = 1.0
while f <= octaves:
t += np.abs(self.noise(f*x, f*y)) / f
f = f * 2
return t
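if __name__ == "__main__":
    # Usage sketch: evaluate the Perlin field on a small grid. Coordinates are in noise
    # space; callers typically rescale pixel coordinates first (see the PerlinNoise
    # augmentation, which multiplies pixel coordinates by a small matrix).
    gen = PerlinNoiseGenerator(random_state=np.random.RandomState(0))
    field = np.array([[gen.noise(x / 8.0, y / 8.0) for x in range(16)] for y in range(16)])
    print(field.shape, float(field.min()), float(field.max()))
    print(gen.turbulence(1.0, 2.0, octaves=4))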
| augmentation-corruption-fbr_main | experiments/overlap/augmentations/utils/noise.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from PIL import Image
import torch
class PilToNumpy(object):
def __init__(self, as_float=False, scaled_to_one=False):
self.as_float = as_float
self.scaled_to_one = scaled_to_one
assert (not scaled_to_one) or (as_float and scaled_to_one),\
"Must output a float if rescaling to one."
def __call__(self, image):
if not self.as_float:
return np.array(image).astype(np.uint8)
        elif not self.scaled_to_one:
return np.array(image).astype(np.float32)
else:
return np.array(image).astype(np.float32) / 255
class NumpyToPil(object):
def __init__(self):
pass
def __call__(self, image):
return Image.fromarray(image)
class NumpyToTensor(object):
def __init__(self, HWC_to_CHW=True, bit_to_float=True):
self.HWC_to_CHW = HWC_to_CHW
self.bit_to_float = bit_to_float
pass
def __call__(self, image):
image = image.astype(np.float32)
if self.bit_to_float:
image /= 255
if self.HWC_to_CHW:
image = image.transpose(2,0,1)
return torch.Tensor(image)
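if __name__ == "__main__":
    # Usage sketch: PIL image -> uint8 numpy array (HWC) -> float CHW tensor in [0, 1].
    pil_image = Image.fromarray(np.zeros((32, 32, 3), dtype=np.uint8))
    as_numpy = PilToNumpy()(pil_image)
    as_tensor = NumpyToTensor()(as_numpy)
    print(as_numpy.shape, as_numpy.dtype, tuple(as_tensor.shape), float(as_tensor.max()))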
| augmentation-corruption-fbr_main | experiments/overlap/augmentations/utils/converters.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
def smoothstep(low, high, x):
x = np.clip(x, low, high)
x = (x - low) / (high - low)
return np.clip(3 * (x ** 2) - 2 * (x ** 3), 0, 1)
def bilinear_interpolation(image, point):
l = int(np.floor(point[0]))
u = int(np.floor(point[1]))
r, d = l+1, u+1
lu = image[l,u,:] if l >= 0 and l < image.shape[0]\
and u >= 0 and u < image.shape[1] else np.array([0,0,0])
ld = image[l,d,:] if l >= 0 and l < image.shape[0]\
and d >= 0 and d < image.shape[1] else np.array([0,0,0])
ru = image[r,u,:] if r >= 0 and r < image.shape[0]\
and u >= 0 and u < image.shape[1] else np.array([0,0,0])
rd = image[r,d,:] if r >= 0 and r < image.shape[0]\
and d >= 0 and d < image.shape[1] else np.array([0,0,0])
al = lu * (1.0 - point[1] + u) + ld * (1.0 - d + point[1])
ar = ru * (1.0 - point[1] + u) + rd * (1.0 - d + point[1])
out = al * (1.0 - point[0] + l) + ar * (1.0 - r + point[0])
return out
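if __name__ == "__main__":
    # Usage sketch: smoothstep eases a value between two edges, and bilinear_interpolation
    # samples an HxWx3 image at a fractional (row, col), treating out-of-bounds corners
    # as black.
    print(smoothstep(0.0, 1.0, 0.25))   # 0.15625
    image = np.arange(2 * 2 * 3, dtype=np.float32).reshape(2, 2, 3)
    print(bilinear_interpolation(image, np.array([0.5, 0.5])))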
| augmentation-corruption-fbr_main | experiments/overlap/augmentations/utils/image.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from transform_finder import build_transform
import torch
import torchvision as tv
from utils.converters import PilToNumpy, NumpyToTensor
import os
CIFAR_MEAN = [125.3/255, 123.0/255, 113.9/255]
CIFAR_STD = [63.0/255, 62.1/255, 66.7/255]
#This is in RGB order since that is the standard for PIL
IM_MEAN = [0.485, 0.456, 0.406]
IM_STD = [0.229, 0.224, 0.225]
def read_corruption_csv(filename):
with open(filename) as f:
lines = [l.rstrip() for l in f.readlines()]
corruptions = []
for line in lines:
vals = line.split(",")
if not vals:
continue
corruptions.extend([(vals[0], float(v)) for v in vals[1:]])
return corruptions
@torch.no_grad()
def test_c_bar(
model,
dataset_type,
dataset_path,
batch_size,
corruption_string=None,
loader_kwargs={},
logger=None,
calculate_averages=True,
distributed=False,
num_gpus=1
):
assert dataset_type in ['imagenet', 'cifar'],\
"Only ImageNet and CIFAR-10 are supported."
if corruption_string is None:
corruption_filename = 'imagenet_c_bar.csv' if dataset_type=='imagenet'\
else 'cifar10_c_bar.csv'
corruptions = read_corruption_csv(corruption_filename)
else:
corruptions = [(c.split("-")[0], float(c.split("-")[1])) for c in corruption_string.split("--")]
results = {}
for name, severity in corruptions:
if dataset_type=='imagenet':
transform = tv.transforms.Compose([
tv.transforms.Resize(256),
tv.transforms.CenterCrop(224),
PilToNumpy(),
build_transform(name=name, severity=severity, dataset_type=dataset_type),
NumpyToTensor(),
tv.transforms.Normalize(IM_MEAN, IM_STD)
])
path = os.path.join(dataset_path, 'val')
dataset = tv.datasets.ImageFolder(path, transform=transform)
elif dataset_type=='cifar':
transform = tv.transforms.Compose([
PilToNumpy(),
build_transform(name=name, severity=severity, dataset_type=dataset_type),
NumpyToTensor(),
tv.transforms.Normalize(CIFAR_MEAN, CIFAR_STD)
])
dataset = tv.datasets.CIFAR10(dataset_path, train=False, download=False, transform=transform)
sampler = torch.utils.data.distributed.DistributedSampler(dataset)\
if distributed and num_gpus > 1 else None
loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=False,
sampler=sampler,
drop_last=False,
**loader_kwargs
)
num_correct = 0
for curr_iter, (inputs, labels) in enumerate(loader):
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
preds = model(inputs)
correct = torch.sum(torch.argmax(preds, dim=1)==labels)
if distributed and num_gpus > 1:
torch.distributed.all_reduce(correct)
num_correct += correct.item()
err = 100 * (1 - num_correct / len(dataset))
corruption_string = "{}-{:.2f}".format(name, severity)
if logger:
logger.info("Top1 Error for {}: {:.2f}".format(corruption_string, err))
results[corruption_string] = err
if calculate_averages:
import numpy as np
unique_corruption_names = list(set([c.split("-")[0] for c in results]))
avg_errs = {"{}-avg".format(u) : np.mean([results[c] for c in results if c.split("-")[0]==u])
for u in unique_corruption_names}
overall_err = np.mean(list(results.values()))
results.update(avg_errs)
results['overall-avg'] = overall_err
if logger:
for k,v in avg_errs.items():
logger.info("Top1 Error for {}: {:.2f}".format(k,v))
logger.info("Average Top1 Error: {}".format(overall_err))
return results
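# Usage sketch (paths and model are placeholders):
#   model = ...  # an nn.Module already on the GPU and in eval() mode
#   results = test_c_bar(model, dataset_type='cifar', dataset_path='/path/to/cifar10',
#                        batch_size=128)
#   # `results` maps "corruption-severity" strings (plus "*-avg" and "overall-avg" keys)
#   # to top-1 error percentages.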
| augmentation-corruption-fbr_main | imagenet_c_bar/test_c_bar.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import corrupt as corr
transform_list = [
corr.ColorBalance,
corr.QuadrilateralNoBars,
corr.PerspectiveNoBars,
corr.SingleFrequencyGreyscale,
corr.CocentricSineWaves,
corr.PlasmaNoise,
corr.VoronoiNoise,
corr.CausticNoise,
corr.PerlinNoise,
corr.BlueNoise,
corr.BrownishNoise,
corr.Scatter,
corr.ChromaticAbberation,
corr.TransverseChromaticAbberation,
corr.CircularMotionBlur,
corr.BleachBypass,
corr.Technicolor,
corr.Pseudocolor,
corr.HueShift,
corr.ColorDither,
corr.CheckerBoardCutOut,
corr.Sparkles,
corr.InverseSparkles,
corr.Lines,
corr.BlueNoiseSample,
corr.PinchAndTwirl,
corr.CausticRefraction,
corr.FishEye,
corr.WaterDrop,
corr.Ripple,
]
transform_dict = {t.name : t for t in transform_list}
def build_transform(name, severity, dataset_type):
assert dataset_type in ['cifar', 'imagenet'],\
"Only cifar and imagenet image resolutions are supported."
return transform_dict[name](severity=severity,
im_size=(32 if dataset_type=='cifar' else 224)
)
def build_transforms_from_string(string, dataset_type):
im_size = (32 if dataset_type=='cifar' else 224)
transforms = []
for s in string.split("--"):
if not s:
continue
name, sev = s.split("-")
t = transform_dict[name]
transforms.append(t(float(sev),im_size))
return transforms
def transform_string(transforms):
string = ''
for t in transforms:
if string != '':
string += "--"
string = string + t.name + "-" + str(t.severity)
if string == '':
string = '--'
return string
def get_transforms_by_tag(inclusions, exclusions=[]):
transforms = []
for t in transform_list:
if any([i in t.tags for i in inclusions])\
and not any([e in t.tags for e in exclusions]):
transforms.append(t)
return transforms
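# Usage sketch (names and severities illustrative): transforms are keyed by name and
# parameterized by a float severity, e.g.
#   t = build_transform(name='brownish_noise', severity=2.5, dataset_type='cifar')
#   corrupted = t(image)   # image: HxWx3 uint8 numpy array at the dataset resolution
#   ts = build_transforms_from_string("brownish_noise-2.5--hue_shift-1.0", 'cifar')
#   transform_string(ts)   # -> "brownish_noise-2.5--hue_shift-1.0"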
| augmentation-corruption-fbr_main | imagenet_c_bar/transform_finder.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import torchvision as tv
from transform_finder import build_transform
from utils.converters import PilToNumpy, NumpyToPil
import os
import numpy as np
import torch
from PIL import Image
parser = argparse.ArgumentParser(description="Make ImageNet-C-Bar")
parser.add_argument('--imagenet_dir', type=str, required=True,
help='The path to the ImageNet dataset. This path should contain '
'the folder val/')
parser.add_argument('--out_dir', type=str, default='.',
help='The path to where ImageNet-C will be saved.')
parser.add_argument('--num_workers', type=int, default=10,
help='The number of workers to build images with.')
parser.add_argument('--batch_size', type=int, default=100,
help='Batch size of torch data loader used to parallelize '
'data processing.')
parser.add_argument('--corruption_file', type=str, default='imagenet_c_bar.csv',
help='A file that specifies which corruptions in which severities '
'to produce. Path is relative to the script.')
parser.add_argument('--seed', type=int, default=0,
help='The random seed used to generate corruptions.')
class SavingDataset(tv.datasets.ImageFolder):
def __init__(self, root, out_dir, transform=None):
super(SavingDataset, self).__init__(root, transform=transform)
self.out_dir = out_dir
def __getitem__(self, index):
image, label = super(SavingDataset, self).__getitem__(index)
class_name = self.classes[label]
out_dir = os.path.join(self.out_dir, class_name)
try:
os.mkdir(out_dir)
except FileExistsError:
pass
file_name = os.path.basename(self.samples[index][0])
save_path = os.path.join(out_dir, file_name)
Image.fromarray(np.uint8(image)).save(save_path, quality=85, optimize=True)
return image, label
def read_corruption_csv(filename):
with open(filename) as f:
lines = [l.rstrip() for l in f.readlines()]
corruptions = {}
for line in lines:
vals = line.split(",")
if not vals:
continue
corruptions[vals[0]] = [float(v) for v in vals[1:]]
return corruptions
def main():
args = parser.parse_args()
dataset_path = args.imagenet_dir
corruption_file = args.corruption_file
out_dir = os.path.join(args.out_dir, 'ImageNet-C-Bar')
np.random.seed(args.seed)
bs = args.batch_size
if not os.path.exists(out_dir):
os.mkdir(out_dir)
file_dir = os.path.dirname(os.path.realpath(__file__))
corruption_csv = os.path.join(file_dir, corruption_file)
corruptions = read_corruption_csv(corruption_csv)
for name, severities in corruptions.items():
corruption_dir = os.path.join(out_dir, name)
if not os.path.exists(corruption_dir):
os.mkdir(corruption_dir)
for i, severity in enumerate(severities):
severity_dir = os.path.join(corruption_dir, "{:.2f}".format(severity))
if not os.path.exists(severity_dir):
os.mkdir(severity_dir)
print("Starting {}-{:.2f}...".format(name, severity))
transform = tv.transforms.Compose([
tv.transforms.Resize(256),
tv.transforms.CenterCrop(224),
PilToNumpy(),
build_transform(name=name, severity=severity, dataset_type='imagenet'),
])
path = os.path.join(dataset_path, 'val')
dataset = SavingDataset(path, severity_dir, transform=transform)
loader = torch.utils.data.DataLoader(
dataset,
shuffle=False,
sampler=None,
drop_last=False,
pin_memory=False,
num_workers=args.num_workers,
batch_size=bs
)
for j, (im, label) in enumerate(loader):
if (j+1) % 10 == 0:
print("Completed {}/{}".format(j, len(loader)))
if __name__=="__main__":
main()
| augmentation-corruption-fbr_main | imagenet_c_bar/make_imagenet_c_bar.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from math import floor, ceil
from PIL import Image
from scipy.fftpack import ifft2
from scipy.ndimage import gaussian_filter, rotate, shift, zoom
from skimage.draw import line_aa
from skimage.color import rgb2hsv, hsv2rgb
from utils.image import bilinear_interpolation, smoothstep
from utils.perlin_noise import PerlinNoiseGenerator
from base import Transform
import abc
def int_parameter(level, maxval):
return int(level * maxval / 10)
def float_parameter(level, maxval):
return float(level) * maxval / 10.
class SingleFrequencyGreyscale(Transform):
name = 'single_frequency_greyscale'
tags = ['new_corruption', 'imagenet_c_bar']
def sample_parameters(self):
freq_mag = np.random.uniform(low=-np.pi, high=np.pi)
freq_2 = np.random.uniform(low=-abs(freq_mag), high=abs(freq_mag))
freq = np.array([freq_mag, freq_2])[np.random.permutation(2)]
phase = np.random.uniform(low=0, high=2*np.pi)
intensity = float_parameter(self.severity, 196)
return { 'freq' : freq, 'phase' : phase, 'intensity' : intensity}
def transform(self, image, freq, phase, intensity):
noise = np.array([[np.sin(x * freq[0] + y * freq[1] + phase)\
for x in range(self.im_size)] for y in range(self.im_size)])
noise = np.stack((noise, noise, noise), axis=2)
return np.clip(image + intensity * noise, 0, 255).astype(np.uint8)
def convert_to_numpy(self, params):
return np.array(params['freq'].tolist() + [params['phase'], params['intensity']])
def convert_from_numpy(self, numpy_record):
return {'freq' : numpy_record[0:2],
'phase' : numpy_record[2],
'intensity' : numpy_record[3]
}
class CocentricSineWaves(Transform):
name = 'cocentric_sine_waves'
tags = ['new_corruption', 'imagenet_c_bar']
def sample_parameters(self):
offset = np.random.uniform(low=0, high=self.im_size, size=2)
freq = np.random.uniform(low=0, high=10)
amplitude = np.random.uniform(low=0, high=self.im_size/10)
ring_width = np.random.uniform(low=0, high=self.im_size/10)
intensity = [float_parameter(self.severity, 128) for i in range(3)]
return { 'offset' : offset,
'freq' : freq,
'amplitude' : amplitude,
'ring_width' : ring_width,
'intensity' : intensity
}
def transform(self, image, offset, freq, amplitude, ring_width, intensity):
def calc_intensity(x, y, x0, y0, freq, amplitude, ring_width):
angle = np.arctan2(x-x0, y-y0) * freq
distance = ((np.sqrt((x-x0)**2 + (y-y0)**2) + np.sin(angle) * amplitude) % ring_width) / ring_width
distance -= 1/2
return distance
noise = np.array([[calc_intensity(x, y, offset[0], offset[1], freq, amplitude, ring_width)\
for x in range(self.im_size)] for y in range(self.im_size)])
noise = np.stack((intensity[0] * noise, intensity[1] * noise, intensity[2] * noise), axis=2)
return np.clip(image + noise, 0, 255).astype(np.uint8)
def convert_to_numpy(self, params):
return np.array(params['offset'].tolist() + [params['freq'], params['amplitude'], params['ring_width']] + params['intensity'])
def convert_from_numpy(self, numpy_record):
return {'offset' : numpy_record[0:2].tolist(),
'freq' : numpy_record[2],
'amplitude' : numpy_record[3],
'ring_width' : numpy_record[4],
                'intensity' : numpy_record[5:8].tolist()
}
class PlasmaNoise(Transform):
name = 'plasma_noise'
tags = ['new_corruption', 'imagenet_c_bar']
def sample_parameters(self):
time = np.random.uniform(low=0.0, high=6*np.pi)
iterations = np.random.randint(low=4, high=7)
sharpness = np.random.uniform(low=0.5, high=1.0)
scale = np.random.uniform(low=0.075, high=0.2) * self.im_size
intensity = float_parameter(self.severity,64)
return {'time' : time, 'iterations' : iterations, 'sharpness' : sharpness,
'scale' : scale, 'intensity' : intensity}
def transform(self, image, time, iterations, sharpness, scale, intensity):
def kernel(x, y, rand, iters, sharp, scale):
x /= scale
y /= scale
i = np.array([1.0, 1.0, 1.0, 0.0])
for s in range(iters):
r = np.array([np.cos(y * i[0] - i[3] + rand / i[1]), np.sin(x * i[0] - i[3] + rand / i[1])]) / i[2]
r += np.array([-r[1],r[0]]) * 0.3
x += r[0]
y += r[1]
i *= np.array([1.93, 1.15, (2.25 - sharp), rand * i[1]])
r = np.sin(x - rand)
b = np.sin(y + rand)
g = np.sin((x + y + np.sin(rand))*0.5)
return [r,g,b]
noise = np.array([[kernel(x,y, time, iterations, sharpness, scale)\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip((1-intensity/255) * image + intensity * noise, 0, 255).astype(np.uint8)
class VoronoiNoise(Transform):
name = 'voronoi_noise'
tags = ['new_corruption']
def sample_parameters(self):
seed = np.random.uniform(low=0, high=10)
density = np.random.uniform(low=0.5, high=0.9)
size = np.random.uniform(low=0.05, high=0.2) * self.im_size
intensity = float_parameter(self.severity,255)
if np.random.uniform() > 0.5:
intensity = -intensity
return {'seed' : seed, 'density' : density, 'size' : size, 'intensity' : intensity}
def transform(self, image, size, seed, density, intensity):
def voronoi_hash(v, time):
m = np.array([[13.85, 47.77], [99.41, 88.48]])
w = np.matmul(m,v)
return (np.sin(w) * np.sqrt(w) * time * 0.0025) % 1
def kernel(x, y, size, seed, density):
v = np.array([[x],[y]]) / size + 1
g = v // 1
f = v % 1
dist = 1.0
for i in [-1,0,1]:
for j in [-1,0,1]:
p = np.array([[i],[j]])
curr_dist = np.linalg.norm((p + voronoi_hash(g+p, seed) - f).flatten())
dist = min(curr_dist, dist)
r = smoothstep(0, 1, dist * density) - 0.5
return r
noise = np.array([[kernel(x,y, size, seed, density)\
for x in range(self.im_size)] for y in range(self.im_size)])
noise = np.stack((noise, noise, noise), axis=2)
return np.clip(image + intensity * noise, 0, 255).astype(np.uint8)
class CausticNoise(Transform):
name = 'caustic_noise'
tags = ['new_corruption']
def sample_parameters(self):
time = np.random.uniform(low=0.5, high=2.0)
size = np.random.uniform(low=0.75, high=1.25) * self.im_size
#size = self.im_size
intensity = float_parameter(self.severity, 255)
return { 'time' : time, 'size' : size, 'intensity' : intensity}
def transform(self, image, time, size, intensity):
def kernel(point, time, size):
point = point / size
p = (point % 1) * 6.28318530718 - 250
i = p.copy()
c = 1.0
inten = 0.005
for n in range(5):
t = time * (1.0 - (3.5 / (n+1)))
i = p + np.array([np.cos(t-i[0])+np.sin(t+i[1]),np.sin(t-i[1])+np.cos(t+i[0])])
length = np.sqrt((p[0] / (np.sin(i[0]+t)/inten))**2 + (p[1] / (np.cos(i[1]+t)/inten))**2)
c += 1.0/length
c /= 5.0
c = 1.17 - c ** 1.4
color = np.clip(np.abs(c) ** 8.0, 0, 1)
return np.array([color, color, color])
noise = np.array([[kernel(np.array([y,x]), time, size)\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(image + intensity * noise, 0, 255).astype(np.uint8)
class Sparkles(Transform):
name = 'sparkles'
tags = ['new_corruption', 'imagenet_c_bar', 'cifar_c_bar']
def sample_parameters(self):
centers = np.random.uniform(low=0, high=self.im_size, size=(5, 2))
radii = np.array([float_parameter(self.severity, 0.1)\
for i in range(5)]) * self.im_size
amounts = np.array([50 for i in range(5)])
color = np.array([255, 255, 255])
randomness = 25
seed = np.random.randint(low=0, high=2**32)
nrays = np.random.randint(low=50, high=200, size=5)
return {'centers' : centers, 'radii' : radii, 'color' : color, 'randomness' : randomness,
'seed' : seed, 'nrays' : nrays, 'amounts' : amounts
}
def transform(self, image, centers, radii, nrays, amounts, color, randomness, seed):
def kernel(point, value, center, radius, ray_lengths, amount, color):
rays = len(ray_lengths)
dp = point - center
dist = np.linalg.norm(dp)
angle = np.arctan2(dp[1], dp[0])
d = (angle + np.pi) / (2 * np.pi) * rays
i = int(d)
f = d - i
if radius != 0:
length = ray_lengths[i % rays] + f * (ray_lengths[(i+1) % rays] - ray_lengths[i % rays])
g = length**2 / (dist**2 + 1e-4)
g = g ** ((100 - amount) / 50.0)
f -= 0.5
f = 1 - f**2
f *= g
f = np.clip(f, 0, 1)
return value + f * (color - value)
random_state = np.random.RandomState(seed=seed)
for center, rays, amount, radius in zip(centers, nrays, amounts, radii):
ray_lengths = [max(1,radius + randomness / 100.0 * radius * random_state.randn())\
for i in range(rays)]
image = np.array([[kernel(np.array([y,x]), image[y,x,:].astype(np.float32), center, radius, ray_lengths, amount, color)\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(image, 0, 255).astype(np.uint8)
class InverseSparkles(Transform):
name = 'inverse_sparkles'
tags = ['new_corruption', 'imagenet_c_bar', 'cifar_c_bar']
def sample_parameters(self):
center = np.random.uniform(low=0.25, high=0.75, size=2) * self.im_size
radius = 0.25 * self.im_size
amount = 100
amount = float_parameter(self.severity, 65)
amount = 100 - amount
color = np.array([255, 255, 255])
randomness = 25
seed = np.random.randint(low=0, high=2**32)
rays = np.random.randint(low=50, high=200)
return {'center' : center, 'radius' : radius, 'color' : color, 'randomness' : randomness,
'seed' : seed, 'rays' : rays, 'amount' : amount
}
def transform(self, image, center, radius, rays, amount, color, randomness, seed):
def kernel(point, value, center, radius, ray_lengths, amount, color):
rays = len(ray_lengths)
dp = point - center
dist = np.linalg.norm(dp)
angle = np.arctan2(dp[1], dp[0])
d = (angle + np.pi) / (2 * np.pi) * rays
i = int(d)
f = d - i
if radius != 0:
length = ray_lengths[i % rays] + f * (ray_lengths[(i+1) % rays] - ray_lengths[i % rays])
g = length**2 / (dist**2 + 1e-4)
g = g ** ((100 - amount) / 50.0)
f -= 0.5
f = 1 - f**2
f *= g
f = np.clip(f, 0, 1)
return color + f * (value - color)
random_state = np.random.RandomState(seed=seed)
ray_lengths = [radius + randomness / 100.0 * radius * random_state.randn()\
for i in range(rays)]
out = np.array([[kernel(np.array([y,x]), image[y,x,:].astype(np.float32), center, radius, ray_lengths, amount, color)\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(out, 0, 255).astype(np.uint8)
class PerlinNoise(Transform):
name = 'perlin_noise'
tags = ['new_corruption', 'imagenet_c_bar']
def sample_parameters(self):
m = np.array([[1,0],[0,1]]) / (32 * self.im_size / 224)
turbulence = 16.0
gain = 0.5
bias = 0.5
alpha = float_parameter(self.severity, 0.50)
seed = np.random.randint(low=0, high=2**32)
return {'m': m, 'turbulence' : turbulence, 'seed': seed,
'gain': gain, 'bias': bias, 'alpha': alpha}
def transform(self, image, m, turbulence, seed, gain, bias, alpha):
random_state = np.random.RandomState(seed=seed)
noise = PerlinNoiseGenerator(random_state)
def kernel(point, m, turbulence, gain, bias):
npoint = np.matmul(point, m)
f = noise.turbulence(npoint[0], npoint[1], turbulence)\
if turbulence != 1.0 else noise.noise(npoint[0], npoint[1])
f = gain * f + bias
return np.clip(np.array([f,f,f]),0,1.0)
noise = np.array([[kernel(np.array([y,x]),m,turbulence,gain, bias) for x in range(self.im_size)]\
for y in range(self.im_size)])
out = (1 - alpha) * image.astype(np.float32) + 255 * alpha * noise
return np.clip(out, 0, 255).astype(np.uint8)
class BlueNoise(Transform):
name = 'blue_noise'
tags = ['new_corruption']
def sample_parameters(self):
seed = np.random.randint(low=0, high=2**32)
intensity = float_parameter(self.severity, 196)
return {'seed' : seed, 'intensity' : intensity}
def gen_noise(self, random_state):
center = self.im_size / 2
power = np.array([[np.linalg.norm(np.array([x,y])-center)\
for x in range(self.im_size)] for y in range(self.im_size)])
phases = random_state.uniform(low=0, high=2*np.pi, size=(self.im_size, self.im_size//2))
if self.im_size % 2 == 0:
phases = np.concatenate((phases, phases[::-1,::-1]), axis=1)
else:
center_freq = random_state.uniform(low=0, high=2*np.pi, size=(self.im_size//2, 1))
center_freq = np.concatenate((center_freq, np.array([[0.0]]), center_freq[::-1,:]), axis=0)
phases = np.concatenate((phases, center_freq, phases[::-1,::-1]), axis=1)
fourier_space_noise = power * (np.cos(phases) + np.sin(phases) * 1j)
fourier_space_noise = np.roll(fourier_space_noise, self.im_size//2, axis=0)
fourier_space_noise = np.roll(fourier_space_noise, self.im_size//2, axis=1)
noise = np.real(ifft2(fourier_space_noise))
noise = noise / np.std(noise)
return noise
def transform(self, image, seed, intensity):
random_state = np.random.RandomState(seed=seed)
noise = np.stack([self.gen_noise(random_state) for i in range(3)],axis=2)
return np.clip(image + intensity * noise, 0, 255).astype(np.uint8)
class BrownishNoise(Transform):
name = 'brownish_noise'
tags = ['new_corruption', 'imagenet_c_bar', 'cifar_c_bar']
def sample_parameters(self):
seed = np.random.randint(low=0, high=2**32)
intensity = float_parameter(self.severity, 64)
return {'seed' : seed, 'intensity' : intensity}
def gen_noise(self, random_state):
center = self.im_size / 2
power = np.array([[1/(np.linalg.norm(np.array([x,y])-center)**2+1)\
for x in range(self.im_size)] for y in range(self.im_size)])
phases = random_state.uniform(low=0, high=2*np.pi, size=(self.im_size, self.im_size//2))
if self.im_size % 2 == 0:
phases = np.concatenate((phases, phases[::-1,::-1]), axis=1)
else:
center_freq = random_state.uniform(low=0, high=2*np.pi, size=(self.im_size//2, 1))
center_freq = np.concatenate((center_freq, np.array([[0.0]]), center_freq[::-1,:]), axis=0)
phases = np.concatenate((phases, center_freq, phases[::-1,::-1]), axis=1)
fourier_space_noise = power * (np.cos(phases) + np.sin(phases) * 1j)
fourier_space_noise = np.roll(fourier_space_noise, self.im_size//2, axis=0)
fourier_space_noise = np.roll(fourier_space_noise, self.im_size//2, axis=1)
noise = np.real(ifft2(fourier_space_noise))
noise = noise / np.std(noise)
return noise
def transform(self, image, seed, intensity):
random_state = np.random.RandomState(seed=seed)
noise = np.stack([self.gen_noise(random_state) for i in range(3)],axis=2)
return np.clip(image + intensity * noise, 0, 255).astype(np.uint8)
class BleachBypass(Transform):
name = 'bleach_bypass'
tags = ['new_corruption']
def sample_parameters(self):
amount = float_parameter(self.severity, 1.0)
return { 'amount' : amount }
def transform(self, image, amount):
vals = np.array([0.2126, 0.7152, 0.0722]).reshape(1,1,3)
luma = np.sum(image*vals, axis=2, keepdims=True)/255
l = np.clip(10.0 * (luma - 0.45), 0, 1.0)
result1 = 2 * image * luma / 255
result2 = 1.0 - 2.0 * (1.0 - luma) * (1.0 - image /255)
out = ((1-l) * result1 + l * result2) * 255
return ((1-amount) * image + amount * out).astype(np.uint8)
class Technicolor(Transform):
name = 'technicolor'
tags = ['new_corruption']
def sample_parameters(self):
amount = float_parameter(self.severity, 1.0)
return { 'amount' : amount }
def transform(self, image, amount):
redmatte = 1.0 - (image[:,:,0]/255 - ((image[:,:,1]/2+image[:,:,2]/2))/255)
greenmatte = 1.0 - (image[:,:,1]/255 - ((image[:,:,0]/2+image[:,:,2]/2))/255)
bluematte = 1.0 - (image[:,:,2]/255 - ((image[:,:,0]/2+image[:,:,1]/2))/255)
red = greenmatte * bluematte * image[:,:,0].astype(np.float32)
green = redmatte * bluematte * image[:,:,1].astype(np.float32)
blue = redmatte * greenmatte * image[:,:,2].astype(np.float32)
new_color = np.stack((red, green, blue), axis=2)
return ((1-amount) * image + amount * new_color).astype(np.uint8)
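# False-color effect: maps image luminance onto a gradient through five random colors, blended with the original by 'amount'.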
class Pseudocolor(Transform):
name = 'pseudocolor'
tags = ['new_corruption']
def sample_parameters(self):
smoothness = np.random.uniform(low=0.25, high=0.75)
color0 = np.random.randint(low=0, high=255, size=3).astype(np.uint8)
color1 = np.random.randint(low=0, high=255, size=3).astype(np.uint8)
color2 = np.random.randint(low=0, high=255, size=3).astype(np.uint8)
color3 = np.random.randint(low=0, high=255, size=3).astype(np.uint8)
color4 = np.random.randint(low=0, high=255, size=3).astype(np.uint8)
amount = float_parameter(self.severity, 0.5)
return { 'smoothness' : smoothness, 'color0' : color0, 'color1': color1,
'color2': color2, 'color3' : color3, 'color4' : color4, 'amount' : amount }
def transform(self, image, color0, color1, color2, color3, color4, smoothness, amount):
color0 = color0.astype(np.uint8)
color1 = color1.astype(np.uint8)
color2 = color2.astype(np.uint8)
color3 = color3.astype(np.uint8)
color4 = color4.astype(np.uint8)
def get_color(color0, color1, edge0, edge1, luma, smoothness):
smooth_color = color0 + ((color1 - color0) * smoothstep(edge0, edge1, luma))
a = 4.0 * (luma - edge0)
linear_color = (1 - a) * color0 + a * color1
return (1 - smoothness) * linear_color + smoothness * smooth_color
vals = np.array([0.2126, 0.7152, 0.0722]).reshape(1,1,3)
luma = np.sum(image.astype(np.float32)*vals, axis=2, keepdims=True)/255
c1 = get_color(color0, color1, 0.0, 0.25, luma, smoothness)
c2 = get_color(color1, color2, 0.25, 0.50, luma, smoothness)
c3 = get_color(color2, color3, 0.5, 0.75, luma, smoothness)
c4 = get_color(color3, color4, 0.75, 1.0, luma, smoothness)
out = (luma < 0.25) * c1 + ((luma >= 0.25)&(luma < 0.5)) * c2\
+ ((luma >= 0.5)&(luma < 0.75)) * c3 + (luma >= 0.75) * c4
return np.clip((1 - amount) * image + amount * out, 0, 255).astype(np.uint8)
def convert_to_numpy(self, params):
colors = []
for i in range(5):
colors.extend(params['color'+str(i)].tolist())
return np.array([params['smoothness']] + colors + [params['amount']])
def convert_from_numpy(self, numpy_record):
params = {'smoothness' : numpy_record[0], 'amount' : numpy_record[16]}
for i in range(5):
params['color'+str(i)] = numpy_record[1+3*i:1+3*(i+1)]
return params
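# Shifts the hue channel in HSV space by a random signed amount.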
class HueShift(Transform):
name = 'hue_shift'
tags = ['new_corruption']
def sample_parameters(self):
amount = float_parameter(self.severity, 0.5)
if np.random.uniform() < 0.5:
amount *= -1
return {'amount' : amount}
def transform(self, image, amount):
hsv_image = rgb2hsv(image.astype(np.float32)/255)
hsv_image[:,:,0] += (amount % 1.0)
return np.clip(hsv2rgb(hsv_image)*255, 0, 255).astype(np.uint8)
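# Quantizes each channel to a reduced palette using error-diffusion dithering (Floyd-Steinberg weights 7, 3, 5, 1).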
class ColorDither(Transform):
name = 'color_dither'
tags = ['new_corruption']
def sample_parameters(self):
levels = int_parameter(self.severity,10)
levels = 14-levels
return {'levels' : levels}
def transform(self, image, levels):
index = 0
color_map = [int(255 * i / (levels -1)) for i in range(levels)]
div = [int(levels*i / 256) for i in range(256)]
out = np.zeros_like(image)
image_copy = image.copy()
m = np.array([[0,0,0],[0,0,7],[3,5,1]])
for y in range(self.im_size):
reverse = ((y % 2) == 1)  # serpentine scan: alternate traversal direction on odd rows
if reverse:
index = y*self.im_size + self.im_size - 1
direction = -1
else:
index = y*self.im_size
direction = 1
for x in range(self.im_size):
curr_val = image_copy[index//self.im_size, index%self.im_size,:]
new_val = np.array([color_map[div[c]] for c in curr_val])
out[index//self.im_size, index%self.im_size,:] = new_val
e = curr_val - new_val
for i in [-1,0,1]:
iy = y+i
if iy > 0 and iy < self.im_size:
for j in [-1,0,1]:
jx = x+j
if jx > 0 and jx < self.im_size:
if reverse:
w = m[(i+1),-j+1]
else:
w = m[(i+1),j+1]
if w != 0:
k = index - j if reverse else index + j
curr_val = image_copy[k//self.im_size, k%self.im_size,:].astype(np.float32)
curr_val = np.clip(curr_val + e * w/np.sum(m),0,255).astype(np.uint8)
image_copy[k//self.im_size,k%self.im_size,:] = curr_val
index += direction
return np.clip(out, 0, 255).astype(np.uint8)
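# Adjusts color saturation up or down via PIL's ImageEnhance.Color.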
class ColorBalance(Transform):
name = 'color_balance'
tags = ['new_corruption']
def sample_parameters(self):
shift = float_parameter(self.severity, 1.0)
factor = 1.0 + np.random.choice([-1,1]) * shift
return { 'factor' : factor}
def transform(self, image, factor):
enhancer = ImageEnhance.Color(Image.fromarray(image))
return np.array(enhancer.enhance(factor))
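# Occludes cells of a randomly rotated and scaled checkerboard with mid-gray; 'fraction' controls how many cells are eligible to be dropped.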
class CheckerBoardCutOut(Transform):
name = 'checkerboard_cutout'
tags = ['new_corruption', 'imagenet_c_bar', 'cifar_c_bar']
def sample_parameters(self):
angle = np.random.uniform(low=0, high=2*np.pi)
scales = np.maximum(np.random.uniform(low=0.1, high=0.25) * self.im_size, 1)
scales = (scales, scales)
fraction = float_parameter(self.severity, 1.0)
seed = np.random.randint(low=0, high=2**32)
return {'angle' : angle, 'scales' : scales, 'fraction' : fraction, 'seed' : seed}
def transform(self, image, scales, angle, fraction, seed):
random_state = np.random.RandomState(seed=seed)
grid = random_state.uniform(size=(int(4*self.im_size//scales[0]), int(4*self.im_size//scales[1]))) < fraction
def mask_kernel(point, scales, angle, grid):
nx = (np.cos(angle) * point[0] + np.sin(angle) * point[1]) / scales[0]
ny = (-np.sin(angle) * point[0] + np.cos(angle) * point[1]) / scales[1]
return (int(nx % 2) != int(ny % 2)) or not grid[int(nx),int(ny)]
out = np.array([[image[y,x,:] if mask_kernel([y,x], scales, angle, grid) else np.array([128,128,128])\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(out, 0, 255).astype(np.uint8)
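# Draws randomly oriented anti-aliased lines, blended toward gray, across the image.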
class Lines(Transform):
name = 'lines'
tags = ['new_corruption', 'cifar_c_bar']
def sample_parameters(self):
length = 1.0
density = float_parameter(self.severity, 1.0)
angle = np.random.uniform(low=0.0, high=2*np.pi)
angle_variation = np.random.uniform(low=0.1, high=1.0)
seed = np.random.randint(low=0, high=2**32)
return {'length' : length, 'density' : density, 'angle' : angle, 'angle_variation' : angle_variation, 'seed' : seed}
def transform(self, image, length, density, angle, angle_variation, seed):
num_lines = int(density * self.im_size)
l = length * self.im_size
random_state = np.random.RandomState(seed=seed)
out = image.copy()
for i in range(num_lines):
x = self.im_size * random_state.uniform()
y = self.im_size * random_state.uniform()
a = angle + 2 * np.pi * angle_variation * (random_state.uniform() - 0.5)
s = np.sin(a) * l
c = np.cos(a) * l
x1 = int(x-c)
x2 = int(x+c)
y1 = int(y-s)
y2 = int(y+s)
rxc, ryc, rval = line_aa(x1, y1, x2, y2)
xc, yc, val = [], [], []
for rx, ry, rv in zip(rxc, ryc, rval):
if rx >= 0 and ry >= 0 and rx < self.im_size and ry < self.im_size:
xc.append(rx)
yc.append(ry)
val.append(rv)
xc, yc, val = np.array(xc, dtype=np.int), np.array(yc, dtype=np.int), np.array(val)
out[xc, yc, :] = (1.0 - val.reshape(-1,1)) * out[xc, yc, :].astype(np.float32) + val.reshape(-1,1)*128
return out.astype(np.uint8)
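# Keeps only pixels where a blue-noise field (Fourier amplitude growing with frequency) exceeds a threshold; all other pixels are zeroed.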
class BlueNoiseSample(Transform):
name = 'blue_noise_sample'
tags = ['new_corruption', 'imagenet_c_bar', 'cifar_c_bar']
def sample_parameters(self):
seed = np.random.randint(low=0, high=2**32)
threshold = float_parameter(self.severity, 3.0) - 2.5
return {'seed' : seed, 'threshold' : threshold}
def transform(self, image, seed, threshold):
random_state = np.random.RandomState(seed=seed)
center = self.im_size / 2
power = np.array([[np.linalg.norm(np.array([x,y])-center)\
for x in range(self.im_size)] for y in range(self.im_size)])
phases = random_state.uniform(low=0, high=2*np.pi, size=(self.im_size, self.im_size//2))
if self.im_size % 2 == 0:
phases = np.concatenate((phases, phases[::-1,::-1]), axis=1)
else:
center_freq = random_state.uniform(low=0, high=2*np.pi, size=(self.im_size//2, 1))
center_freq = np.concatenate((center_freq, np.array([[0.0]]), center_freq[::-1,:]), axis=0)
phases = np.concatenate((phases, center_freq, phases[::-1,::-1]), axis=1)
fourier_space_noise = power * (np.cos(phases) + np.sin(phases) * 1j)
fourier_space_noise = np.roll(fourier_space_noise, self.im_size//2, axis=0)
fourier_space_noise = np.roll(fourier_space_noise, self.im_size//2, axis=1)
noise = np.real(ifft2(fourier_space_noise))
noise = noise / np.std(noise)
mask = noise > threshold
out = image * mask.reshape(self.im_size, self.im_size, 1)
return np.clip(out, 0, 255).astype(np.uint8)
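# Warps the image as if seen through rippling water: a procedural caustic pattern acts as a lens that refracts pixel lookups and modulates lighting.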
class CausticRefraction(Transform):
name = 'caustic_refraction'
tags = ['new_corruption', 'imagenet_c_bar']
def sample_parameters(self):
time = np.random.uniform(low=0.5, high=2.0)
size = np.random.uniform(low=0.75, high=1.25) * self.im_size
#size = self.im_size
eta = 4.0
lens_scale = float_parameter(self.severity, 0.5*self.im_size)
lighting_amount = float_parameter(self.severity, 2.0)
softening = 1
return { 'time' : time, 'size' : size, 'eta' : eta, 'lens_scale' : lens_scale, 'lighting_amount': lighting_amount, 'softening' : softening}
def transform(self, image, time, size, eta, lens_scale, lighting_amount, softening):
def caustic_noise_kernel(point, time, size):
point = point / size
p = (point % 1) * 6.28318530718 - 250
i = p.copy()
c = 1.0
inten = 0.005
for n in range(5):
t = time * (1.0 - (3.5 / (n+1)))
i = p + np.array([np.cos(t-i[0])+np.sin(t+i[1]),np.sin(t-i[1])+np.cos(t+i[0])])
length = np.sqrt((p[0] / (np.sin(i[0]+t)/inten))**2 + (p[1] / (np.cos(i[1]+t)/inten))**2)
c += 1.0/length
c /= 5.0
c = 1.17 - c ** 1.4
color = np.clip(np.abs(c) ** 8.0, 0, 1)
return np.array([color, color, color])
def refract(incident, normal, eta):
if np.abs(np.dot(incident, normal)) >= 1.0 - 1e-3:
return incident
angle = np.arccos(np.dot(incident, normal))
out_angle = np.arcsin(np.sin(angle) / eta)
out_unrotated = np.array([np.cos(out_angle), np.sin(out_angle), 0.0])
spectator_dim = np.cross(incident, normal)
spectator_dim /= np.linalg.norm(spectator_dim)
orthogonal_dim = np.cross(normal, spectator_dim)
rotation_matrix = np.stack((normal, orthogonal_dim, spectator_dim), axis=0)
return np.matmul(np.linalg.inv(rotation_matrix), out_unrotated)
def luma_at_offset(image, origin, offset):
pixel_value = image[origin[0]+offset[0], origin[1]+offset[1], :]\
if origin[0]+offset[0] >= 0 and origin[0]+offset[0] < image.shape[0]\
and origin[1]+offset[1] >= 0 and origin[1]+offset[1] < image.shape[1]\
else np.array([0.0,0.0,0])
return np.dot(pixel_value, np.array([0.2126, 0.7152, 0.0722]))
def luma_based_refract(point, image, caustics, eta, lens_scale, lighting_amount):
north_luma = luma_at_offset(caustics, point, np.array([0,-1]))
south_luma = luma_at_offset(caustics, point, np.array([0, 1]))
west_luma = luma_at_offset(caustics, point, np.array([-1, 0]))
east_luma = luma_at_offset(caustics, point, np.array([1,0]))
lens_normal = np.array([east_luma - west_luma, south_luma - north_luma, 1.0])
lens_normal = lens_normal / np.linalg.norm(lens_normal)
refract_vector = refract(np.array([0.0, 0.0, 1.0]), lens_normal, eta) * lens_scale
refract_vector = np.round(refract_vector, 3)
out_pixel = bilinear_interpolation(image, point+refract_vector[0:2])
out_pixel += (north_luma - south_luma) * lighting_amount
out_pixel += (east_luma - west_luma) * lighting_amount
return np.clip(out_pixel, 0, 1)
noise = np.array([[caustic_noise_kernel(np.array([y,x]), time, size)\
for x in range(self.im_size)] for y in range(self.im_size)])
noise = gaussian_filter(noise, sigma=softening)
image = image.astype(np.float32) / 255
out = np.array([[luma_based_refract(np.array([y,x]), image, noise, eta, lens_scale, lighting_amount)\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip((out * 255).astype(np.uint8), 0, 255)
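# Splits the image into a grid of cells and applies a pinch-and-twirl warp inside each cell.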
class PinchAndTwirl(Transform):
name = 'pinch_and_twirl'
tags = ['new_corruption', 'cifar_c_bar']
def sample_parameters(self):
num_per_axis = 5 if self.im_size==224 else 3
angles = np.array([np.random.choice([1,-1]) * float_parameter(self.severity, np.pi/2) for i in range(num_per_axis ** 2)]).reshape(num_per_axis, num_per_axis)
amount = float_parameter(self.severity, 0.4) + 0.1
return {'num_per_axis' : num_per_axis, 'angles' : angles, 'amount' : amount}
def transform(self, image, num_per_axis, angles, amount):
def warp_kernel(point, center, radius, amount, angle):
dx = point[0] - center[0]
dy = point[1] - center[1]
dist = np.linalg.norm(point - center)
if dist > radius or np.round(dist, 3) == 0.0:
return point
d = dist / radius
t = np.sin(np.pi * 0.5 * d) ** (- amount)
dx *= t
dy *= t
e = 1 - d
a = angle * (e ** 2)
out = center + np.array([dx*np.cos(a) - dy*np.sin(a), dx*np.sin(a) + dy*np.cos(a)])
return out
out = image.copy().astype(np.float32)
grid_size = self.im_size // num_per_axis
radius = grid_size / 2
for i in range(num_per_axis):
for j in range(num_per_axis):
l, r = i * grid_size, (i+1) * grid_size
u, d = j * grid_size, (j+1) * grid_size
center = np.array([u+radius, l+radius])
out[u:d,l:r,:] = np.array([[bilinear_interpolation(out, warp_kernel(np.array([y,x]), center, radius, amount, angles[i,j]))\
for x in np.arange(l,r)] for y in np.arange(u,d)])
return np.clip(out, 0, 255).astype(np.uint8)
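# Applies many small, randomly placed fisheye-style refraction warps.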
class FishEye(Transform):
name = 'fish_eye'
tags = ['new_corruption']
def sample_parameters(self):
seed = np.random.randint(low=0, high=2**32)
density = 0.01 * 224**2 / (self.im_size**2)
eta = float_parameter(self.severity, 2.0) + 1.0
radius = max(0.05 * self.im_size, 3)
return {'seed' : seed, 'density' : density, 'eta': eta, 'radius' : radius}
def transform(self, image, density, eta, radius, seed):
def warp_kernel(point, center, a, b, eta):
dx = point[0] - center[0]
dy = point[1] - center[1]
x2 = dx**2
y2 = dy**2
a2 = a**2
b2 = b**2
if (y2 >= (b2 - b2*x2/a2)):
return point
r = 1.0 / eta
z = np.sqrt((1.0 - x2/a2 - y2/b2) * (a*b))
z2 = z**2
x_angle = np.arccos(dx / np.sqrt(x2+z2))
angle_1 = np.pi/2 - x_angle
angle_2 = np.arcsin(np.sin(angle_1)*r)
angle_2 = np.pi/2 - x_angle - angle_2
out_x = point[0] - np.tan(angle_2)*z
y_angle = np.arccos(dy / np.sqrt(y2+z2))
angle_1 = np.pi/2 - y_angle
angle_2 = np.arcsin(np.sin(angle_1)*r)
angle_2 = np.pi/2 - y_angle - angle_2
out_y = point[1] - np.tan(angle_2)*z
return np.array([out_x, out_y])
random_state = np.random.RandomState(seed=seed)
num = int(density * self.im_size**2)
out = image.copy().astype(np.float32)
for i in range(num):
center = random_state.uniform(low=0, high=self.im_size, size=2)
l = max(np.floor(center[1]-radius).astype(np.int), 0)
r = min(np.ceil(center[1]+radius).astype(np.int), self.im_size)
u = max(np.floor(center[0]-radius).astype(np.int), 0)
d = min(np.ceil(center[0]+radius).astype(np.int), self.im_size)
out[u:d,l:r,:] = np.array([[bilinear_interpolation(out, warp_kernel(np.array([y,x]), center, radius, radius, eta)) for x in np.arange(l,r)] for y in np.arange(u,d)])
return np.clip(out, 0, 255).astype(np.uint8)
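# Radial sinusoidal ripple emanating from a random center, as from a water drop.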
class WaterDrop(Transform):
name = 'water_drop'
tags = ['new_corruption']
def sample_parameters(self):
center = np.array([self.im_size //2, self.im_size//2])
center = np.random.uniform(low=0.25, high=0.75, size=2) * self.im_size
radius = self.im_size//2
amplitude = float_parameter(self.severity, 0.25)
wavelength = np.random.uniform(low=0.05, high=0.2) * self.im_size
phase = np.random.uniform(low=0.0, high=2*np.pi)
return {'center': center, 'radius' : radius, 'amplitude' : amplitude, 'wavelength' : wavelength, 'phase': phase}
def transform(self, image, center, radius, amplitude, wavelength, phase):
def warp_kernel(point, center, radius, amplitude, wavelength, phase):
dx, dy = point - center
dist = np.linalg.norm(point-center)
if dist > radius:
return point
amount = amplitude * np.sin(dist / wavelength * np.pi * 2 - phase)
if dist != 0.0:
amount *= wavelength / dist
return point + amount * (point - center)
image = np.array([[bilinear_interpolation(image, warp_kernel(np.array([y,x]), center, radius, amplitude, wavelength, phase))\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(image, 0, 255).astype(np.uint8)
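# Displaces pixel coordinates sinusoidally along both axes.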
class Ripple(Transform):
name = 'ripple'
tags = ['new_corruption', 'cifar_c_bar']
def sample_parameters(self):
amplitudes = np.array([float_parameter(self.severity, 0.025)\
for i in range(2)]) * self.im_size
wavelengths = np.random.uniform(low=0.1, high=0.3, size=2) * self.im_size
phases = np.random.uniform(low=0, high=2*np.pi, size=2)
return {'amplitudes' : amplitudes, 'wavelengths' : wavelengths, 'phases' : phases}
def transform(self, image, wavelengths, phases, amplitudes):
def warp_kernel(point, wavelengths, phases, amplitudes):
return point + amplitudes * np.sin(2 * np.pi * point / wavelengths + phases)
image = np.array([[bilinear_interpolation(image, warp_kernel(np.array([y,x]), wavelengths, phases, amplitudes))\
for x in range(self.im_size)] for y in range(self.im_size)])
return np.clip(image, 0, 255).astype(np.uint8)
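# Random perspective warp; regions the warp leaves empty are filled with the original image instead of black bars.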
class PerspectiveNoBars(Transform):
name = 'perspective_no_bars'
tags = ['new_corruption']
def sample_parameters(self):
offset_x = float_parameter(self.severity, 0.1)
if np.random.uniform() > 0.5:
offset_x = -offset_x
offset_y = float_parameter(self.severity, 0.1)
if np.random.uniform() > 0.5:
offset_y = -offset_y
shift_x = float_parameter(self.severity, self.im_size / 10)
if np.random.uniform() > 0.5:
shift_x = -shift_x
shift_y = float_parameter(self.severity, self.im_size / 10)
if np.random.uniform() > 0.5:
shift_y = -shift_y
factor_x = float_parameter(self.severity, 0.15)
if np.random.uniform() > 0.5:
factor_x = -factor_x
factor_x = 2 ** factor_x
factor_y = float_parameter(self.severity, 0.15)
if np.random.uniform() > 0.5:
factor_y = -factor_y
factor_y = 2 ** factor_y
denom_x = float_parameter(self.severity, 0.2 / self.im_size)
if np.random.uniform() > 0.5:
denom_x = -denom_x
denom_y = float_parameter(self.severity, 0.2 / self.im_size)
if np.random.uniform() > 0.5:
denom_y = -denom_y
perspective_params = np.array([factor_x, offset_x, shift_x,offset_y, factor_y, shift_y, denom_x, denom_y])
return {'perspective_params' : perspective_params}
def transform(self, image, perspective_params):
im = Image.fromarray(image)
im = im.transform(
(self.im_size, self.im_size),
Image.PERSPECTIVE,
perspective_params,
resample=Image.BILINEAR
)
im = np.array(im).astype(np.float32)
mask = Image.fromarray(np.ones_like(image).astype(np.uint8)*255)
mask = mask.transform(
(self.im_size, self.im_size),
Image.PERSPECTIVE,
perspective_params,
resample=Image.BILINEAR
)
mask = np.array(mask).astype(np.float32) / 255
im = mask * im + (1-mask) * image
return im.astype(np.uint8)
def convert_to_numpy(self, params):
return params['perspective_params']
def convert_from_numpy(self, numpy_record):
return {'perspective_params' : numpy_record}
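# Warps the image onto a random quadrilateral; empty regions are filled with the original image instead of black bars.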
class QuadrilateralNoBars(Transform):
name = 'quadrilateral_no_bars'
tags = ['new_corruption']
def sample_parameters(self):
points = np.array([
[0,0],
[0, self.im_size],
[self.im_size, self.im_size],
[self.im_size, 0]
]).astype(np.float32)
shift = float_parameter(self.severity, self.im_size / 3) * np.random.uniform(low=-1,high=1, size=(4,2))
points += shift
return {'points' : points}
def transform(self, image, points):
im = Image.fromarray(image)
im = im.transform(
(self.im_size, self.im_size),
Image.QUAD,
points.flatten(),
resample=Image.BILINEAR
)
im = np.array(im).astype(np.float32)
mask = Image.fromarray(np.ones_like(image).astype(np.uint8)*255)
mask = mask.transform(
(self.im_size, self.im_size),
Image.QUAD,
points.flatten(),
resample=Image.BILINEAR
)
mask = np.array(mask).astype(np.float32) / 255
im = mask * im + (1-mask) * image
return im.astype(np.uint8)
def convert_to_numpy(self, params):
return params['points'].flatten()
def convert_from_numpy(self, numpy_record):
return {'points' : numpy_record.reshape(4,2)}
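# Scatters pixels by hash-based pseudo-random offsets of up to 'radius' pixels.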
class Scatter(Transform):
name = 'scatter'
tags = ['new_corruption']
def sample_parameters(self):
seed = np.random.uniform(low=0.0, high=10.0)
radius = float_parameter(self.severity, self.im_size/10)
return {'seed' : seed, 'radius' : radius}
def transform(self, image, seed, radius):
def noise(x, y, seed):
i, j = np.sin(x * seed), np.cos(y * seed)
return (np.sin(12.9898*i + 78.233*j) * 43758.5453) % 1
def warp_kernel(x, y, seed, radius):
x_offset = radius * (-1.0 + noise(x, y, seed) * 2)
y_offset = radius * (-1.0 + noise(y, x, seed) * 2)
x_new = min(max(0, x+x_offset), self.im_size-1)
y_new = min(max(0, y+y_offset), self.im_size-1)
return y_new, x_new
out = np.array([[bilinear_interpolation(image, warp_kernel(x, y, seed, radius))\
for x in range(self.im_size)] for y in range(self.im_size)])
return out.astype(np.uint8)
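# Lateral chromatic aberration: shifts each RGB channel by an independent random offset.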
class ChromaticAbberation(Transform):
name = 'chromatic_abberation'
tags = ['new_corruption']
def sample_parameters(self):
angles = np.random.uniform(low=0, high=2*np.pi, size=3)
dists = np.array([float_parameter(self.severity, self.im_size / 10)\
for i in range(3)])
shifts = np.array([[np.cos(a)*d, np.sin(a)*d] for a, d in zip(angles, dists)])
return { 'rgb_shifts' : shifts}
def transform(self, image, rgb_shifts):
out = image.copy()
for i in range(3):
out[:,:,i] = shift(image[:,:,i], rgb_shifts[i], prefilter=False)
return out
def convert_to_numpy(self, params):
return params['rgb_shifts'].flatten()
def convert_from_numpy(self, numpy_record):
return {'rgb_shifts' : numpy_record.reshape(3,2).astype(np.int)}
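# Transverse chromatic aberration: rescales each RGB channel by a slightly different factor about the image center, then center-crops.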
class TransverseChromaticAbberation(Transform):
name = 'transverse_chromatic_abberation'
tags = ['new_corruption', 'cifar_c_bar']
def sample_parameters(self):
scales = np.array([float_parameter(self.severity, 0.5)\
for i in range(3)])
scale = float_parameter(self.severity, 0.5)
scales = np.array([1.0, 1.0+scale/2, 1.0+scale])
scales = scales[np.random.permutation(3)]
return { 'scales' : scales }
def transform(self, image, scales):
out = image.copy()
for c in range(3):
zoomed = zoom(image[:,:,c], scales[c], prefilter=False)
edge = (zoomed.shape[0]-self.im_size)//2
out[:,:,c] = zoomed[edge:edge+self.im_size, edge:edge+self.im_size]
return out.astype(np.uint8)
def convert_to_numpy(self, params):
return params['scales'].flatten()
def convert_from_numpy(self, numpy_record):
return {'scales' : numpy_record}
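# Rotational motion blur: averages rotated copies of the image with Gaussian weights over the rotation angle.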
class CircularMotionBlur(Transform):
name = 'circular_motion_blur'
tags = ['new_corruption', 'cifar_c_bar']
def sample_parameters(self):
amount = float_parameter(self.severity,15)
return {'amount' : amount}
def transform(self, image, amount):
num = 21
factors = []
rotated = []
image = image.astype(np.float32) / 255
for i in range(num):
angle = (2*i/(num-1) - 1) * amount
rotated.append(rotate(image, angle, reshape=False))
factors.append(np.exp(- 2*(2*i/(num-1)-1)**2))
out = np.zeros_like(image)
for i, f in zip(rotated, factors):
out += f * i
out /= sum(factors)
return np.clip(out*255, 0, 255).astype(np.uint8)
| augmentation-corruption-fbr_main | imagenet_c_bar/corrupt.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import torchvision as tv
from transform_finder import build_transform
from utils.converters import PilToNumpy, NumpyToPil
import os
import numpy as np
import torch
parser = argparse.ArgumentParser(description="Make CIFAR-10-C-Bar")
parser.add_argument('--cifar_dir', type=str, required=True,
help='The path to the CIFAR-10 dataset. This path should contain '
'the folder cifar-10-batches-py/')
parser.add_argument('--out_dir', type=str, default='.',
help='The path to where CIFAR-10-C-Bar will be saved.')
parser.add_argument('--num_workers', type=int, default=10,
help='The number of workers to build images with.')
parser.add_argument('--batch_size', type=int, default=200,
help='Batch size of torch data loader used to parallelize '
'data processing.')
parser.add_argument('--seed', type=int, default=0,
help='The random seed used to generate corruptions.')
parser.add_argument('--corruption_file', type=str, default='cifar10_c_bar.csv',
help='A file that specifies which corruptions to produce at which severities. '
'Path is relative to the script.')
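# Each row of the corruption CSV is: <corruption name>, severity_1, severity_2, ...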
def read_corruption_csv(filename):
with open(filename) as f:
lines = [l.rstrip() for l in f.readlines()]
corruptions = {}
for line in lines:
vals = line.split(",")
if not vals:
continue
corruptions[vals[0]] = [float(v) for v in vals[1:]]
return corruptions
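# For each corruption, applies every listed severity to the CIFAR-10 test set and writes one
# uint8 array of shape (num_severities*10000, 32, 32, 3) per corruption, plus a shared labels.npy.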
def main():
args = parser.parse_args()
dataset_path = args.cifar_dir
out_dir = os.path.join(args.out_dir, 'CIFAR-10-C-Bar')
bs = args.batch_size
if not os.path.exists(out_dir):
os.mkdir(out_dir)
file_dir = os.path.dirname(os.path.realpath(__file__))
corruption_csv = os.path.join(file_dir, args.corruption_file)
corruptions = read_corruption_csv(corruption_csv)
for name, severities in corruptions.items():
data = np.zeros((len(severities)*10000, 32, 32, 3)).astype(np.uint8)
labels = np.zeros(len(severities)*10000).astype(np.int)
for i, severity in enumerate(severities):
print("Starting {}-{:.2f}...".format(name, severity))
transform = tv.transforms.Compose([
PilToNumpy(),
build_transform(name=name, severity=severity, dataset_type='cifar'),
])
dataset = tv.datasets.CIFAR10(dataset_path, train=False, download=False, transform=transform)
loader = torch.utils.data.DataLoader(
dataset,
shuffle=False,
sampler=None,
drop_last=False,
pin_memory=False,
num_workers=args.num_workers,
batch_size=bs
)
for j, (im, label) in enumerate(loader):
if im.size(0)==bs:
data[i*10000+j*bs:i*10000+bs*(j+1),:,:,:] = im.numpy().astype(np.uint8)
labels[i*10000+j*bs:i*10000+bs*(j+1)] = label.numpy()
else:
data[i*10000+j:,:,:,:] = im.numpy().astype(np.uint8)
labels[i*10000+j:] = label.numpy()
out_file = os.path.join(out_dir, name + ".npy")
print("Saving {} to {}.".format(name, out_file))
np.save(out_file, data)
labels_file = os.path.join(out_dir, "labels.npy")
np.save(labels_file, labels)
if __name__=="__main__":
main()
| augmentation-corruption-fbr_main | imagenet_c_bar/make_cifar10_c_bar.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
import numpy as np
def is_iterable(obj):
try:
iter(obj)
except:
return False
else:
return True
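# Base class for all corruptions: subclasses implement sample_parameters() and transform();
# __call__ samples parameters per image, and convert_to_numpy/convert_from_numpy round-trip them through flat numpy records.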
class Transform(abc.ABC):
name = "abstract_transform"
def __init__(self, severity, im_size, record=False, max_intensity=False, **kwargs):
self.im_size = im_size
self.severity = severity
self.record = record
self.max_intensity = max_intensity
@abc.abstractmethod
def transform(self, image, **kwargs):
...
@abc.abstractmethod
def sample_parameters(self):
...
def __call__(self, image):
params = self.sample_parameters()
out = self.transform(image, **params)
if self.record:
return out, params
return out
def convert_to_numpy(self, params):
out = []
for k, v in params.items():
if isinstance(v, np.ndarray):
out.extend(v.flatten().tolist())
elif is_iterable(v):
out.extend([x for x in v])
else:
out.append(v)
return np.array(out)
def convert_from_numpy(self, numpy_record):
param_signature = self.sample_parameters()
#assert len(param_signature.keys())<=len(numpy_record), "Mismatched numpy_record."
offset = 0
for k, v in param_signature.items():
if isinstance(v, np.ndarray):
num = len(v.flatten())
data = numpy_record[offset:offset+num]
if v.dtype==np.int or v.dtype==np.uint:
data = np.round(data, 3)
data = data.astype(v.dtype)
param_signature[k] = data.reshape(v.shape)
offset += num
elif is_iterable(v):
data = []
for x in v:
if isinstance(x, int):
data.append(int(np.round(numpy_record[offset],3)))
else:
data.append(type(x)(numpy_record[offset]))
offset += 1
param_signature[k] = data
else:
if isinstance(v, int):
param_signature[k] = int(np.round(numpy_record[offset],3))
else:
param_signature[k] = type(v)(numpy_record[offset])
offset += 1
return param_signature
| augmentation-corruption-fbr_main | imagenet_c_bar/base.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from PIL import Image
import torch
class PilToNumpy(object):
def __init__(self, as_float=False, scaled_to_one=False):
self.as_float = as_float
self.scaled_to_one = scaled_to_one
assert (not scaled_to_one) or (as_float and scaled_to_one),\
"Must output a float if rescaling to one."
def __call__(self, image):
if not self.as_float:
return np.array(image).astype(np.uint8)
elif not self.scaled_to_one:
return np.array(image).astype(np.float32)
else:
return np.array(image).astype(np.float32) / 255
class NumpyToPil(object):
def __init__(self):
pass
def __call__(self, image):
return Image.fromarray(image)
class NumpyToTensor(object):
def __init__(self, HWC_to_CHW=True, bit_to_float=True):
self.HWC_to_CHW = HWC_to_CHW
self.bit_to_float = bit_to_float
pass
def __call__(self, image):
image = image.astype(np.float32)
if self.bit_to_float:
image /= 255
if self.HWC_to_CHW:
image = image.transpose(2,0,1)
return torch.Tensor(image)
| augmentation-corruption-fbr_main | imagenet_c_bar/utils/converters.py |