response
stringlengths
1
33.1k
instruction
stringlengths
22
582k
Configure the common library for a specific branch. >>> set_swift_branch('main')
def set_swift_branch(branch):
    """Configure the common library for a specific branch.

    >>> set_swift_branch('main')
    """
    # Stored module-wide; other helpers consult it for branch-specific flags.
    global swift_branch
    swift_branch = branch
Override the default execute timeout
def set_default_execute_timeout(timeout):
    """Override the default execute timeout (seconds) used by execute()."""
    global DEFAULT_EXECUTE_TIMEOUT
    DEFAULT_EXECUTE_TIMEOUT = timeout
Clone Swift and dependencies using update-checkout.
def clone_repos(swift_branch, workspace='.'):
    """Clone Swift and its dependencies using update-checkout."""
    workspace = private_workspace(workspace)
    swift = os.path.join(workspace, "swift")
    # Only clone the main swift checkout if it is not already present;
    # update-checkout handles everything else (and re-syncs swift itself).
    if not os.path.exists(swift):
        git_clone('[email protected]:apple/swift.git', swift, tree=swift_branch)
    checkout_cmd = [os.path.join(swift, 'utils/update-checkout')]
    checkout_cmd += [
        "--clone-with-ssh",
        "--reset-to-remote",
        "--scheme", swift_branch,
        '-j', str(multiprocessing.cpu_count()),
    ]
    # 30-minute budget: cloning many repositories can be slow.
    check_execute(checkout_cmd, timeout=60 * 30)
A callback function that raises an alarm.
def alarm_handler(signum, frame):
    """SIGALRM handler: raise Alarm so a timed-out operation aborts."""
    raise Alarm
Return a valid shell string from a given command list. >>> shell_join(['echo', 'Hello, World!']) "echo 'Hello, World!'"
def shell_join(command):
    """Return a valid shell string from a given command list.

    >>> shell_join(['echo', 'Hello, World!'])
    "echo 'Hello, World!'"
    """
    # pipes.quote is deprecated and removed in Python 3.13; shlex.quote is
    # the documented, behavior-identical replacement.
    import shlex
    return ' '.join(shlex.quote(part) for part in command)
Print a string to stderr and flush.
def debug_print(s, stderr=sys.stderr):
    """Write *s* plus a newline to the given stream and flush immediately."""
    print(s, file=stderr)
    # Flush so diagnostics interleave correctly with subprocess output.
    stderr.flush()
Print a command list as a shell string to stderr and flush.
def shell_debug_print(command, stderr=sys.stderr):
    """Log a command list, shell-quoted and prefixed with '$ ', to stderr."""
    debug_print('$ ' + shell_join(command), stderr=stderr)
Execute a given command with an optional timeout in seconds. >>> execute(['echo', 'Hello, World!']) 0
def execute(command, timeout=None, stdout=sys.stdout, stderr=sys.stderr,
            **kwargs):
    """Execute a command with an optional timeout in seconds.

    Returns the subprocess exit status, or 124 when the timeout fires.

    >>> execute(['echo', 'Hello, World!'])
    0
    """
    if timeout is None:
        timeout = DEFAULT_EXECUTE_TIMEOUT
    shell_debug_print(command, stderr=stderr)
    # 124 is the conventional "timed out" status (matches coreutils timeout).
    returncode = 124
    try:
        with Timeout(timeout):
            returncode = subprocess.call(command, stdout=stdout,
                                         stderr=stderr, **kwargs)
    except Alarm:
        debug_print(command[0] + ': Timed out', stderr=stderr)
    return returncode
Check execute a given command and return its output. >>> check_execute_output(['echo', 'Hello, World!']) 'Hello, World!\n'
def check_execute_output(command, timeout=None, stdout=sys.stdout,
                         stderr=sys.stderr, **kwargs):
    """Run a command and return its decoded stdout, raising on failure.

    >>> check_execute_output(['echo', 'Hello, World!'])
    'Hello, World!\\n'
    """
    if timeout is None:
        timeout = DEFAULT_EXECUTE_TIMEOUT
    shell_debug_print(command, stderr=stderr)
    try:
        with Timeout(timeout):
            raw = subprocess.check_output(command, stderr=stderr, **kwargs)
            output = raw.decode('utf-8')
    except subprocess.CalledProcessError as e:
        # Log the failure before propagating it to the caller.
        debug_print(e, stderr=stderr)
        raise
    return output
Check execute a given command. >>> check_execute(['echo', 'Hello, World!']) 0
def check_execute(command, timeout=None, sandbox_profile=None, max_retries=1,
                  stdout=sys.stdout, stderr=sys.stderr, **kwargs):
    """Run a command, retrying up to max_retries; raise on persistent failure.

    >>> check_execute(['echo', 'Hello, World!'])
    0
    """
    if timeout is None:
        timeout = DEFAULT_EXECUTE_TIMEOUT
    if sandbox_profile:
        system = platform.system()
        if system == 'Darwin':
            command = ['sandbox-exec', '-f', sandbox_profile] + command
        elif system == 'Linux':
            # TODO: remove explicit dns after Firejail bug is resolved
            command = ['firejail', '--quiet',
                       '--profile=%s' % sandbox_profile,
                       '--private=.', '--overlay-tmpfs',
                       '--dns=8.8.8.8'] + command
    returncode = -1
    for _attempt in range(max_retries):
        returncode = execute(command, timeout=timeout,
                             stdout=stdout, stderr=stderr, **kwargs)
        if returncode == 0:
            return returncode
    raise ExecuteCommandFailure(command, returncode)
Perform a git submodule update operation on a path.
def git_submodule_update(path, stdout=sys.stdout, stderr=sys.stderr):
    """Initialize and update all submodules (recursively) at *path*."""
    command = ['git', '-C', path,
               'submodule', 'update', '--init', '--recursive']
    return check_execute(command, stdout=stdout, stderr=stderr)
Perform a git clean operation on a path.
def git_clean(path, stdout=sys.stdout, stderr=sys.stderr):
    """Force-clean a git checkout, removing untracked and ignored files."""
    # On macOS, clear the "user immutable" flag first or clean can fail.
    if platform.system() == 'Darwin':
        check_execute(['chflags', '-R', 'nouchg', path],
                      stdout=stdout, stderr=stderr)
    return check_execute(['git', '-C', path, 'clean', '-ffdx'],
                         stdout=stdout, stderr=stderr)
Perform a git pull operation on a path.
def git_pull(path, stdout=sys.stdout, stderr=sys.stderr):
    """Run `git pull` in the checkout at *path*."""
    return check_execute(['git', '-C', path, 'pull'],
                         stdout=stdout, stderr=stderr)
Perform a git clone operation on a url to a path.
def git_clone(url, path, tree=None, recursive=True,
              stdout=sys.stdout, stderr=sys.stderr):
    """Clone *url* into *path*, optionally checking out *tree* and submodules.

    Returns 0 when every underlying git operation succeeded, else 1.
    """
    returncodes = [check_execute(['git', 'clone', url, path],
                                 stdout=stdout, stderr=stderr)]
    if tree:
        returncodes.append(git_checkout(tree, path, force=True,
                                        stdout=stdout, stderr=stderr))
    if recursive:
        returncodes.append(git_submodule_update(path,
                                                stdout=stdout, stderr=stderr))
    return 0 if all(rc == 0 for rc in returncodes) else 1
Perform a git checkout operation on a path.
def git_checkout(tree, path, force=False,
                 stdout=sys.stdout, stderr=sys.stderr):
    """Check out *tree* (branch/tag/sha) in the repo at *path*."""
    command = ['git', '-C', path, 'checkout', tree]
    if force:
        # Place '-f' between 'checkout' and the tree-ish.
        command.insert(4, '-f')
    return check_execute(command, stdout=stdout, stderr=stderr)
Return the current sha of a Git repo at a path.
def git_sha(path, stdout=sys.stdout, stderr=sys.stderr):
    """Return the current HEAD sha of the Git repo at *path*."""
    command = ['git', '-C', path, 'rev-parse', 'HEAD']
    output = check_execute_output(command, stdout=stdout, stderr=stderr)
    return output.strip()
Update a repository to a given sha if necessary.
def git_update(url, configured_sha, path, incremental=False,
               stdout=sys.stdout, stderr=sys.stderr):
    """Update the repo at *path* to *configured_sha* if necessary.

    Falls back to removing the checkout and re-cloning from *url* when any
    git operation fails. Returns 0 on full success, else 1.
    """
    returncodes = []
    try:
        if not incremental:
            git_clean(path, stdout=stdout, stderr=stderr)
        current_sha = git_sha(path, stdout=stdout, stderr=stderr)
        debug_print('current_sha: ' + current_sha, stderr=stderr)
        debug_print('configured_sha: ' + configured_sha, stderr=stderr)
        if current_sha != configured_sha:
            debug_print('current_sha != configured_sha', stderr=stderr)
            returncodes.append(check_execute(['git', '-C', path, 'fetch'],
                                             stdout=stdout, stderr=stderr))
            returncodes.append(git_checkout(configured_sha, path, force=True,
                                            stdout=stdout, stderr=stderr))
            returncodes.append(git_submodule_update(path, stdout=stdout,
                                                    stderr=stderr))
        else:
            debug_print('current_sha == configured_sha', stderr=stderr)
            # Still force-checkout to clear any local modifications.
            returncodes.append(git_checkout(configured_sha, path, force=True,
                                            stdout=stdout, stderr=stderr))
    except ExecuteCommandFailure:
        debug_print("warning: Unable to update. Falling back to a clone.",
                    stderr=stderr)
        check_execute(['rm', '-rf', path], stdout=stdout, stderr=stderr)
        return git_clone(url, path, tree=configured_sha,
                         stdout=stdout, stderr=stderr)
    return 0 if all(rc == 0 for rc in returncodes) else 1
Return a path relative to a private workspace.
def private_workspace(path):
    """Return *path* rebased into the job's private workspace, if any.

    When the WORKSPACE environment variable is set (e.g. under Jenkins),
    paths are placed under a sibling 'workspace-private' directory;
    otherwise the absolute form of *path* is returned unchanged.
    """
    workspace = os.environ.get('WORKSPACE')
    if workspace is None:
        return os.path.abspath(path)
    parent_of_parent = os.path.dirname(os.path.dirname(workspace))
    return os.path.abspath(os.path.join(parent_of_parent,
                                        'workspace-private',
                                        os.path.basename(workspace),
                                        path))
Configure the library for a specific branch. >>> set_swift_branch('main')
def set_swift_branch(branch):
    """Configure this library (and the common library) for a branch.

    >>> set_swift_branch('main')
    """
    global swift_branch
    swift_branch = branch
    # Keep the shared common module in sync with our branch selection.
    common.set_swift_branch(branch)
Return the corresponding stdlib name for a destination.
def get_stdlib_platform_path(swiftc, destination):
    """Return the stdlib directory for *destination*, relative to *swiftc*.

    The destination string is matched by substring against known platform
    names; order matters and mirrors the table below.
    """
    stdlib_names = {
        'macOS': 'macosx',
        'iOS': 'iphonesimulator',
        'tvOS': 'appletvsimulator',
        'watchOS': 'watchsimulator',
    }
    stdlib_dir = next((name for key, name in stdlib_names.items()
                       if key in destination), None)
    assert stdlib_dir is not None
    toolchain_root = os.path.dirname(os.path.dirname(swiftc))
    return os.path.join(toolchain_root, 'lib/swift/' + stdlib_dir)
Clean a Swift package manager project.
def clean_swift_package(path, swiftc, sandbox_profile,
                        stdout=sys.stdout, stderr=sys.stderr):
    """Clean a Swift package manager project at *path*."""
    swift = os.path.join(os.path.dirname(swiftc), 'swift')
    # swift-3.0 used a different CLI spelling for 'clean'.
    if swift_branch == 'swift-3.0-branch':
        command = [swift, 'build', '-C', path, '--clean']
    else:
        command = [swift, 'package', '--package-path', path, 'clean']
    # --disable-sandbox only exists on branches newer than 3.1.
    if swift_branch not in ['swift-3.0-branch', 'swift-3.1-branch']:
        command.insert(2, '--disable-sandbox')
    return common.check_execute(command, sandbox_profile=sandbox_profile,
                                stdout=stdout, stderr=stderr)
Build a Swift package manager project.
def build_swift_package(path, swiftc, swift_version, configuration,
                        sandbox_profile, stdout=sys.stdout, stderr=sys.stderr,
                        added_swift_flags=None, incremental=False,
                        override_swift_exec=None, build_tests=False):
    """Build a Swift package manager project.

    Raises common.ExecuteCommandFailure (via check_execute) on failure.
    """
    swift = os.path.join(os.path.dirname(swiftc), 'swift')
    if not incremental:
        clean_swift_package(path, swiftc, sandbox_profile,
                            stdout=stdout, stderr=stderr)
    env = os.environ.copy()
    env['DYLD_LIBRARY_PATH'] = get_stdlib_platform_path(swiftc, 'macOS')
    env['SWIFT_EXEC'] = override_swift_exec or swiftc
    command = [swift, 'build', '--package-path', path, '--verbose',
               '--configuration', configuration]
    if swift_branch not in ['swift-3.0-branch', 'swift-3.1-branch']:
        command.insert(2, '--disable-sandbox')
    if build_tests:
        command += ['--build-tests']
        if sys.platform == "linux":
            # Test discovery on Linux requires -enable-testing.
            command += ['--enable-test-discovery']
            # BUG FIX: added_swift_flags defaults to None, so the original
            # `added_swift_flags += ' -enable-testing'` raised TypeError
            # whenever no extra flags were supplied. Coalesce to '' first.
            added_swift_flags = (added_swift_flags or '') + ' -enable-testing'
    if swift_version:
        if '.' not in swift_version:
            swift_version += '.0'
        major, minor = swift_version.split('.', 1)
        # Need to use float for minor version parsing because it's possible
        # that it would be specified as e.g. `4.0.3`.
        if int(major) == 4 and float(minor) == 2.0:
            command += ['-Xswiftc', '-swift-version',
                        '-Xswiftc', swift_version]
        else:
            command += ['-Xswiftc', '-swift-version', '-Xswiftc', major]
    if added_swift_flags is not None:
        for flag in added_swift_flags.split():
            command += ["-Xswiftc", flag]
    return common.check_execute(command, timeout=3600,
                                sandbox_profile=sandbox_profile,
                                stdout=stdout, stderr=stderr, env=env)
Test a Swift package manager project.
def test_swift_package(path, swiftc, sandbox_profile,
                       stdout=sys.stdout, stderr=sys.stderr,
                       added_swift_flags=None, incremental=False,
                       override_swift_exec=None):
    """Test a Swift package manager project.

    Raises common.ExecuteCommandFailure (via check_execute) on failure.
    """
    swift = os.path.join(os.path.dirname(swiftc), 'swift')
    if not incremental:
        # Forward the streams, consistent with build_swift_package.
        clean_swift_package(path, swiftc, sandbox_profile,
                            stdout=stdout, stderr=stderr)
    # BUG FIX: the original aliased os.environ directly, so setting
    # SWIFT_EXEC leaked into this process's environment permanently.
    # Copy it, matching build_swift_package.
    env = os.environ.copy()
    env['SWIFT_EXEC'] = override_swift_exec or swiftc
    command = [swift, 'test', '-C', path, '--verbose']
    if added_swift_flags is not None:
        for flag in added_swift_flags.split():
            command += ["-Xswiftc", flag]
    if swift_branch not in ['swift-3.0-branch', 'swift-3.1-branch']:
        command.insert(2, '--disable-sandbox')
    return common.check_execute(command, timeout=3600,
                                sandbox_profile=sandbox_profile,
                                stdout=stdout, stderr=stderr, env=env)
Checkout an indexed repository.
def checkout(root_path, repo, commit):
    """Check out an indexed repository at *commit* under *root_path*."""
    path = os.path.join(root_path, repo['path'])
    if repo['repository'] == 'Git':
        # Update in place when a checkout already exists; clone otherwise.
        if os.path.exists(path):
            return common.git_update(repo['url'], commit, path)
        return common.git_clone(repo['url'], path, tree=commit)
    raise common.Unreachable('Unsupported repository: %s'
                             % repo['repository'])
Strip resource build phases from a given project.
def strip_resource_phases(repo_path, stdout=sys.stdout, stderr=sys.stderr):
    """Strip resource build phases from every Xcode project under repo_path."""
    # perl -00 reads paragraph-at-a-time; drop paragraphs that open a
    # PBXResourcesBuildPhase section, editing the file in place (-i).
    perl_command = ['perl', '-i', '-00ne',
                    'print unless /Begin PBXResourcesBuildPhase/']
    for root, _dirs, files in os.walk(repo_path):
        if 'project.pbxproj' in files:
            pbxfile = os.path.join(root, 'project.pbxproj')
            common.check_execute(perl_command + [pbxfile],
                                 stdout=stdout, stderr=stderr)
Call functions corresponding to actions.
def dispatch(root_path, repo, action, swiftc, swift_version,
             sandbox_profile_xcodebuild, sandbox_profile_package,
             added_swift_flags, added_xcodebuild_flags, build_config,
             should_strip_resource_phases=False,
             stdout=sys.stdout, stderr=sys.stderr, incremental=False,
             time_reporter=None, override_swift_exec=None):
    """Call functions corresponding to actions."""
    substitutions = action.copy()
    substitutions.update(repo)
    if added_swift_flags:
        # Support added swift flags specific to the current repository and
        # action by passing their fields as keyword arguments to format,
        # e.g. so that {path} in '-index-store-path /tmp/index/{path}' is
        # replaced with the value of repo's path field.
        added_swift_flags = added_swift_flags.format(**substitutions)
    if added_xcodebuild_flags:
        added_xcodebuild_flags = \
            shlex.split(added_xcodebuild_flags.format(**substitutions))
    else:
        added_xcodebuild_flags = []

    action_name = action['action']
    # Compute the Xcode-action match once (the original matched twice).
    xcode_match = re.match(
        r'^(Build|Test)Xcode(Workspace|Project)(Scheme|Target)$', action_name)

    if action_name == 'BuildSwiftPackage':
        if not build_config:
            build_config = action['configuration']
        build_tests = \
            (action.get('build_tests') == 'true'
             and build_config == 'debug') \
            or (action.get('build_tests_release')
                and build_config == 'release')
        return build_swift_package(os.path.join(root_path, repo['path']),
                                   swiftc, swift_version, build_config,
                                   sandbox_profile_package,
                                   stdout=stdout, stderr=stderr,
                                   added_swift_flags=added_swift_flags,
                                   incremental=incremental,
                                   override_swift_exec=override_swift_exec,
                                   build_tests=build_tests)
    elif action_name == 'TestSwiftPackage':
        return test_swift_package(os.path.join(root_path, repo['path']),
                                  swiftc, sandbox_profile_package,
                                  stdout=stdout, stderr=stderr,
                                  added_swift_flags=added_swift_flags,
                                  incremental=incremental,
                                  override_swift_exec=override_swift_exec)
    elif xcode_match:
        initial_xcodebuild_flags = \
            ['SWIFT_EXEC=%s' % (override_swift_exec or swiftc),
             '-IDEPackageSupportDisableManifestSandbox=YES']
        if build_config == 'debug':
            initial_xcodebuild_flags += ['-configuration', 'Debug']
        elif build_config == 'release':
            initial_xcodebuild_flags += ['-configuration', 'Release']
        elif 'configuration' in action:
            initial_xcodebuild_flags += ['-configuration',
                                         action['configuration']]
        build_env = action.get('environment', {})
        pretargets = action.get('pretargets', [])
        other_swift_flags = []
        if swift_version:
            if '.' not in swift_version:
                swift_version += '.0'
            major, minor = swift_version.split('.', 1)
            # Need to use float for minor version parsing because it's
            # possible that it would be specified as e.g. `4.0.3`.
            if int(major) == 4 and float(minor) == 2.0:
                other_swift_flags += ['-swift-version', swift_version]
                initial_xcodebuild_flags += ['SWIFT_VERSION=%s'
                                             % swift_version]
            else:
                other_swift_flags += ['-swift-version', major]
                initial_xcodebuild_flags += ['SWIFT_VERSION=%s' % major]
        if added_swift_flags:
            other_swift_flags.append(added_swift_flags)
        if other_swift_flags:
            other_swift_flags = ['$(OTHER_SWIFT_FLAGS)'] + other_swift_flags
            initial_xcodebuild_flags += \
                ['OTHER_SWIFT_FLAGS=%s' % ' '.join(other_swift_flags)]
        is_workspace = xcode_match.group(2).lower() == 'workspace'
        project_path = os.path.join(root_path, repo['path'],
                                    action[xcode_match.group(2).lower()])
        has_scheme = xcode_match.group(3).lower() == 'scheme'
        clean_build = action.get('clean_build', True)
        xcode_target = XcodeTarget(
            swiftc, project_path,
            action[xcode_match.group(3).lower()],
            action['destination'], pretargets, build_env,
            initial_xcodebuild_flags + added_xcodebuild_flags,
            is_workspace, has_scheme, clean_build,
            stdout, stderr,
            action.get("external_build_folder", False))
        if should_strip_resource_phases:
            strip_resource_phases(os.path.join(root_path, repo['path']),
                                  stdout=stdout, stderr=stderr)
        if xcode_match.group(1) == 'Build':
            return xcode_target.build(sandbox_profile_xcodebuild,
                                      stdout=stdout, stderr=stderr,
                                      incremental=incremental,
                                      time_reporter=time_reporter)
        else:
            return xcode_target.test(sandbox_profile_xcodebuild,
                                     stdout=stdout, stderr=stderr,
                                     incremental=incremental)
    else:
        raise common.Unimplemented("Unknown action: %s" % action['action'])
Return whether the specified swift version/platform/branch/configuration/job is xfailed.
def is_xfailed(xfail_args, compatible_version, platform, swift_branch,
               build_config, job_type):
    """Return the xfail issue id when the current configuration is xfailed.

    *xfail_args* is a spec dict or list of spec dicts; returns the first
    matching spec's issue identifier, or None when nothing matches.
    """
    specs = [xfail_args] if isinstance(xfail_args, dict) else xfail_args

    def allows(constraint, value):
        # A constraint may be a scalar or a list of accepted values.
        if isinstance(constraint, list):
            return value in constraint
        return constraint == value

    def match_issue(spec):
        issue = spec['issue'].split()[0]
        current = {
            'compatibility': compatible_version,
            'branch': swift_branch,
            'platform': platform,
            'job': job_type,
        }
        if 'configuration' in spec:
            if build_config is None:
                raise common.Unreachable(
                    "'xfail' entry contains 'configuration' "
                    "but none supplied via '--build-config' or the "
                    "containing action's 'configuration' field.")
            current['configuration'] = build_config.lower()
        for key, value in current.items():
            if key in spec and not allows(spec[key], value):
                return None
        return issue

    for spec in specs:
        issue = match_issue(spec)
        if issue is not None:
            return issue
    return None
Convert an argument string into a boolean.
def str2bool(s):
    """Convert an argument string ('true'/'false', any case) to a bool."""
    lowered = s.lower()
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    raise argparse.ArgumentTypeError('true/false boolean value expected.')
Add common arguments to parser.
def add_arguments(parser):
    """Add common arguments to parser."""
    parser.register('type', 'bool', str2bool)
    parser.add_argument('--verbose', action='store_true')
    # TODO: remove Linux sandbox hack
    # os.path.abspath as the type is only safe on Darwin.
    if platform.system() == 'Darwin':
        parser.add_argument('--swiftc', metavar='PATH',
                            help='swiftc executable',
                            required=True, type=os.path.abspath)
        parser.add_argument('--override-swift-exec', metavar='PATH',
                            help='override the SWIFT_EXEC that is used to '
                                 'build the projects',
                            type=os.path.abspath)
    else:
        parser.add_argument('--swiftc', metavar='PATH',
                            help='swiftc executable', required=True)
        parser.add_argument('--override-swift-exec', metavar='PATH',
                            help='override the SWIFT_EXEC that is used to '
                                 'build the projects')
    parser.add_argument('--projects', metavar='PATH', required=True,
                        help='JSON project file', type=os.path.abspath)
    parser.add_argument('--swift-version', metavar='VERS',
                        help='Swift version mode (default: None)')
    parser.add_argument('--include-repos', metavar='PREDICATE',
                        default=[], action='append',
                        help='a Python predicate to determine '
                             'whether to include a repo '
                             '(example: \'path == "Alamofire"\')')
    parser.add_argument('--exclude-repos', metavar='PREDICATE',
                        default=[], action='append',
                        help='a Python predicate to determine '
                             'whether to exclude a repo '
                             '(example: \'path == "Alamofire"\')')
    parser.add_argument('--include-versions', metavar='PREDICATE',
                        default=[], action='append',
                        help='a Python predicate to determine '
                             'whether to include a Swift version '
                             '(example: \'version == "3.0"\')')
    parser.add_argument('--exclude-versions', metavar='PREDICATE',
                        default=[], action='append',
                        help='a Python predicate to determine '
                             'whether to exclude a Swift version '
                             '(example: \'version == "3.0"\')')
    parser.add_argument('--include-actions', metavar='PREDICATE',
                        default=[], action='append',
                        help='a Python predicate to determine '
                             'whether to include an action '
                             '(example: '
                             '\'action == "BuildXcodeWorkspaceScheme"\')')
    parser.add_argument('--exclude-actions', metavar='PREDICATE',
                        default=[], action='append',
                        help='a Python predicate to determine '
                             'whether to exclude an action '
                             '(example: '
                             '\'action == "BuildXcodeWorkspaceScheme"\')')
    parser.add_argument('--swift-branch', metavar='BRANCH',
                        help='Swift branch configuration to use',
                        default='main')
    parser.add_argument('--sandbox-profile-xcodebuild', metavar='FILE',
                        help='sandbox xcodebuild build and test operations '
                             'with profile',
                        type=os.path.abspath)
    parser.add_argument('--sandbox-profile-package', metavar='FILE',
                        help='sandbox package build and test operations '
                             'with profile',
                        type=os.path.abspath)
    parser.add_argument("--test-incremental",
                        help='test incremental-mode over multiple commits',
                        action='store_true')
    parser.add_argument("--add-swift-flags", metavar="FLAGS",
                        help='add flags to each Swift invocation (note: '
                             'field names from projects.json enclosed in '
                             '{} will be replaced with their value)',
                        default='')
    parser.add_argument("--add-xcodebuild-flags", metavar="FLAGS",
                        help='add flags to each xcodebuild invocation '
                             '(note: field names from projects.json '
                             'enclosed in {} will be replaced with their '
                             'value)',
                        default='')
    parser.add_argument("--skip-clean",
                        help='skip all git and build clean steps before '
                             'building projects',
                        action='store_true')
    parser.add_argument("--build-config", metavar="NAME",
                        choices=['debug', 'release'], dest='build_config',
                        help='specify "debug" or "release" to override '
                             'the build configuration in the projects.json '
                             'file')
    parser.add_argument("--strip-resource-phases",
                        help='strip all resource phases from project file '
                             'before building (default: true)',
                        metavar='BOOL', type='bool', nargs='?',
                        const=True, default=True)
    parser.add_argument("--project-cache-path",
                        help='Path of the dir where all the project '
                             'binaries will be placed',
                        metavar='PATH', type=os.path.abspath,
                        default='project_cache')
    parser.add_argument("--report-time-path",
                        help='export time for building each xcode build '
                             'target to the specified json file',
                        type=os.path.abspath)
    parser.add_argument("--clang",
                        help='clang executable to build Xcode projects',
                        type=os.path.abspath)
    parser.add_argument("--job-type",
                        help="The type of job to run. This influences which "
                             "projects are XFailed, for example the stress "
                             "tester tracks its XFails under a different "
                             "job type. Defaults to 'source-compat'.",
                        default='source-compat')
    parser.add_argument('--process-count', type=int,
                        help='Number of parallel process to spawn when '
                             'building projects',
                        default=multiprocessing.cpu_count())
    parser.add_argument('--junit', action='store_true',
                        help='Write a junit.xml file containing the project '
                             'build results')
Add common arguments to parser.
def add_minimal_arguments(parser):
    """Add common arguments to parser."""
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument('--projects', metavar='PATH', required=True,
                        help='JSON project file', type=os.path.abspath)
    # Repo/version/action include/exclude filters all share one shape:
    # repeatable Python predicates evaluated against index element fields.
    predicate_args = [
        ('--include-repos',
         'a Python predicate to determine whether to include a repo '
         '(example: \'path == "Alamofire"\')'),
        ('--exclude-repos',
         'a Python predicate to determine whether to exclude a repo '
         '(example: \'path == "Alamofire"\')'),
        ('--include-versions',
         'a Python predicate to determine whether to include a Swift '
         'version (example: \'version == "3.0"\')'),
        ('--exclude-versions',
         'a Python predicate to determine whether to exclude a Swift '
         'version (example: \'version == "3.0"\')'),
        ('--include-actions',
         'a Python predicate to determine whether to include an action '
         '(example: \'action == "BuildXcodeWorkspaceScheme"\')'),
        ('--exclude-actions',
         'a Python predicate to determine whether to exclude an action '
         '(example: \'action == "BuildXcodeWorkspaceScheme"\')'),
    ]
    for flag, help_text in predicate_args:
        parser.add_argument(flag, metavar='PREDICATE', default=[],
                            action='append', help=help_text)
    parser.add_argument('--swift-branch', metavar='BRANCH',
                        help='Swift branch configuration to use',
                        default='main')
Evaluate predicate in context of index element fields.
def evaluate_predicate(element, predicate):
    """Evaluate predicate in context of index element fields.

    Only string-valued fields of *element* are exposed as names to the
    predicate expression.
    """
    # pylint: disable=I0011,W0123
    # The original exec'd each field into the function's locals() and then
    # eval'd the predicate against them. That relies on CPython's cached
    # locals() snapshot, which PEP 667 (Python 3.13) removes, and breaks if
    # a field value contains a triple quote. Building an explicit namespace
    # is equivalent and robust.
    # NOTE: eval of user-supplied predicates is inherently unsafe on
    # untrusted input; predicates here come from the tool's own CLI.
    namespace = {key: value for key, value in element.items()
                 if isinstance(value, str)}
    return eval(predicate, namespace)
Return whether an index element should be included.
def included_element(include_predicates, exclude_predicates, element):
    """Return whether an index element should be included.

    An element is included when no exclude predicate matches it, and
    either there are no include predicates or at least one matches.
    """
    excluded = any(evaluate_predicate(element, ep)
                   for ep in exclude_predicates)
    if excluded:
        return False
    if not include_predicates:
        return True
    return any(evaluate_predicate(element, ip)
               for ip in include_predicates)
Return first value in dictionary by iterating through keys
def dict_get(dictionary, *args, **kwargs):
    """Return the value of the first key in *args* present in *dictionary*.

    Falls back to the 'default' keyword argument when supplied; otherwise
    raises KeyError.
    """
    for candidate in args:
        try:
            return dictionary[candidate]
        except KeyError:
            continue
    if 'default' in kwargs:
        return kwargs['default']
    raise KeyError
Return parsed command line arguments.
def parse_args():
    """Return parsed command line arguments."""
    parser = argparse.ArgumentParser()
    # Shared project-runner arguments, plus runner-specific extras.
    project.add_arguments(parser)
    parser.add_argument('--only-latest-versions', action='store_true')
    parser.add_argument('--default-timeout', type=int,
                        help="override the default execute timeout (seconds)")
    return parser.parse_args()
Execute specified indexed project actions.
def main():
    """Execute specified indexed project actions.

    Returns 0 when the overall result is PASS or XFAIL, else 1.
    """
    args = parse_args()
    if args.default_timeout:
        common.set_default_execute_timeout(args.default_timeout)
    # DISABLED DUE TO: rdar://59302454.
    # To track removing this line: rdar://59302467.
    xcodebuild_flags = args.add_xcodebuild_flags
    xcodebuild_flags += ((' ' if xcodebuild_flags else '')
                         + 'DEBUG_INFORMATION_FORMAT=dwarf')
    # Use clang for building xcode projects.
    if args.clang:
        xcodebuild_flags += ' CC=%s' % args.clang
    swift_flags = args.add_swift_flags
    time_reporter = None
    if args.report_time_path:
        time_reporter = project.TimeReporter(args.report_time_path)
    with open(args.projects) as projects:
        index = json.loads(projects.read())
    # Nested builder factories: projects -> versions -> actions.
    action_builder = project.CompatActionBuilder.factory(
        args.swiftc, args.swift_version, args.swift_branch,
        args.job_type, args.sandbox_profile_xcodebuild,
        args.sandbox_profile_package, swift_flags, xcodebuild_flags,
        args.skip_clean, args.build_config, args.strip_resource_phases,
        args.only_latest_versions, args.project_cache_path,
        time_reporter, args.override_swift_exec
    )
    version_builder = project.VersionBuilder.factory(
        args.include_actions, args.exclude_actions, args.verbose,
        action_builder,
    )
    project_builder = project.ProjectBuilder.factory(
        args.include_versions, args.exclude_versions, args.verbose,
        version_builder,
    )
    result = project.ProjectListBuilder(
        args.include_repos, args.exclude_repos, args.verbose,
        args.process_count, project_builder, index
    ).build()
    common.debug_print(str(result))
    if args.junit:
        with open('results.xml', 'w') as results:
            results.write(result.xml_string())
    passing = [project.ResultEnum.PASS, project.ResultEnum.XFAIL]
    return 0 if result.result in passing else 1
Return text stripped of trailing whitespace.
def strip_trailing_whitespace(text):
    """Return *text* with trailing whitespace removed from every line.

    Because the pattern can span newlines, trailing blank lines at the
    end of the text are removed as well.
    """
    trailing_ws = re.compile(r'\s+$', re.MULTILINE)
    return trailing_ws.sub('', text)
Return parsed command line arguments.
def parse_args():
    """Return parsed command line arguments."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "project_index",
        help="a project index file to check (e.g. projects.json)",
        type=os.path.abspath,
    )
    return parser.parse_args()
Get a yacs CfgNode object with default values.
def get_config(args):
    """Get a yacs CfgNode object with default values.

    Returns a clone so the module-level defaults are never mutated —
    the "local variable" use pattern.
    """
    config = _C.clone()
    update_config(config, args)
    return config
Build optimizer, set weight decay of normalization to 0 by default.
def build_optimizer(config, model, simmim=False, is_pretrain=False):
    """Build an optimizer; weight decay of normalization layers is 0 by default.

    Parameter grouping depends on the mode: SimMIM pretrain/finetune use
    dedicated group builders (with layer-wise LR decay for finetune),
    otherwise set_weight_decay() is used.
    """
    skip = {}
    skip_keywords = {}
    # Models may advertise parameters exempt from weight decay.
    if hasattr(model, 'no_weight_decay'):
        skip = model.no_weight_decay()
    if hasattr(model, 'no_weight_decay_keywords'):
        skip_keywords = model.no_weight_decay_keywords()

    if simmim:
        if is_pretrain:
            parameters = get_pretrain_param_groups(model, skip, skip_keywords)
        else:
            depths = (config.MODEL.SWIN.DEPTHS
                      if config.MODEL.TYPE == 'swin'
                      else config.MODEL.SWINV2.DEPTHS)
            num_layers = sum(depths)
            get_layer_func = partial(get_swin_layer,
                                     num_layers=num_layers + 2,
                                     depths=depths)
            # Layer-wise LR decay: deeper layers get larger scales.
            scales = [config.TRAIN.LAYER_DECAY ** i
                      for i in reversed(range(num_layers + 2))]
            parameters = get_finetune_param_groups(
                model, config.TRAIN.BASE_LR, config.TRAIN.WEIGHT_DECAY,
                get_layer_func, scales, skip, skip_keywords)
    else:
        parameters = set_weight_decay(model, skip, skip_keywords)

    opt_lower = config.TRAIN.OPTIMIZER.NAME.lower()
    optimizer = None
    if opt_lower == 'sgd':
        optimizer = optim.SGD(parameters,
                              momentum=config.TRAIN.OPTIMIZER.MOMENTUM,
                              nesterov=True,
                              lr=config.TRAIN.BASE_LR,
                              weight_decay=config.TRAIN.WEIGHT_DECAY)
    elif opt_lower == 'adamw':
        optimizer = optim.AdamW(parameters,
                                eps=config.TRAIN.OPTIMIZER.EPS,
                                betas=config.TRAIN.OPTIMIZER.BETAS,
                                lr=config.TRAIN.BASE_LR,
                                weight_decay=config.TRAIN.WEIGHT_DECAY)
    elif opt_lower == 'fused_adam':
        optimizer = FusedAdam(parameters,
                              eps=config.TRAIN.OPTIMIZER.EPS,
                              betas=config.TRAIN.OPTIMIZER.BETAS,
                              lr=config.TRAIN.BASE_LR,
                              weight_decay=config.TRAIN.WEIGHT_DECAY)
    elif opt_lower == 'fused_lamb':
        optimizer = FusedLAMB(parameters,
                              eps=config.TRAIN.OPTIMIZER.EPS,
                              betas=config.TRAIN.OPTIMIZER.BETAS,
                              lr=config.TRAIN.BASE_LR,
                              weight_decay=config.TRAIN.WEIGHT_DECAY)
    # NOTE(review): an unrecognized optimizer name silently returns None —
    # preserved from the original; callers presumably never hit it.
    return optimizer
Checks if a file is an allowed extension. Args: filename (string): path to a file Returns: bool: True if the filename ends with a known image extension
def has_file_allowed_extension(filename, extensions):
    """Check whether a file has one of the allowed extensions.

    Args:
        filename (string): path to a file
        extensions: iterable of lowercase extension strings

    Returns:
        bool: True if the lowercased filename ends with any extension
    """
    # str.endswith accepts a tuple — one C-level call instead of a loop.
    return filename.lower().endswith(tuple(extensions))
judge if this is a zip path
def is_zip_path(img_or_path):
    """Return True when the path names a member inside a zip archive.

    Such paths use the convention 'archive.zip@member/path'.
    """
    return img_or_path.find('.zip@') != -1
Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C)
def window_partition(x, window_size):
    """Split a feature map into non-overlapping square windows.

    Args:
        x: (B, H, W, C)
        window_size (int): window size

    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    b, h, w, c = x.shape
    grid = x.view(b, h // window_size, window_size,
                  w // window_size, window_size, c)
    # Bring the two window axes together before flattening.
    return (grid.permute(0, 1, 3, 2, 4, 5)
                .contiguous()
                .view(-1, window_size, window_size, c))
Args: windows: (num_windows*B, window_size, window_size, C) window_size (int): Window size H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C)
def window_reverse(windows, window_size, H, W):
    """Reassemble windows into the original feature map.

    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image

    Returns:
        x: (B, H, W, C)
    """
    # Recover the batch size from the total window count.
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    grid = windows.view(B, H // window_size, W // window_size,
                        window_size, window_size, -1)
    return (grid.permute(0, 1, 3, 2, 4, 5)
                .contiguous()
                .view(B, H, W, -1))
Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C)
def window_partition(x, window_size):
    """Split a batch of feature maps into non-overlapping square windows.

    Args:
        x: (B, H, W, C) tensor; H and W must be divisible by window_size.
        window_size (int): window size

    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    batch, height, width, channels = x.shape
    grid = x.reshape(
        batch,
        height // window_size,
        window_size,
        width // window_size,
        window_size,
        channels,
    )
    # Bring the two per-window axes next to each other, then flatten windows.
    windows = grid.permute(0, 1, 3, 2, 4, 5).reshape(
        -1, window_size, window_size, channels
    )
    return windows
Args: windows: (num_windows*B, window_size, window_size, C) window_size (int): Window size H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C)
def window_reverse(windows, window_size, H, W):
    """Merge windows back into the full feature map.

    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image

    Returns:
        x: (B, H, W, C)
    """
    windows_per_image = (H // window_size) * (W // window_size)
    batch = windows.shape[0] // windows_per_image
    grid = windows.reshape(
        batch, H // window_size, W // window_size, window_size, window_size, -1
    )
    x = grid.permute(0, 1, 3, 2, 4, 5).reshape(batch, H, W, -1)
    return x
Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C)
def window_partition(x, window_size):
    """Split a batch of feature maps into non-overlapping square windows.

    Args:
        x: (B, H, W, C) tensor; H and W must be divisible by window_size.
        window_size (int): window size

    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    batch, height, width, channels = x.shape
    grid = x.reshape(
        batch,
        height // window_size,
        window_size,
        width // window_size,
        window_size,
        channels,
    )
    # Bring the two per-window axes next to each other, then flatten windows.
    windows = grid.permute(0, 1, 3, 2, 4, 5).reshape(
        -1, window_size, window_size, channels
    )
    return windows
Args: windows: (num_windows*B, window_size, window_size, C) window_size (int): Window size H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C)
def window_reverse(windows, window_size, H, W):
    """Merge windows back into the full feature map.

    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image

    Returns:
        x: (B, H, W, C)
    """
    windows_per_image = (H // window_size) * (W // window_size)
    batch = windows.shape[0] // windows_per_image
    grid = windows.reshape(
        batch, H // window_size, W // window_size, window_size, window_size, -1
    )
    x = grid.permute(0, 1, 3, 2, 4, 5).reshape(batch, H, W, -1)
    return x
Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C)
def window_partition(x, window_size):
    """Split a batch of feature maps into non-overlapping square windows.

    Args:
        x: (B, H, W, C) tensor; H and W must be divisible by window_size.
        window_size (int): window size

    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    batch, height, width, channels = x.shape
    grid = x.reshape(
        batch,
        height // window_size,
        window_size,
        width // window_size,
        window_size,
        channels,
    )
    # Bring the two per-window axes next to each other, then flatten windows.
    windows = grid.permute(0, 1, 3, 2, 4, 5).reshape(
        -1, window_size, window_size, channels
    )
    return windows
Args: windows: (num_windows*B, window_size, window_size, C) window_size (int): Window size H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C)
def window_reverse(windows, window_size, H, W):
    """Merge windows back into the full feature map.

    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image

    Returns:
        x: (B, H, W, C)
    """
    windows_per_image = (H // window_size) * (W // window_size)
    batch = windows.shape[0] // windows_per_image
    grid = windows.reshape(
        batch, H // window_size, W // window_size, window_size, window_size, -1
    )
    x = grid.permute(0, 1, 3, 2, 4, 5).reshape(batch, H, W, -1)
    return x
Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C)
def window_partition(x, window_size):
    """Split a batch of feature maps into non-overlapping square windows.

    Args:
        x: (B, H, W, C) tensor; H and W must be divisible by window_size.
        window_size (int): window size

    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    batch, height, width, channels = x.shape
    grid = x.reshape(
        batch,
        height // window_size,
        window_size,
        width // window_size,
        window_size,
        channels,
    )
    # Bring the two per-window axes next to each other, then flatten windows.
    windows = grid.permute(0, 1, 3, 2, 4, 5).reshape(
        -1, window_size, window_size, channels
    )
    return windows
Args: windows: (num_windows*B, window_size, window_size, C) window_size (int): Window size H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C)
def window_reverse(windows, window_size, H, W):
    """Merge windows back into the full feature map.

    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image

    Returns:
        x: (B, H, W, C)
    """
    windows_per_image = (H // window_size) * (W // window_size)
    batch = windows.shape[0] // windows_per_image
    grid = windows.reshape(
        batch, H // window_size, W // window_size, window_size, window_size, -1
    )
    x = grid.permute(0, 1, 3, 2, 4, 5).reshape(batch, H, W, -1)
    return x
https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86 Take the mean over all non-batch dimensions.
def mean_flat(tensor):
    """
    https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86
    Take the mean over all non-batch dimensions.
    """
    non_batch_dims = list(range(1, tensor.dim()))
    return tensor.mean(dim=non_batch_dims)
Convenience routine for guessing which GPU device to run model on
def choose_torch_device() -> str:
    """Convenience routine for guessing which GPU device to run model on"""
    # Preference order: CUDA, then Apple Metal (MPS), then CPU fallback.
    if torch.cuda.is_available():
        return "cuda"
    mps_backend = getattr(torch.backends, "mps", None)
    if mps_backend is not None and mps_backend.is_available():
        return "mps"
    return "cpu"
Returns an autocast compatible device from a torch device
def choose_autocast_device(device):
    """Returns an autocast compatible device from a torch device"""
    dev_type = device.type  # this returns 'mps' on M1
    # autocast is only supported for cuda; everything else gets a no-op context.
    if dev_type != "cuda":
        return "cpu", nullcontext
    # GTX 16xx cards have known issues with autocast — disable it for them.
    gpu_name = torch.cuda.get_device_name()
    if "GeForce GTX 1660" in gpu_name or "GeForce GTX 1650" in gpu_name:
        return dev_type, nullcontext
    return dev_type, autocast
Performs all_gather operation on the provided tensors. *** Warning ***: torch.distributed.all_gather has no gradient.
def concat_all_gather(tensor):
    """
    Performs all_gather operation on the provided tensors.
    *** Warning ***: torch.distributed.all_gather has no gradient.
    """
    world_size = torch.distributed.get_world_size()
    gathered = [torch.ones_like(tensor) for _ in range(world_size)]
    torch.distributed.all_gather(gathered, tensor, async_op=False)
    return torch.cat(gathered, dim=0)
Performs all_gather operation on the provided tensors. *** Warning ***: torch.distributed.all_gather has no gradient.
def concat_all_gather(tensor):
    """
    Performs all_gather operation on the provided tensors.
    *** Warning ***: torch.distributed.all_gather has no gradient.
    """
    world_size = torch.distributed.get_world_size()
    gathered = [torch.ones_like(tensor) for _ in range(world_size)]
    torch.distributed.all_gather(gathered, tensor, async_op=False)
    return torch.cat(gathered, dim=0)
Performs all_gather operation on the provided tensors. Graph remains connected for backward grad computation.
def all_gather_with_grad(tensors):
    """
    Performs all_gather operation on the provided tensors.
    Graph remains connected for backward grad computation.
    """
    world_size = torch.distributed.get_world_size()
    # Single-process case: nothing to gather, no reduction needed.
    if world_size == 1:
        return tensors
    # GatherLayer is an autograd-aware gather defined elsewhere in the project.
    gathered = GatherLayer.apply(tensors)
    return torch.cat(gathered, dim=0)
Load weights from .npz checkpoints for official Google Brain Flax implementation
def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ""):
    """Load weights from .npz checkpoints for official Google Brain Flax implementation

    Copies tensors in place into `model` (patch embed, cls token, positional
    embedding, encoder blocks, final norm). Head / pre-logits loading is
    intentionally commented out below.
    """
    import numpy as np

    def _n2p(w, t=True):
        # Convert a Flax numpy array to a torch tensor, optionally transposing
        # from Flax layout (HWIO conv, in-out linear) to PyTorch layout.
        if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1:
            w = w.flatten()
        if t:
            if w.ndim == 4:
                w = w.transpose([3, 2, 0, 1])
            elif w.ndim == 3:
                w = w.transpose([2, 0, 1])
            elif w.ndim == 2:
                w = w.transpose([1, 0])
        return torch.from_numpy(w)

    w = np.load(checkpoint_path)
    # Some checkpoints nest parameters under "opt/target/".
    if not prefix and "opt/target/embedding/kernel" in w:
        prefix = "opt/target/"
    if hasattr(model.patch_embed, "backbone"):
        # hybrid: a conv backbone (ResNet-style stem + stages) feeds the patch embed.
        backbone = model.patch_embed.backbone
        stem_only = not hasattr(backbone, "stem")
        stem = backbone if stem_only else backbone.stem
        stem.conv.weight.copy_(
            adapt_input_conv(
                stem.conv.weight.shape[1], _n2p(w[f"{prefix}conv_root/kernel"])
            )
        )
        stem.norm.weight.copy_(_n2p(w[f"{prefix}gn_root/scale"]))
        stem.norm.bias.copy_(_n2p(w[f"{prefix}gn_root/bias"]))
        if not stem_only:
            for i, stage in enumerate(backbone.stages):
                for j, block in enumerate(stage.blocks):
                    bp = f"{prefix}block{i + 1}/unit{j + 1}/"
                    for r in range(3):
                        getattr(block, f"conv{r + 1}").weight.copy_(
                            _n2p(w[f"{bp}conv{r + 1}/kernel"])
                        )
                        getattr(block, f"norm{r + 1}").weight.copy_(
                            _n2p(w[f"{bp}gn{r + 1}/scale"])
                        )
                        getattr(block, f"norm{r + 1}").bias.copy_(
                            _n2p(w[f"{bp}gn{r + 1}/bias"])
                        )
                    if block.downsample is not None:
                        block.downsample.conv.weight.copy_(
                            _n2p(w[f"{bp}conv_proj/kernel"])
                        )
                        block.downsample.norm.weight.copy_(
                            _n2p(w[f"{bp}gn_proj/scale"])
                        )
                        block.downsample.norm.bias.copy_(_n2p(w[f"{bp}gn_proj/bias"]))
        embed_conv_w = _n2p(w[f"{prefix}embedding/kernel"])
    else:
        # Plain ViT: adapt the embedding conv to the model's input channel count.
        embed_conv_w = adapt_input_conv(
            model.patch_embed.proj.weight.shape[1], _n2p(w[f"{prefix}embedding/kernel"])
        )
    model.patch_embed.proj.weight.copy_(embed_conv_w)
    model.patch_embed.proj.bias.copy_(_n2p(w[f"{prefix}embedding/bias"]))
    model.cls_token.copy_(_n2p(w[f"{prefix}cls"], t=False))
    pos_embed_w = _n2p(w[f"{prefix}Transformer/posembed_input/pos_embedding"], t=False)
    if pos_embed_w.shape != model.pos_embed.shape:
        pos_embed_w = resize_pos_embed(
            # resize pos embedding when different size from pretrained weights
            pos_embed_w,
            model.pos_embed,
            getattr(model, "num_tokens", 1),
            model.patch_embed.grid_size,
        )
    model.pos_embed.copy_(pos_embed_w)
    model.norm.weight.copy_(_n2p(w[f"{prefix}Transformer/encoder_norm/scale"]))
    model.norm.bias.copy_(_n2p(w[f"{prefix}Transformer/encoder_norm/bias"]))
    # if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]:
    #     model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel']))
    #     model.head.bias.copy_(_n2p(w[f'{prefix}head/bias']))
    # if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w:
    #     model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel']))
    #     model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias']))
    for i, block in enumerate(model.blocks.children()):
        block_prefix = f"{prefix}Transformer/encoderblock_{i}/"
        mha_prefix = block_prefix + "MultiHeadDotProductAttention_1/"
        block.norm1.weight.copy_(_n2p(w[f"{block_prefix}LayerNorm_0/scale"]))
        block.norm1.bias.copy_(_n2p(w[f"{block_prefix}LayerNorm_0/bias"]))
        # Flax stores q/k/v separately; concatenate into the fused qkv projection.
        block.attn.qkv.weight.copy_(
            torch.cat(
                [
                    _n2p(w[f"{mha_prefix}{n}/kernel"], t=False).flatten(1).T
                    for n in ("query", "key", "value")
                ]
            )
        )
        block.attn.qkv.bias.copy_(
            torch.cat(
                [
                    _n2p(w[f"{mha_prefix}{n}/bias"], t=False).reshape(-1)
                    for n in ("query", "key", "value")
                ]
            )
        )
        block.attn.proj.weight.copy_(_n2p(w[f"{mha_prefix}out/kernel"]).flatten(1))
        block.attn.proj.bias.copy_(_n2p(w[f"{mha_prefix}out/bias"]))
        for r in range(2):
            getattr(block.mlp, f"fc{r + 1}").weight.copy_(
                _n2p(w[f"{block_prefix}MlpBlock_3/Dense_{r}/kernel"])
            )
            getattr(block.mlp, f"fc{r + 1}").bias.copy_(
                _n2p(w[f"{block_prefix}MlpBlock_3/Dense_{r}/bias"])
            )
        block.norm2.weight.copy_(_n2p(w[f"{block_prefix}LayerNorm_2/scale"]))
        block.norm2.bias.copy_(_n2p(w[f"{block_prefix}LayerNorm_2/bias"]))
Overwrite model.train with this function to make sure train/eval mode does not change anymore.
def disabled_train(self, mode=True):
    """Overwrite model.train with this function to make sure train/eval mode
    does not change anymore.

    The `mode` argument is accepted for signature compatibility but ignored.
    """
    return self
Overwrite model.train with this function to make sure train/eval mode does not change anymore.
def disabled_train(self, mode=True):
    """Overwrite model.train with this function to make sure train/eval mode
    does not change anymore.

    The `mode` argument is accepted for signature compatibility but ignored.
    """
    return self
Appends dimensions to the end of a tensor until it has target_dims dimensions. From https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/utils.py
def append_dims(x, target_dims):
    """Appends dimensions to the end of a tensor until it has target_dims dimensions.
    From https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/utils.py"""
    missing = target_dims - x.ndim
    if missing < 0:
        raise ValueError(
            f"input has {x.ndim} dims but target_dims is {target_dims}, which is less"
        )
    # Trailing None indices insert singleton axes after the existing ones.
    return x[(Ellipsis,) + (None,) * missing]
Create a wrapper function for the noise prediction model. DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to firstly wrap the model function to a noise prediction model that accepts the continuous time as the input. We support four types of the diffusion model by setting `model_type`: 1. "noise": noise prediction model. (Trained by predicting noise). 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0). 3. "v": velocity prediction model. (Trained by predicting the velocity). The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2]. [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models." arXiv preprint arXiv:2202.00512 (2022). [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models." arXiv preprint arXiv:2210.02303 (2022). 4. "score": marginal score function. (Trained by denoising score matching). Note that the score function and the noise prediction model follows a simple relationship: ``` noise(x_t, t) = -sigma_t * score(x_t, t) ``` We support three types of guided sampling by DPMs by setting `guidance_type`: 1. "uncond": unconditional sampling by DPMs. The input `model` has the following format: `` model(x, t_input, **model_kwargs) -> noise | x_start | v | score `` 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier. The input `model` has the following format: `` model(x, t_input, **model_kwargs) -> noise | x_start | v | score `` The input `classifier_fn` has the following format: `` classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond) `` [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis," in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794. 3. "classifier-free": classifier-free guidance sampling by conditional DPMs. 
The input `model` has the following format: `` model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score `` And if cond == `unconditional_condition`, the model output is the unconditional DPM output. [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance." arXiv preprint arXiv:2207.12598 (2022). The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999) or continuous-time labels (i.e. epsilon to T). We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise: `` def model_fn(x, t_continuous) -> noise: t_input = get_model_input_time(t_continuous) return noise_pred(model, x, t_input, **model_kwargs) `` where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver. =============================================================== Args: model: A diffusion model with the corresponding format described above. noise_schedule: A noise schedule object, such as NoiseScheduleVP. model_type: A `str`. The parameterization type of the diffusion model. "noise" or "x_start" or "v" or "score". model_kwargs: A `dict`. A dict for the other inputs of the model function. guidance_type: A `str`. The type of the guidance for sampling. "uncond" or "classifier" or "classifier-free". condition: A pytorch tensor. The condition for the guided sampling. Only used for "classifier" or "classifier-free" guidance type. unconditional_condition: A pytorch tensor. The condition for the unconditional sampling. Only used for "classifier-free" guidance type. guidance_scale: A `float`. The scale for the guided sampling. classifier_fn: A classifier function. Only used for the classifier guidance. classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function. Returns: A noise prediction model that accepts the noised data and the continuous time as the inputs.
def model_wrapper(
    model,
    noise_schedule,
    model_type="noise",
    model_kwargs={},
    guidance_type="uncond",
    condition=None,
    unconditional_condition=None,
    guidance_scale=1.0,
    classifier_fn=None,
    classifier_kwargs={},
):
    """Create a wrapper function for the noise prediction model.

    DPM-Solver solves continuous-time diffusion ODEs, so models trained with
    discrete-time labels are wrapped into a noise prediction model that takes
    continuous time as input.

    Supported `model_type` parameterizations (how the raw model output is
    converted to a noise prediction):
        "noise":   used as-is.
        "x_start": epsilon = (x - alpha_t * x0_pred) / sigma_t.
        "v":       epsilon = alpha_t * v_pred + sigma_t * x  (see [1], [2]).
        "score":   epsilon = -sigma_t * score.

    Supported `guidance_type` values:
        "uncond":          unconditional sampling; model(x, t_input, **model_kwargs).
        "classifier":      classifier guidance [3]; requires `classifier_fn`.
        "classifier-free": classifier-free guidance [4]; model takes `cond` as an
                           extra positional argument, and `unconditional_condition`
                           supplies the unconditional branch.

    [1] Salimans & Ho, "Progressive distillation for fast sampling of diffusion
        models", arXiv:2202.00512 (2022).
    [2] Ho et al., "Imagen Video: High Definition Video Generation with Diffusion
        Models", arXiv:2210.02303 (2022).
    [3] Dhariwal & Nichol, "Diffusion models beat GANs on image synthesis",
        NeurIPS 2021, pp. 8780-8794.
    [4] Ho & Salimans, "Classifier-free diffusion guidance", arXiv:2207.12598 (2022).

    Args:
        model: A diffusion model with the corresponding format described above.
        noise_schedule: A noise schedule object, such as NoiseScheduleVP.
        model_type: A `str`. "noise" or "x_start" or "v" or "score".
        model_kwargs: A `dict`. Other inputs of the model function.
        guidance_type: A `str`. "uncond" or "classifier" or "classifier-free".
        condition: A pytorch tensor. Condition for guided sampling ("classifier"
            or "classifier-free" guidance only).
        unconditional_condition: A pytorch tensor. Condition for the unconditional
            branch ("classifier-free" guidance only).
        guidance_scale: A `float`. The scale for the guided sampling.
        classifier_fn: A classifier function ("classifier" guidance only).
        classifier_kwargs: A `dict`. Other inputs of the classifier function.
    Returns:
        A noise prediction model that accepts the noised data and the continuous
        time as the inputs.
    """

    def get_model_input_time(t_continuous):
        """
        Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.
        For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].
        For continuous-time DPMs, we just use `t_continuous`.
        """
        if noise_schedule.schedule == "discrete":
            return (t_continuous - 1.0 / noise_schedule.total_N) * 1000.0
        else:
            return t_continuous

    def noise_pred_fn(x, t_continuous, cond=None):
        # Broadcast a scalar time to the whole batch.
        if t_continuous.reshape((-1,)).shape[0] == 1:
            t_continuous = t_continuous.expand((x.shape[0]))
        t_input = get_model_input_time(t_continuous)
        if cond is None:
            output = model(x, t_input, **model_kwargs)
        else:
            output = model(x, t_input, cond, **model_kwargs)
        if model_type == "noise":
            return output
        elif model_type == "x_start":
            alpha_t, sigma_t = noise_schedule.marginal_alpha(
                t_continuous
            ), noise_schedule.marginal_std(t_continuous)
            dims = x.dim()
            return (x - expand_dims(alpha_t, dims) * output) / expand_dims(
                sigma_t, dims
            )
        elif model_type == "v":
            alpha_t, sigma_t = noise_schedule.marginal_alpha(
                t_continuous
            ), noise_schedule.marginal_std(t_continuous)
            dims = x.dim()
            return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x
        elif model_type == "score":
            sigma_t = noise_schedule.marginal_std(t_continuous)
            dims = x.dim()
            return -expand_dims(sigma_t, dims) * output

    def cond_grad_fn(x, t_input):
        """
        Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
        """
        with torch.enable_grad():
            x_in = x.detach().requires_grad_(True)
            log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
            return torch.autograd.grad(log_prob.sum(), x_in)[0]

    def model_fn(x, t_continuous):
        """
        The noise prediction model function that is used for DPM-Solver.
        """
        if t_continuous.reshape((-1,)).shape[0] == 1:
            t_continuous = t_continuous.expand((x.shape[0]))
        if guidance_type == "uncond":
            return noise_pred_fn(x, t_continuous)
        elif guidance_type == "classifier":
            assert classifier_fn is not None
            t_input = get_model_input_time(t_continuous)
            cond_grad = cond_grad_fn(x, t_input)
            sigma_t = noise_schedule.marginal_std(t_continuous)
            noise = noise_pred_fn(x, t_continuous)
            # Guided noise: epsilon - s * sigma_t * grad_x log p(cond | x_t).
            return (
                noise
                - guidance_scale
                * expand_dims(sigma_t, dims=cond_grad.dim())
                * cond_grad
            )
        elif guidance_type == "classifier-free":
            if guidance_scale == 1.0 or unconditional_condition is None:
                return noise_pred_fn(x, t_continuous, cond=condition)
            else:
                # Run the conditional and unconditional passes as one batch.
                x_in = torch.cat([x] * 2)
                t_in = torch.cat([t_continuous] * 2)
                c_in = torch.cat([unconditional_condition, condition])
                noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
                return noise_uncond + guidance_scale * (noise - noise_uncond)

    # NOTE(review): noise_pred_fn handles "score", but this assert rejects it —
    # kept as in the original.
    assert model_type in ["noise", "x_start", "v"]
    assert guidance_type in ["uncond", "classifier", "classifier-free"]
    return model_fn
A piecewise linear function y = f(x), using xp and yp as keypoints. We implement f(x) in a differentiable way (i.e. applicable for autograd). The function f(x) is well-defined for all x-axis. (For x beyond the bounds of xp, we use the outmost points of xp to define the linear function.) Args: x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver). xp: PyTorch tensor with shape [C, K], where K is the number of keypoints. yp: PyTorch tensor with shape [C, K]. Returns: The function values f(x), with shape [N, C].
def interpolate_fn(x, xp, yp):
    """
    A piecewise linear function y = f(x), using xp and yp as keypoints.
    We implement f(x) in a differentiable way (i.e. applicable for autograd).
    The function f(x) is well-defined for all x-axis. (For x beyond the bounds
    of xp, we use the outmost points of xp to define the linear function.)

    Args:
        x: PyTorch tensor with shape [N, C], where N is the batch size, C is the
            number of channels (we use C = 1 for DPM-Solver).
        xp: PyTorch tensor with shape [C, K], where K is the number of keypoints.
        yp: PyTorch tensor with shape [C, K].
    Returns:
        The function values f(x), with shape [N, C].
    """
    N, K = x.shape[0], xp.shape[1]
    # Insert the query x among the keypoints and sort, so that locating the
    # bracketing keypoints is done with gather/where instead of data-dependent
    # Python branching (keeps the op differentiable).
    all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2)
    sorted_all_x, x_indices = torch.sort(all_x, dim=2)
    # Position of the query x inside the sorted sequence.
    x_idx = torch.argmin(x_indices, dim=2)
    cand_start_idx = x_idx - 1
    # Clamp to the outermost segments so out-of-range queries extrapolate
    # linearly from the edge segment.
    start_idx = torch.where(
        torch.eq(x_idx, 0),
        torch.tensor(1, device=x.device),
        torch.where(
            torch.eq(x_idx, K),
            torch.tensor(K - 2, device=x.device),
            cand_start_idx,
        ),
    )
    end_idx = torch.where(
        torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1
    )
    start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2)
    end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2)
    # Separate index for yp, which is not augmented with the query point.
    start_idx2 = torch.where(
        torch.eq(x_idx, 0),
        torch.tensor(0, device=x.device),
        torch.where(
            torch.eq(x_idx, K),
            torch.tensor(K - 2, device=x.device),
            cand_start_idx,
        ),
    )
    y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1)
    start_y = torch.gather(
        y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)
    ).squeeze(2)
    end_y = torch.gather(
        y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)
    ).squeeze(2)
    # Linear interpolation on the selected segment.
    cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x)
    return cand
Expand the tensor `v` to the dim `dims`. Args: `v`: a PyTorch tensor with shape [N]. `dim`: a `int`. Returns: a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`.
def expand_dims(v, dims):
    """
    Expand the tensor `v` to the dim `dims`.

    Args:
        `v`: a PyTorch tensor with shape [N].
        `dim`: a `int`.
    Returns:
        a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`.
    """
    trailing = (None,) * (dims - 1)
    return v[(Ellipsis,) + trailing]
Zero out the parameters of a module and return it.
def zero_module(module):
    """
    Zero out the parameters of a module and return it.
    """
    # Operate outside autograd so the zeroing itself is not tracked.
    with torch.no_grad():
        for param in module.parameters():
            param.zero_()
    return module
Build sinusoidal timestep embeddings, matching the implementation in Denoising Diffusion Probabilistic Models (ported from Fairseq). This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need".
def get_timestep_embedding(timesteps, embedding_dim):
    """
    This matches the implementation in Denoising Diffusion Probabilistic Models:
    From Fairseq.
    Build sinusoidal embeddings.
    This matches the implementation in tensor2tensor, but differs slightly
    from the description in Section 3.5 of "Attention Is All You Need".
    """
    assert len(timesteps.shape) == 1
    half_dim = embedding_dim // 2
    # Log-spaced frequencies from 1 down to 1/10000.
    scale = math.log(10000) / (half_dim - 1)
    freqs = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -scale)
    freqs = freqs.to(device=timesteps.device)
    args = timesteps.float()[:, None] * freqs[None, :]
    emb = torch.cat([torch.sin(args), torch.cos(args)], dim=1)
    if embedding_dim % 2 == 1:  # zero pad
        emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
    return emb
A counter for the `thop` package to count the operations in an attention operation. Meant to be used like: macs, params = thop.profile( model, inputs=(inputs, timestamps), custom_ops={QKVAttention: QKVAttention.count_flops}, )
def count_flops_attn(model, _x, y):
    """
    A counter for the `thop` package to count the operations in an
    attention operation.
    Meant to be used like:
        macs, params = thop.profile(
            model,
            inputs=(inputs, timestamps),
            custom_ops={QKVAttention: QKVAttention.count_flops},
        )
    """
    batch, channels, *spatial = y[0].shape
    spatial_size = int(np.prod(spatial))
    # Two matmuls of equal cost: one forms the attention weight matrix,
    # the other combines the value vectors.
    flops = 2 * batch * spatial_size * spatial_size * channels
    model.total_ops += th.DoubleTensor([flops])
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. :param num_diffusion_timesteps: the number of betas to produce. :param alpha_bar: a lambda that takes an argument t from 0 to 1 and produces the cumulative product of (1-beta) up to that part of the diffusion process. :param max_beta: the maximum beta to use; use values lower than 1 to prevent singularities.
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function,
    which defines the cumulative product of (1-beta) over time from t = [0,1].

    :param num_diffusion_timesteps: the number of betas to produce.
    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
                      produces the cumulative product of (1-beta) up to that
                      part of the diffusion process.
    :param max_beta: the maximum beta to use; use values lower than 1 to
                     prevent singularities.
    """
    steps = num_diffusion_timesteps
    # beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta.
    betas = [
        min(1 - alpha_bar((i + 1) / steps) / alpha_bar(i / steps), max_beta)
        for i in range(steps)
    ]
    return np.array(betas)
Evaluate a function without caching intermediate activations, allowing for reduced memory at the expense of extra compute in the backward pass. :param func: the function to evaluate. :param inputs: the argument sequence to pass to `func`. :param params: a sequence of parameters `func` depends on but does not explicitly take as arguments. :param flag: if False, disable gradient checkpointing.
def checkpoint(func, inputs, params, flag):
    """
    Evaluate a function without caching intermediate activations, allowing for
    reduced memory at the expense of extra compute in the backward pass.

    :param func: the function to evaluate.
    :param inputs: the argument sequence to pass to `func`.
    :param params: a sequence of parameters `func` depends on but does not
                   explicitly take as arguments.
    :param flag: if False, disable gradient checkpointing.
    """
    if not flag:
        return func(*inputs)
    # CheckpointFunction re-runs the forward pass during backward.
    args = tuple(inputs) + tuple(params)
    return CheckpointFunction.apply(func, len(inputs), *args)
Create sinusoidal timestep embeddings. :param timesteps: a 1-D Tensor of N indices, one per batch element. These may be fractional. :param dim: the dimension of the output. :param max_period: controls the minimum frequency of the embeddings. :return: an [N x dim] Tensor of positional embeddings.
def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
    """
    Create sinusoidal timestep embeddings.

    :param timesteps: a 1-D Tensor of N indices, one per batch element.
                      These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :return: an [N x dim] Tensor of positional embeddings.
    """
    if repeat_only:
        # Just tile the raw timestep across the embedding dimension.
        return repeat(timesteps, "b -> b d", d=dim)
    half = dim // 2
    exponent = (
        -math.log(max_period)
        * torch.arange(start=0, end=half, dtype=torch.float32)
        / half
    )
    freqs = torch.exp(exponent).to(device=timesteps.device)
    args = timesteps[:, None].float() * freqs[None]
    embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
    if dim % 2:
        # Odd dim: zero-pad the last column.
        embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
    return embedding
Zero out the parameters of a module and return it.
def zero_module(module):
    """
    Zero out the parameters of a module and return it.
    """
    # Zero in place without recording the operation in autograd.
    with torch.no_grad():
        for weight in module.parameters():
            weight.zero_()
    return module
Scale the parameters of a module and return it.
def scale_module(module, scale):
    """
    Scale the parameters of a module and return it.
    """
    # Multiply in place without recording the operation in autograd.
    with torch.no_grad():
        for weight in module.parameters():
            weight.mul_(scale)
    return module
Take the mean over all non-batch dimensions.
def mean_flat(tensor):
    """
    Take the mean over all non-batch dimensions.
    """
    reduce_dims = list(range(1, tensor.dim()))
    return tensor.mean(dim=reduce_dims)
Make a standard normalization layer. :param channels: number of input channels. :return: an nn.Module for normalization.
def normalization(channels):
    """
    Make a standard normalization layer.

    :param channels: number of input channels.
    :return: an nn.Module for normalization.
    """
    # GroupNorm32 is defined elsewhere in the project; presumably a GroupNorm
    # subclass that computes in float32 — confirm at its definition.
    return GroupNorm32(32, channels)
def conv_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D convolution module.

    :param dims: spatial dimensionality of the convolution (1, 2, or 3).
    :raises ValueError: if ``dims`` is anything else.
    """
    conv_classes = {1: nn.Conv1d, 2: nn.Conv2d, 3: nn.Conv3d}
    if dims not in conv_classes:
        raise ValueError(f"unsupported dimensions: {dims}")
    return conv_classes[dims](*args, **kwargs)
def linear(*args, **kwargs):
    """
    Create a linear module.
    """
    # Thin pass-through around nn.Linear. NOTE(review): presumably kept as an
    # indirection point so the linear implementation can be swapped without
    # touching call sites — no behavior of its own.
    return nn.Linear(*args, **kwargs)
def avg_pool_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D average pooling module.

    :param dims: spatial dimensionality of the pooling (1, 2, or 3).
    :raises ValueError: if ``dims`` is anything else.
    """
    pool_classes = {1: nn.AvgPool1d, 2: nn.AvgPool2d, 3: nn.AvgPool3d}
    if dims not in pool_classes:
        raise ValueError(f"unsupported dimensions: {dims}")
    return pool_classes[dims](*args, **kwargs)
def normal_kl(mean1, logvar1, mean2, logvar2):
    """
    Compute the KL divergence between two diagonal Gaussians.

    source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12

    Shapes are automatically broadcasted, so batches can be compared to
    scalars, among other use cases. At least one argument must be a Tensor.
    """
    reference = next(
        (arg for arg in (mean1, logvar1, mean2, logvar2) if isinstance(arg, torch.Tensor)),
        None,
    )
    assert reference is not None, "at least one argument must be a Tensor"

    # Promote scalar log-variances to tensors: broadcasting handles scalars in
    # arithmetic, but torch.exp() needs real tensors.
    lv1 = logvar1 if isinstance(logvar1, torch.Tensor) else torch.tensor(logvar1).to(reference)
    lv2 = logvar2 if isinstance(logvar2, torch.Tensor) else torch.tensor(logvar2).to(reference)

    return 0.5 * (
        -1.0
        + lv2
        - lv1
        + torch.exp(lv1 - lv2)
        + ((mean1 - mean2) ** 2) * torch.exp(-lv2)
    )
def modcrop_np(img, sf):
    """
    Crop a numpy image so its first two dimensions are multiples of ``sf``.

    Args:
        img: numpy image, WxH or WxHxC
        sf: scale factor
    Return:
        cropped image (a copy; the input is not modified)
    """
    dim0, dim1 = img.shape[:2]
    return np.copy(img)[: dim0 - dim0 % sf, : dim1 - dim1 % sf, ...]
def analytic_kernel(k):
    """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
    size = k.shape[0]
    big = np.zeros((3 * size - 2, 3 * size - 2))
    # Superpose k scaled by each of its own taps, stepping two pixels per tap.
    for row in range(size):
        for col in range(size):
            big[2 * row : 2 * row + size, 2 * col : 2 * col + size] += k[row, col] * k
    # Trim the low-magnitude border (speeds up SR) and renormalize to unit sum.
    margin = size // 2
    core = big[margin:-margin, margin:-margin]
    return core / core.sum()
def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
    """generate an anisotropic Gaussian kernel

    Args:
        ksize : e.g., 15, kernel size
        theta : [0, pi], rotation angle range
        l1    : [0.1,50], scaling of eigenvalues
        l2    : [0.1,l1], scaling of eigenvalues
        If l1 = l2, will get an isotropic Gaussian kernel.
    Returns:
        k     : kernel
    """
    # First eigenvector: the unit x-axis rotated by theta.
    rot = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
    v = rot @ np.array([1.0, 0.0])
    # Orthogonal eigenbasis built from v, scaled by the two eigenvalues.
    V = np.array([[v[0], v[1]], [v[1], -v[0]]])
    D = np.array([[l1, 0], [0, l2]])
    Sigma = V @ D @ np.linalg.inv(V)
    # gm_blur_kernel is defined elsewhere in this module.
    return gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
def shift_pixel(x, sf, upper_left=True):
    """shift pixel for super-resolution with different scale factors
    Args:
        x: WxHxC or WxH
        sf: scale factor
        upper_left: shift direction
    """
    h, w = x.shape[:2]
    # Half-pixel offset induced by a factor-sf resize.
    shift = (sf - 1) * 0.5
    xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
    if upper_left:
        x1 = xv + shift
        y1 = yv + shift
    else:
        x1 = xv - shift
        y1 = yv - shift
    # Clamp sample coordinates to the image boundary.
    x1 = np.clip(x1, 0, w - 1)
    y1 = np.clip(y1, 0, h - 1)
    # NOTE(review): scipy.interpolate.interp2d is deprecated and removed in
    # SciPy >= 1.14; this needs porting (e.g. to RegularGridInterpolator) —
    # confirm the pinned SciPy version before upgrading.
    if x.ndim == 2:
        x = interp2d(xv, yv, x)(x1, y1)
    if x.ndim == 3:
        # Resample each channel independently; modifies x in place channel-wise.
        for i in range(x.shape[-1]):
            x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)
    return x
def blur(x, k):
    """
    Blur a batch of images, one kernel per sample, sharing the kernel across channels.

    x: image, NxcxHxW
    k: kernel, Nx1xhxw
    """
    n, c = x.shape[:2]
    ph, pw = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
    padded = torch.nn.functional.pad(x, pad=(ph, pw, ph, pw), mode="replicate")
    # Tile the single-channel kernel over channels, then fold batch*channel into
    # the group axis so each (sample, channel) pair gets its own kernel.
    weight = k.repeat(1, c, 1, 1).view(-1, 1, k.shape[2], k.shape[3])
    flat = padded.view(1, -1, padded.shape[2], padded.shape[3])
    out = torch.nn.functional.conv2d(
        flat, weight, bias=None, stride=1, padding=0, groups=n * c
    )
    return out.view(n, c, out.shape[2], out.shape[3])
" # modified version of https://github.com/assafshocher/BlindSR_dataset_generator # Kai Zhang # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var # max_var = 2.5 * sf
def gen_kernel( k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10.0, noise_level=0, ): """ " # modified version of https://github.com/assafshocher/BlindSR_dataset_generator # Kai Zhang # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var # max_var = 2.5 * sf """ # Set random eigen-vals (lambdas) and angle (theta) for COV matrix lambda_1 = min_var + np.random.rand() * (max_var - min_var) lambda_2 = min_var + np.random.rand() * (max_var - min_var) theta = np.random.rand() * np.pi # random theta noise = -noise_level + np.random.rand(*k_size) * noise_level * 2 # Set COV matrix using Lambdas and Theta LAMBDA = np.diag([lambda_1, lambda_2]) Q = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) SIGMA = Q @ LAMBDA @ Q.T INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :] # Set expectation position (shifting kernel for aligned image) MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2) MU = MU[None, None, :, None] # Create meshgrid for Gaussian [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1])) Z = np.stack([X, Y], 2)[:, :, :, None] # Calcualte Gaussian for every pixel of the kernel ZZ = Z - MU ZZ_t = ZZ.transpose(0, 1, 3, 2) raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise) # shift the kernel so it will be centered # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor) # Normalize the kernel and return # kernel = raw_kernel_centered / np.sum(raw_kernel_centered) kernel = raw_kernel / np.sum(raw_kernel) return kernel
def fspecial(filter_type, *args, **kwargs):
    """
    Dispatch to a MATLAB-style ``fspecial`` kernel builder.

    python code from:
    https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py

    :param filter_type: either "gaussian" or "laplacian".
    :raises ValueError: for any other filter type. (The original fell through
        and returned None silently, which surfaced later as confusing errors
        at the call site.)
    """
    if filter_type == "gaussian":
        return fspecial_gaussian(*args, **kwargs)
    if filter_type == "laplacian":
        return fspecial_laplacian(*args, **kwargs)
    raise ValueError(f"unsupported filter type: {filter_type}")
def bicubic_degradation(x, sf=3):
    """
    Bicubically downsample an image by factor ``sf``.

    Args:
        x: HxWxC image, [0, 1]
        sf: down-scale factor
    Return:
        bicubicly downsampled LR image
    """
    # util is a project-local helper module providing MATLAB-style imresize.
    return util.imresize_np(x, scale=1 / sf)
def srmd_degradation(x, k, sf=3):
    """blur + bicubic downsampling
    Args:
        x: HxWxC image, [0, 1]
        k: hxw, double
        sf: down-scale factor
    Return:
        downsampled LR image
    Reference:
        @inproceedings{zhang2018learning,
          title={Learning a single convolutional super-resolution network for multiple degradations},
          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
          pages={3262--3271},
          year={2018}
        }
    """
    # Blur with the 2-D kernel broadcast over channels; "wrap" pads circularly.
    # Fixed: the scipy.ndimage.filters namespace was deprecated and removed in
    # SciPy 1.15 — ndimage.convolve is the supported equivalent.
    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode="wrap")  # 'nearest' | 'mirror'
    # Then bicubic downsampling (blur-first ordering, per SRMD).
    x = bicubic_degradation(x, sf=sf)
    return x
def dpsr_degradation(x, k, sf=3):
    """bicubic downsampling + blur
    Args:
        x: HxWxC image, [0, 1]
        k: hxw, double
        sf: down-scale factor
    Return:
        downsampled LR image
    Reference:
        @inproceedings{zhang2019deep,
          title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
          pages={1671--1681},
          year={2019}
        }
    """
    # Downsample first, then blur (the reverse ordering of srmd_degradation).
    x = bicubic_degradation(x, sf=sf)
    # Fixed: the scipy.ndimage.filters namespace was deprecated and removed in
    # SciPy 1.15 — ndimage.convolve is the supported equivalent.
    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode="wrap")
    return x
def classical_degradation(x, k, sf=3):
    """blur + downsampling
    Args:
        x: HxWxC image, [0, 1]/[0, 255]
        k: hxw, double
        sf: down-scale factor
    Return:
        downsampled LR image
    """
    # Blur with the 2-D kernel broadcast over channels; "wrap" pads circularly.
    # Fixed: the scipy.ndimage.filters namespace was deprecated and removed in
    # SciPy 1.15 — ndimage.convolve is the supported equivalent.
    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode="wrap")
    # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
    # Sub-sample from the top-left corner (dropped the pointless `st = 0` local).
    return x[0::sf, 0::sf, ...]
def add_sharpening(img, weight=0.5, radius=50, threshold=10):
    """USM sharpening. borrowed from real-ESRGAN

    Input image: I; Blurry image: B.
    1. K = I + weight * (I - B)
    2. Mask = 1 if abs(I - B) > threshold, else: 0
    3. Blur mask:
    4. Out = Mask * K + (1 - Mask) * I

    Args:
        img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
        weight (float): Sharp weight. Default: 1.
        radius (float): Kernel size of Gaussian blur. Default: 50.
        threshold (int):
    """
    if radius % 2 == 0:
        radius += 1  # cv2.GaussianBlur requires an odd kernel size
    blurred = cv2.GaussianBlur(img, (radius, radius), 0)
    residual = img - blurred
    # Binary mask of strong-edge pixels, feathered with the same Gaussian.
    mask = (np.abs(residual) * 255 > threshold).astype("float32")
    soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)
    sharpened = np.clip(img + weight * residual, 0, 1)
    # Blend: sharpened where edges are strong, original elsewhere.
    return soft_mask * sharpened + (1 - soft_mask) * img
def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
    """
    This is the degradation model of BSRGAN from the paper
    "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
    ----------
    img: HXWXC, [0, 1], its size should be larger than (lq_patchsizexsf)x(lq_patchsizexsf)
    sf: scale factor
    isp_model: camera ISP model
    Returns
    -------
    img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
    hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
    """
    # Probabilities for: ISP-noise stage, JPEG stage, extra 2x pre-downsample.
    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
    sf_ori = sf  # remember the requested factor; sf may be halved below

    h1, w1 = img.shape[:2]
    img = img.copy()[: w1 - w1 % sf, : h1 - h1 % sf, ...]  # mod crop
    h, w = img.shape[:2]

    if h < lq_patchsize * sf or w < lq_patchsize * sf:
        raise ValueError(f"img size ({h1}X{w1}) is too small!")

    hq = img.copy()

    # Optionally pre-downsample 2x for sf=4, then run the rest at sf=2.
    if sf == 4 and random.random() < scale2_prob:  # downsample1
        if np.random.rand() < 0.5:
            # interpolation 1/2/3 = cv2 INTER_LINEAR / INTER_CUBIC / INTER_AREA
            img = cv2.resize(
                img,
                (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
                interpolation=random.choice([1, 2, 3]),
            )
        else:
            img = util.imresize_np(img, 1 / 2, True)
        img = np.clip(img, 0.0, 1.0)
        sf = 2

    # Random order of the 7 degradation stages, except stage 2 must run
    # before stage 3 so the final downsample (3) comes last of the two.
    shuffle_order = random.sample(range(7), 7)
    idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
    if idx1 > idx2:  # keep downsample3 last
        shuffle_order[idx1], shuffle_order[idx2] = (
            shuffle_order[idx2],
            shuffle_order[idx1],
        )

    # add_blur / add_Gaussian_noise / add_JPEG_noise / random_crop are
    # helpers defined elsewhere in this module.
    for i in shuffle_order:
        if i == 0:
            img = add_blur(img, sf=sf)
        elif i == 1:
            # Applied twice in the pipeline (stages 0 and 1) on purpose.
            img = add_blur(img, sf=sf)
        elif i == 2:
            a, b = img.shape[1], img.shape[0]
            # downsample2
            if random.random() < 0.75:
                # Resize by a random fractional factor in [1, 2*sf].
                sf1 = random.uniform(1, 2 * sf)
                img = cv2.resize(
                    img,
                    (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
                    interpolation=random.choice([1, 2, 3]),
                )
            else:
                # Shifted-Gaussian blur followed by nearest subsampling.
                k = fspecial("gaussian", 25, random.uniform(0.1, 0.6 * sf))
                k_shifted = shift_pixel(k, sf)
                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
                # NOTE(review): ndimage.filters namespace removed in SciPy 1.15;
                # port to ndimage.convolve when upgrading.
                img = ndimage.filters.convolve(
                    img, np.expand_dims(k_shifted, axis=2), mode="mirror"
                )
                img = img[0::sf, 0::sf, ...]  # nearest downsampling
            img = np.clip(img, 0.0, 1.0)
        elif i == 3:
            # downsample3 — resize to the exact target size captured in (a, b).
            # NOTE(review): (a, b) are set in stage 2; the shuffle above
            # guarantees stage 2 runs first, so they are always bound here.
            img = cv2.resize(
                img,
                (int(1 / sf * a), int(1 / sf * b)),
                interpolation=random.choice([1, 2, 3]),
            )
            img = np.clip(img, 0.0, 1.0)
        elif i == 4:
            # add Gaussian noise
            img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
        elif i == 5:
            # add JPEG noise
            if random.random() < jpeg_prob:
                img = add_JPEG_noise(img)
        elif i == 6:
            # add processed camera sensor noise (also re-renders hq via the ISP)
            if random.random() < isp_prob and isp_model is not None:
                with torch.no_grad():
                    img, hq = isp_model.forward(img.copy(), hq)

    # add final JPEG compression noise
    img = add_JPEG_noise(img)

    # random crop — uses the original sf so LQ/HQ patch sizes stay consistent
    img, hq = random_crop(img, hq, sf_ori, lq_patchsize)

    return img, hq
def degradation_bsrgan_variant(image, sf=4, isp_model=None):
    """
    This is the degradation model of BSRGAN from the paper
    "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
    ----------
    sf: scale factor
    isp_model: camera ISP model (unused here — the ISP stage is commented out)
    Returns
    -------
    img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
    hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
    """
    # uint8 -> float [0, 1]; util is a project-local helper module.
    image = util.uint2single(image)
    # isp_prob is effectively unused: the ISP branch below is commented out.
    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
    h1, w1 = image.shape[:2]
    image = image.copy()[: w1 - w1 % sf, : h1 - h1 % sf, ...]  # mod crop
    h, w = image.shape[:2]
    # NOTE(review): this .copy() result is discarded — dead statement left
    # over from the non-variant version (which keeps an hq copy here).
    image.copy()

    # Optionally pre-downsample 2x for sf=4, then run the rest at sf=2.
    if sf == 4 and random.random() < scale2_prob:  # downsample1
        if np.random.rand() < 0.5:
            # interpolation 1/2/3 = cv2 INTER_LINEAR / INTER_CUBIC / INTER_AREA
            image = cv2.resize(
                image,
                (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
                interpolation=random.choice([1, 2, 3]),
            )
        else:
            image = util.imresize_np(image, 1 / 2, True)
        image = np.clip(image, 0.0, 1.0)
        sf = 2

    # Random order of the 7 stages, keeping downsample2 (2) before downsample3 (3).
    shuffle_order = random.sample(range(7), 7)
    idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
    if idx1 > idx2:  # keep downsample3 last
        shuffle_order[idx1], shuffle_order[idx2] = (
            shuffle_order[idx2],
            shuffle_order[idx1],
        )

    # add_blur / add_Gaussian_noise / add_JPEG_noise are helpers defined
    # elsewhere in this module.
    for i in shuffle_order:
        if i == 0:
            image = add_blur(image, sf=sf)
        elif i == 1:
            # Applied twice (stages 0 and 1) on purpose.
            image = add_blur(image, sf=sf)
        elif i == 2:
            a, b = image.shape[1], image.shape[0]
            # downsample2
            if random.random() < 0.75:
                # Resize by a random fractional factor in [1, 2*sf].
                sf1 = random.uniform(1, 2 * sf)
                image = cv2.resize(
                    image,
                    (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
                    interpolation=random.choice([1, 2, 3]),
                )
            else:
                # Shifted-Gaussian blur followed by nearest subsampling.
                k = fspecial("gaussian", 25, random.uniform(0.1, 0.6 * sf))
                k_shifted = shift_pixel(k, sf)
                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
                # NOTE(review): ndimage.filters namespace removed in SciPy 1.15;
                # port to ndimage.convolve when upgrading.
                image = ndimage.filters.convolve(
                    image, np.expand_dims(k_shifted, axis=2), mode="mirror"
                )
                image = image[0::sf, 0::sf, ...]  # nearest downsampling
            image = np.clip(image, 0.0, 1.0)
        elif i == 3:
            # downsample3 — resize to the target size captured in (a, b);
            # the shuffle above guarantees stage 2 already bound them.
            image = cv2.resize(
                image,
                (int(1 / sf * a), int(1 / sf * b)),
                interpolation=random.choice([1, 2, 3]),
            )
            image = np.clip(image, 0.0, 1.0)
        elif i == 4:
            # add Gaussian noise
            image = add_Gaussian_noise(image, noise_level1=2, noise_level2=25)
        elif i == 5:
            # add JPEG noise
            if random.random() < jpeg_prob:
                image = add_JPEG_noise(image)
        # elif i == 6:
        #     # add processed camera sensor noise
        #     if random.random() < isp_prob and isp_model is not None:
        #         with torch.no_grad():
        #             img, hq = isp_model.forward(img.copy(), hq)

    # add final JPEG compression noise
    image = add_JPEG_noise(image)
    # Back to uint8 and wrap in the dataset's example dict.
    image = util.single2uint(image)
    example = {"image": image}
    return example
def degradation_bsrgan_plus(
    img, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patchsize=64, isp_model=None
):
    """
    This is an extended degradation model by combining
    the degradation models of BSRGAN and Real-ESRGAN
    ----------
    img: HXWXC, [0, 1], its size should be larger than (lq_patchsizexsf)x(lq_patchsizexsf)
    sf: scale factor
    shuffle_prob: probability of fully shuffling the 13 degradation stages
    use_sharp: sharpening the img
    isp_model: camera ISP model
    Returns
    -------
    img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
    hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
    """
    h1, w1 = img.shape[:2]
    img = img.copy()[: w1 - w1 % sf, : h1 - h1 % sf, ...]  # mod crop
    h, w = img.shape[:2]

    if h < lq_patchsize * sf or w < lq_patchsize * sf:
        raise ValueError(f"img size ({h1}X{w1}) is too small!")

    if use_sharp:
        # USM-sharpen the source before degrading (Real-ESRGAN style).
        img = add_sharpening(img)
    hq = img.copy()

    # Either a full shuffle of all 13 stages, or the canonical order with
    # only the two noise groups (2-5 and 9-12) shuffled locally.
    if random.random() < shuffle_prob:
        shuffle_order = random.sample(range(13), 13)
    else:
        shuffle_order = list(range(13))
        # local shuffle for noise, JPEG is always the last one
        shuffle_order[2:6] = random.sample(shuffle_order[2:6], len(range(2, 6)))
        shuffle_order[9:13] = random.sample(shuffle_order[9:13], len(range(9, 13)))

    poisson_prob, speckle_prob, isp_prob = 0.1, 0.1, 0.1

    # add_blur / add_resize / add_*_noise / add_JPEG_noise / random_crop are
    # helpers defined elsewhere in this module. Stages 0-6 and 7-12 form two
    # similar blur/resize/noise rounds.
    for i in shuffle_order:
        if i == 0:
            img = add_blur(img, sf=sf)
        elif i == 1:
            img = add_resize(img, sf=sf)
        elif i == 2:
            img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
        elif i == 3:
            if random.random() < poisson_prob:
                img = add_Poisson_noise(img)
        elif i == 4:
            if random.random() < speckle_prob:
                img = add_speckle_noise(img)
        elif i == 5:
            # Camera ISP noise (re-renders hq through the ISP as well).
            if random.random() < isp_prob and isp_model is not None:
                with torch.no_grad():
                    img, hq = isp_model.forward(img.copy(), hq)
        elif i == 6:
            img = add_JPEG_noise(img)
        elif i == 7:
            img = add_blur(img, sf=sf)
        elif i == 8:
            img = add_resize(img, sf=sf)
        elif i == 9:
            img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
        elif i == 10:
            if random.random() < poisson_prob:
                img = add_Poisson_noise(img)
        elif i == 11:
            if random.random() < speckle_prob:
                img = add_speckle_noise(img)
        elif i == 12:
            if random.random() < isp_prob and isp_model is not None:
                with torch.no_grad():
                    img, hq = isp_model.forward(img.copy(), hq)
        else:
            # Unreachable unless the shuffle list is corrupted.
            print("check the shuffle!")

    # resize to desired size
    # interpolation 1/2/3 = cv2 INTER_LINEAR / INTER_CUBIC / INTER_AREA
    img = cv2.resize(
        img,
        (int(1 / sf * hq.shape[1]), int(1 / sf * hq.shape[0])),
        interpolation=random.choice([1, 2, 3]),
    )

    # add final JPEG compression noise
    img = add_JPEG_noise(img)

    # random crop
    img, hq = random_crop(img, hq, sf, lq_patchsize)

    return img, hq
def modcrop_np(img, sf):
    """
    Crop ``img`` so that both leading dimensions divide evenly by ``sf``.

    Args:
        img: numpy image, WxH or WxHxC
        sf: scale factor
    Return:
        cropped image (a copy)
    """
    # NOTE(review): duplicate of an earlier modcrop_np definition in this
    # file; this later definition is the one in effect after import.
    rows, cols = img.shape[0], img.shape[1]
    return img.copy()[: rows - rows % sf, : cols - cols % sf, ...]
def analytic_kernel(k):
    """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
    # NOTE(review): duplicate definition — identical logic appears earlier in
    # this file; this later one is in effect after import.
    n = k.shape[0]
    big_k = np.zeros((3 * n - 2, 3 * n - 2))
    # Each tap of k stamps a scaled copy of k, offset by two pixels per tap.
    for (r, c), tap in np.ndenumerate(k):
        big_k[2 * r : 2 * r + n, 2 * c : 2 * c + n] += tap * k
    # Discard the tiny-valued border and renormalize.
    edge = n // 2
    trimmed = big_k[edge:-edge, edge:-edge]
    return trimmed / trimmed.sum()
def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
    """generate an anisotropic Gaussian kernel

    Args:
        ksize : e.g., 15, kernel size
        theta : [0, pi], rotation angle range
        l1    : [0.1,50], scaling of eigenvalues
        l2    : [0.1,l1], scaling of eigenvalues
        If l1 = l2, will get an isotropic Gaussian kernel.
    Returns:
        k     : kernel
    """
    # NOTE(review): duplicate definition — the same function appears earlier
    # in this file; this later one is in effect after import.
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    v = np.dot(np.array([[cos_t, -sin_t], [sin_t, cos_t]]), np.array([1.0, 0.0]))
    V = np.array([[v[0], v[1]], [v[1], -v[0]]])
    D = np.array([[l1, 0], [0, l2]])
    Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
    # gm_blur_kernel is defined elsewhere in this module.
    return gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
def shift_pixel(x, sf, upper_left=True):
    """shift pixel for super-resolution with different scale factors
    Args:
        x: WxHxC or WxH
        sf: scale factor
        upper_left: shift direction
    """
    # NOTE(review): duplicate definition — the same function appears earlier
    # in this file; this later one is in effect after import.
    h, w = x.shape[:2]
    # Half-pixel offset induced by a factor-sf resize.
    shift = (sf - 1) * 0.5
    xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
    if upper_left:
        x1 = xv + shift
        y1 = yv + shift
    else:
        x1 = xv - shift
        y1 = yv - shift
    # Clamp sample coordinates to the image boundary.
    x1 = np.clip(x1, 0, w - 1)
    y1 = np.clip(y1, 0, h - 1)
    # NOTE(review): scipy.interpolate.interp2d was removed in SciPy >= 1.14;
    # this needs porting before upgrading the pinned SciPy.
    if x.ndim == 2:
        x = interp2d(xv, yv, x)(x1, y1)
    if x.ndim == 3:
        # Resample each channel independently; modifies x in place channel-wise.
        for i in range(x.shape[-1]):
            x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)
    return x
def blur(x, k):
    """
    Apply a per-sample blur kernel to a batch, sharing it across channels.

    x: image, NxcxHxW
    k: kernel, Nx1xhxw
    """
    # NOTE(review): duplicate definition — the same function appears earlier
    # in this file; this later one is in effect after import.
    batch, channels = x.shape[0], x.shape[1]
    kh, kw = k.shape[-2], k.shape[-1]
    half_h, half_w = (kh - 1) // 2, (kw - 1) // 2
    x = torch.nn.functional.pad(
        x, pad=(half_h, half_w, half_h, half_w), mode="replicate"
    )
    # Group trick: fold batch*channel into groups so every (sample, channel)
    # pair is convolved with its own copy of the sample's kernel.
    weight = k.repeat(1, channels, 1, 1).reshape(-1, 1, kh, kw)
    x = torch.nn.functional.conv2d(
        x.reshape(1, -1, x.shape[2], x.shape[3]),
        weight,
        bias=None,
        stride=1,
        padding=0,
        groups=batch * channels,
    )
    return x.reshape(batch, channels, x.shape[2], x.shape[3])
" # modified version of https://github.com/assafshocher/BlindSR_dataset_generator # Kai Zhang # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var # max_var = 2.5 * sf
def gen_kernel( k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10.0, noise_level=0, ): """ " # modified version of https://github.com/assafshocher/BlindSR_dataset_generator # Kai Zhang # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var # max_var = 2.5 * sf """ # Set random eigen-vals (lambdas) and angle (theta) for COV matrix lambda_1 = min_var + np.random.rand() * (max_var - min_var) lambda_2 = min_var + np.random.rand() * (max_var - min_var) theta = np.random.rand() * np.pi # random theta noise = -noise_level + np.random.rand(*k_size) * noise_level * 2 # Set COV matrix using Lambdas and Theta LAMBDA = np.diag([lambda_1, lambda_2]) Q = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) SIGMA = Q @ LAMBDA @ Q.T INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :] # Set expectation position (shifting kernel for aligned image) MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2) MU = MU[None, None, :, None] # Create meshgrid for Gaussian [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1])) Z = np.stack([X, Y], 2)[:, :, :, None] # Calcualte Gaussian for every pixel of the kernel ZZ = Z - MU ZZ_t = ZZ.transpose(0, 1, 3, 2) raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise) # shift the kernel so it will be centered # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor) # Normalize the kernel and return # kernel = raw_kernel_centered / np.sum(raw_kernel_centered) kernel = raw_kernel / np.sum(raw_kernel) return kernel
def fspecial(filter_type, *args, **kwargs):
    """
    Dispatch to a MATLAB-style ``fspecial`` kernel builder.

    python code from:
    https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py

    NOTE(review): duplicate definition — the same function appears earlier in
    this file; this later one is in effect after import.

    :param filter_type: either "gaussian" or "laplacian".
    :raises ValueError: for any other filter type. (The original fell through
        and returned None silently.)
    """
    if filter_type == "gaussian":
        return fspecial_gaussian(*args, **kwargs)
    if filter_type == "laplacian":
        return fspecial_laplacian(*args, **kwargs)
    raise ValueError(f"unsupported filter type: {filter_type}")