Column schema of the rows shown below:

| Column | Type |
|---|---|
| Unnamed: 0 | int64 (0 - 10k) |
| repository_name | string (length 7 - 54) |
| func_path_in_repository | string (length 5 - 223) |
| func_name | string (length 1 - 134) |
| whole_func_string | string (length 100 - 30.3k) |
| language | string (1 class: "python") |
| func_code_string | string (length 100 - 30.3k) |
| func_code_tokens | string (length 138 - 33.2k) |
| func_documentation_string | string (length 1 - 15k) |
| func_documentation_tokens | string (length 5 - 5.14k) |
| split_name | string (1 class: "train") |
| func_code_url | string (length 91 - 315) |

| Unnamed: 0 | repository_name | func_path_in_repository | func_name | whole_func_string | language | func_code_string | func_code_tokens | func_documentation_string | func_documentation_tokens | split_name | func_code_url |
|---|---|---|---|---|---|---|---|---|---|---|---|
6,600 | awslabs/sockeye | sockeye/config.py | Config.save | def save(self, fname: str):
"""
Saves this Config (without the frozen state) to a file called fname.
:param fname: Name of file to store this Config in.
"""
obj = copy.deepcopy(self)
obj.__del_frozen()
with open(fname, 'w') as out:
yaml.dump(obj, out, default_flow_style=False) | python | def save(self, fname: str):
"""
Saves this Config (without the frozen state) to a file called fname.
:param fname: Name of file to store this Config in.
"""
obj = copy.deepcopy(self)
obj.__del_frozen()
with open(fname, 'w') as out:
yaml.dump(obj, out, default_flow_style=False) | ['def', 'save', '(', 'self', ',', 'fname', ':', 'str', ')', ':', 'obj', '=', 'copy', '.', 'deepcopy', '(', 'self', ')', 'obj', '.', '__del_frozen', '(', ')', 'with', 'open', '(', 'fname', ',', "'w'", ')', 'as', 'out', ':', 'yaml', '.', 'dump', '(', 'obj', ',', 'out', ',', 'default_flow_style', '=', 'False', ')'] | Saves this Config (without the frozen state) to a file called fname.
:param fname: Name of file to store this Config in. | ['Saves', 'this', 'Config', '(', 'without', 'the', 'frozen', 'state', ')', 'to', 'a', 'file', 'called', 'fname', '.'] | train | https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/config.py#L102-L111 |
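
A minimal usage sketch for the `Config.save` method above; the `DemoConfig` subclass and its field are hypothetical stand-ins, and only the `save` call itself mirrors the row's code.

```python
from sockeye.config import Config

class DemoConfig(Config):              # hypothetical subclass for illustration
    def __init__(self, vocab_size):
        super().__init__()
        self.vocab_size = vocab_size

cfg = DemoConfig(vocab_size=32000)
cfg.save("demo_config.yaml")           # deep-copies self, drops frozen state, writes YAML
```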
6,601 | robromano/django-adminrestrict | adminrestrict/middleware.py | get_ip_address_from_request | def get_ip_address_from_request(request):
"""
Makes the best attempt to get the client's real IP or return the loopback
"""
PRIVATE_IPS_PREFIX = ('10.', '172.', '192.', '127.')
ip_address = ''
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR', '')
if x_forwarded_for and ',' not in x_forwarded_for:
if not x_forwarded_for.startswith(PRIVATE_IPS_PREFIX) and is_valid_ip(x_forwarded_for):
ip_address = x_forwarded_for.strip()
else:
ips = [ip.strip() for ip in x_forwarded_for.split(',')]
for ip in ips:
if ip.startswith(PRIVATE_IPS_PREFIX):
continue
elif not is_valid_ip(ip):
continue
else:
ip_address = ip
break
if not ip_address:
x_real_ip = request.META.get('HTTP_X_REAL_IP', '')
if x_real_ip:
if not x_real_ip.startswith(PRIVATE_IPS_PREFIX) and is_valid_ip(x_real_ip):
ip_address = x_real_ip.strip()
if not ip_address:
remote_addr = request.META.get('REMOTE_ADDR', '')
if remote_addr:
if not remote_addr.startswith(PRIVATE_IPS_PREFIX) and is_valid_ip(remote_addr):
ip_address = remote_addr.strip()
if remote_addr.startswith(PRIVATE_IPS_PREFIX) and is_valid_ip(remote_addr):
ip_address = remote_addr.strip()
if not ip_address:
ip_address = '127.0.0.1'
return ip_address | python | def get_ip_address_from_request(request):
"""
Makes the best attempt to get the client's real IP or return the loopback
"""
PRIVATE_IPS_PREFIX = ('10.', '172.', '192.', '127.')
ip_address = ''
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR', '')
if x_forwarded_for and ',' not in x_forwarded_for:
if not x_forwarded_for.startswith(PRIVATE_IPS_PREFIX) and is_valid_ip(x_forwarded_for):
ip_address = x_forwarded_for.strip()
else:
ips = [ip.strip() for ip in x_forwarded_for.split(',')]
for ip in ips:
if ip.startswith(PRIVATE_IPS_PREFIX):
continue
elif not is_valid_ip(ip):
continue
else:
ip_address = ip
break
if not ip_address:
x_real_ip = request.META.get('HTTP_X_REAL_IP', '')
if x_real_ip:
if not x_real_ip.startswith(PRIVATE_IPS_PREFIX) and is_valid_ip(x_real_ip):
ip_address = x_real_ip.strip()
if not ip_address:
remote_addr = request.META.get('REMOTE_ADDR', '')
if remote_addr:
if not remote_addr.startswith(PRIVATE_IPS_PREFIX) and is_valid_ip(remote_addr):
ip_address = remote_addr.strip()
if remote_addr.startswith(PRIVATE_IPS_PREFIX) and is_valid_ip(remote_addr):
ip_address = remote_addr.strip()
if not ip_address:
ip_address = '127.0.0.1'
return ip_address | ['def', 'get_ip_address_from_request', '(', 'request', ')', ':', 'PRIVATE_IPS_PREFIX', '=', '(', "'10.'", ',', "'172.'", ',', "'192.'", ',', "'127.'", ')', 'ip_address', '=', "''", 'x_forwarded_for', '=', 'request', '.', 'META', '.', 'get', '(', "'HTTP_X_FORWARDED_FOR'", ',', "''", ')', 'if', 'x_forwarded_for', 'and', "','", 'not', 'in', 'x_forwarded_for', ':', 'if', 'not', 'x_forwarded_for', '.', 'startswith', '(', 'PRIVATE_IPS_PREFIX', ')', 'and', 'is_valid_ip', '(', 'x_forwarded_for', ')', ':', 'ip_address', '=', 'x_forwarded_for', '.', 'strip', '(', ')', 'else', ':', 'ips', '=', '[', 'ip', '.', 'strip', '(', ')', 'for', 'ip', 'in', 'x_forwarded_for', '.', 'split', '(', "','", ')', ']', 'for', 'ip', 'in', 'ips', ':', 'if', 'ip', '.', 'startswith', '(', 'PRIVATE_IPS_PREFIX', ')', ':', 'continue', 'elif', 'not', 'is_valid_ip', '(', 'ip', ')', ':', 'continue', 'else', ':', 'ip_address', '=', 'ip', 'break', 'if', 'not', 'ip_address', ':', 'x_real_ip', '=', 'request', '.', 'META', '.', 'get', '(', "'HTTP_X_REAL_IP'", ',', "''", ')', 'if', 'x_real_ip', ':', 'if', 'not', 'x_real_ip', '.', 'startswith', '(', 'PRIVATE_IPS_PREFIX', ')', 'and', 'is_valid_ip', '(', 'x_real_ip', ')', ':', 'ip_address', '=', 'x_real_ip', '.', 'strip', '(', ')', 'if', 'not', 'ip_address', ':', 'remote_addr', '=', 'request', '.', 'META', '.', 'get', '(', "'REMOTE_ADDR'", ',', "''", ')', 'if', 'remote_addr', ':', 'if', 'not', 'remote_addr', '.', 'startswith', '(', 'PRIVATE_IPS_PREFIX', ')', 'and', 'is_valid_ip', '(', 'remote_addr', ')', ':', 'ip_address', '=', 'remote_addr', '.', 'strip', '(', ')', 'if', 'remote_addr', '.', 'startswith', '(', 'PRIVATE_IPS_PREFIX', ')', 'and', 'is_valid_ip', '(', 'remote_addr', ')', ':', 'ip_address', '=', 'remote_addr', '.', 'strip', '(', ')', 'if', 'not', 'ip_address', ':', 'ip_address', '=', "'127.0.0.1'", 'return', 'ip_address'] | Makes the best attempt to get the client's real IP or return the loopback | ['Makes', 'the', 'best', 'attempt', 'to', 'get', 'the', 'client', 's', 'real', 'IP', 'or', 'return', 'the', 'loopback'] | train | https://github.com/robromano/django-adminrestrict/blob/f05fd21e49677731e3d291da956b84bcac9a5c69/adminrestrict/middleware.py#L44-L78 |
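
A sketch of calling the helper above from a Django view; the view itself is a hypothetical example, while `get_ip_address_from_request(request)` is the function shown in the row.

```python
from django.http import HttpResponse
from adminrestrict.middleware import get_ip_address_from_request

def whoami(request):                               # hypothetical view for illustration
    ip = get_ip_address_from_request(request)      # falls back to '127.0.0.1'
    return HttpResponse("client ip: %s" % ip)
```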
6,602 | globocom/GloboNetworkAPI-client-python | networkapiclient/Equipamento.py | Equipamento.get_all | def get_all(self):
"""Return all equipments in database
:return: Dictionary with the following structure:
::
{'equipaments': {'name' :< name_equipament >}, {... other equipments ...} }
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
url = 'equipment/all'
code, xml = self.submit(None, 'GET', url)
return self.response(code, xml) | python | def get_all(self):
"""Return all equipments in database
:return: Dictionary with the following structure:
::
{'equipaments': {'name' :< name_equipament >}, {... other equipments ...} }
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
url = 'equipment/all'
code, xml = self.submit(None, 'GET', url)
return self.response(code, xml) | ['def', 'get_all', '(', 'self', ')', ':', 'url', '=', "'equipment/all'", 'code', ',', 'xml', '=', 'self', '.', 'submit', '(', 'None', ',', "'GET'", ',', 'url', ')', 'return', 'self', '.', 'response', '(', 'code', ',', 'xml', ')'] | Return all equipments in database
:return: Dictionary with the following structure:
::
{'equipaments': {'name' :< name_equipament >}, {... other equipments ...} }
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response. | ['Return', 'all', 'equipments', 'in', 'database'] | train | https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/Equipamento.py#L596-L612 |
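
A hedged sketch of the `get_all` call above; `equip_client` is assumed to be an already-constructed `Equipamento` instance, since the client's constructor arguments are not shown in the row.

```python
# `equip_client` is assumed to be an existing Equipamento client instance;
# building one requires GloboNetworkAPI connection details not shown here.
equipments = equip_client.get_all()    # issues GET equipment/all
print(equipments['equipaments'])       # dict keyed as described in the docstring
```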
6,603 | divio/python-mautic | mautic/api.py | API.get | def get(self, obj_id):
"""
Get a single item
:param obj_id: int
:return: dict|str
"""
response = self._client.session.get(
'{url}/{id}'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response) | python | def get(self, obj_id):
"""
Get a single item
:param obj_id: int
:return: dict|str
"""
response = self._client.session.get(
'{url}/{id}'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response) | ['def', 'get', '(', 'self', ',', 'obj_id', ')', ':', 'response', '=', 'self', '.', '_client', '.', 'session', '.', 'get', '(', "'{url}/{id}'", '.', 'format', '(', 'url', '=', 'self', '.', 'endpoint_url', ',', 'id', '=', 'obj_id', ')', ')', 'return', 'self', '.', 'process_response', '(', 'response', ')'] | Get a single item
:param obj_id: int
:return: dict|str | ['Get', 'a', 'single', 'item'] | train | https://github.com/divio/python-mautic/blob/1fbff629070200002373c5e94c75e01561df418a/mautic/api.py#L104-L116 |
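
A hedged sketch of the generic `get` shown above; `contacts_api` stands for an endpoint object built on this `API` base class, and its construction (OAuth client, credentials) is assumed rather than shown.

```python
# `contacts_api` is assumed to be an endpoint object derived from this API class.
item = contacts_api.get(obj_id=42)     # GET <endpoint_url>/42
print(item)                            # dict on success, str otherwise (per the docstring)
```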
6,604 | singularityhub/singularity-cli | spython/oci/cmd/actions.py | _run | def _run(self, bundle,
container_id=None,
empty_process=False,
log_path=None,
pid_file=None,
sync_socket=None,
command="run",
log_format="kubernetes"):
''' _run is the base function for run and create, the only difference
between the two being that run does not have an option for sync_socket.
Equivalent command line example:
singularity oci create [create options...] <container_ID>
Parameters
==========
bundle: the full path to the bundle folder
container_id: an optional container_id. If not provided, use same
container_id used to generate OciImage instance
empty_process: run container without executing container process (for
example, for a pod container waiting for signals). This
is a specific use case for tools like Kubernetes
log_path: the path to store the log.
pid_file: specify the pid file path to use
sync_socket: the path to the unix socket for state synchronization.
command: the command (run or create) to use (default is run)
log_format: defaults to kubernetes. Can also be "basic" or "json"
'''
container_id = self.get_container_id(container_id)
# singularity oci create
cmd = self._init_command(command)
# Check that the bundle exists
if not os.path.exists(bundle):
bot.exit('Bundle not found at %s' % bundle)
# Add the bundle
cmd = cmd + ['--bundle', bundle]
# Additional Logging Files
cmd = cmd + ['--log-format', log_format]
if log_path != None:
cmd = cmd + ['--log-path', log_path]
if pid_file != None:
cmd = cmd + ['--pid-file', pid_file]
if sync_socket != None:
cmd = cmd + ['--sync-socket', sync_socket]
if empty_process:
cmd.append('--empty-process')
# Finally, add the container_id
cmd.append(container_id)
# Generate the instance
result = self._send_command(cmd, sudo=True)
# Get the status to report to the user!
# TODO: Singularity seems to create even with error, can we check and
# delete for the user if this happens?
return self.state(container_id, sudo=True, sync_socket=sync_socket) | python | def _run(self, bundle,
container_id=None,
empty_process=False,
log_path=None,
pid_file=None,
sync_socket=None,
command="run",
log_format="kubernetes"):
''' _run is the base function for run and create, the only difference
between the two being that run does not have an option for sync_socket.
Equivalent command line example:
singularity oci create [create options...] <container_ID>
Parameters
==========
bundle: the full path to the bundle folder
container_id: an optional container_id. If not provided, use same
container_id used to generate OciImage instance
empty_process: run container without executing container process (for
example, for a pod container waiting for signals). This
is a specific use case for tools like Kubernetes
log_path: the path to store the log.
pid_file: specify the pid file path to use
sync_socket: the path to the unix socket for state synchronization.
command: the command (run or create) to use (default is run)
log_format: defaults to kubernetes. Can also be "basic" or "json"
'''
container_id = self.get_container_id(container_id)
# singularity oci create
cmd = self._init_command(command)
# Check that the bundle exists
if not os.path.exists(bundle):
bot.exit('Bundle not found at %s' % bundle)
# Add the bundle
cmd = cmd + ['--bundle', bundle]
# Additional Logging Files
cmd = cmd + ['--log-format', log_format]
if log_path != None:
cmd = cmd + ['--log-path', log_path]
if pid_file != None:
cmd = cmd + ['--pid-file', pid_file]
if sync_socket != None:
cmd = cmd + ['--sync-socket', sync_socket]
if empty_process:
cmd.append('--empty-process')
# Finally, add the container_id
cmd.append(container_id)
# Generate the instance
result = self._send_command(cmd, sudo=True)
# Get the status to report to the user!
# TODO: Singularity seems to create even with error, can we check and
# delete for the user if this happens?
return self.state(container_id, sudo=True, sync_socket=sync_socket) | ['def', '_run', '(', 'self', ',', 'bundle', ',', 'container_id', '=', 'None', ',', 'empty_process', '=', 'False', ',', 'log_path', '=', 'None', ',', 'pid_file', '=', 'None', ',', 'sync_socket', '=', 'None', ',', 'command', '=', '"run"', ',', 'log_format', '=', '"kubernetes"', ')', ':', 'container_id', '=', 'self', '.', 'get_container_id', '(', 'container_id', ')', '# singularity oci create', 'cmd', '=', 'self', '.', '_init_command', '(', 'command', ')', '# Check that the bundle exists', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'bundle', ')', ':', 'bot', '.', 'exit', '(', "'Bundle not found at %s'", '%', 'bundle', ')', '# Add the bundle', 'cmd', '=', 'cmd', '+', '[', "'--bundle'", ',', 'bundle', ']', '# Additional Logging Files', 'cmd', '=', 'cmd', '+', '[', "'--log-format'", ',', 'log_format', ']', 'if', 'log_path', '!=', 'None', ':', 'cmd', '=', 'cmd', '+', '[', "'--log-path'", ',', 'log_path', ']', 'if', 'pid_file', '!=', 'None', ':', 'cmd', '=', 'cmd', '+', '[', "'--pid-file'", ',', 'pid_file', ']', 'if', 'sync_socket', '!=', 'None', ':', 'cmd', '=', 'cmd', '+', '[', "'--sync-socket'", ',', 'sync_socket', ']', 'if', 'empty_process', ':', 'cmd', '.', 'append', '(', "'--empty-process'", ')', '# Finally, add the container_id', 'cmd', '.', 'append', '(', 'container_id', ')', '# Generate the instance', 'result', '=', 'self', '.', '_send_command', '(', 'cmd', ',', 'sudo', '=', 'True', ')', '# Get the status to report to the user!', '# TODO: Singularity seems to create even with error, can we check and', '# delete for the user if this happens?', 'return', 'self', '.', 'state', '(', 'container_id', ',', 'sudo', '=', 'True', ',', 'sync_socket', '=', 'sync_socket', ')'] | _run is the base function for run and create, the only difference
between the two being that run does not have an option for sync_socket.
Equivalent command line example:
singularity oci create [create options...] <container_ID>
Parameters
==========
bundle: the full path to the bundle folder
container_id: an optional container_id. If not provided, use same
container_id used to generate OciImage instance
empty_process: run container without executing container process (for
example, for a pod container waiting for signals). This
is a specific use case for tools like Kubernetes
log_path: the path to store the log.
pid_file: specify the pid file path to use
sync_socket: the path to the unix socket for state synchronization.
command: the command (run or create) to use (default is run)
log_format: defaults to kubernetes. Can also be "basic" or "json" | ['_run', 'is', 'the', 'base', 'function', 'for', 'run', 'and', 'create', 'the', 'only', 'difference', 'between', 'the', 'two', 'being', 'that', 'run', 'does', 'not', 'have', 'an', 'option', 'for', 'sync_socket', '.'] | train | https://github.com/singularityhub/singularity-cli/blob/cb36b4504812ca87e29c6a40b222a545d1865799/spython/oci/cmd/actions.py#L78-L140 |
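
A hedged sketch of calling `_run` directly with the parameters documented above; the bundle path, container id, and log path are placeholders, and `oci` is assumed to be an existing `OciImage` instance.

```python
# `oci` is assumed to be an OciImage instance (it supplies a default container_id).
state = oci._run(
    "/var/tmp/busybox-bundle",          # bundle folder must already exist on disk
    container_id="mycontainer",
    command="create",
    log_path="/tmp/mycontainer.log",
)
print(state)                            # state dict reported back for the container
```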
6,605 | fronzbot/blinkpy | blinkpy/sync_module.py | BlinkSyncModule.get_events | def get_events(self, **kwargs):
"""Retrieve events from server."""
force = kwargs.pop('force', False)
response = api.request_sync_events(self.blink,
self.network_id,
force=force)
try:
return response['event']
except (TypeError, KeyError):
_LOGGER.error("Could not extract events: %s",
response,
exc_info=True)
return False | python | def get_events(self, **kwargs):
"""Retrieve events from server."""
force = kwargs.pop('force', False)
response = api.request_sync_events(self.blink,
self.network_id,
force=force)
try:
return response['event']
except (TypeError, KeyError):
_LOGGER.error("Could not extract events: %s",
response,
exc_info=True)
return False | ['def', 'get_events', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'force', '=', 'kwargs', '.', 'pop', '(', "'force'", ',', 'False', ')', 'response', '=', 'api', '.', 'request_sync_events', '(', 'self', '.', 'blink', ',', 'self', '.', 'network_id', ',', 'force', '=', 'force', ')', 'try', ':', 'return', 'response', '[', "'event'", ']', 'except', '(', 'TypeError', ',', 'KeyError', ')', ':', '_LOGGER', '.', 'error', '(', '"Could not extract events: %s"', ',', 'response', ',', 'exc_info', '=', 'True', ')', 'return', 'False'] | Retrieve events from server. | ['Retrieve', 'events', 'from', 'server', '.'] | train | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/sync_module.py#L125-L137 |
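
A sketch assuming `sync` is a `BlinkSyncModule` attached to an already-authenticated Blink session; only the `get_events` call and its False-on-failure behavior come from the row.

```python
# `sync` is assumed to be a ready BlinkSyncModule on a logged-in Blink account.
events = sync.get_events(force=True)    # returns False if the response lacks 'event'
if events:
    for event in events:
        print(event)
```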
6,606 | GNS3/gns3-server | gns3server/compute/dynamips/nodes/c2600.py | C2600._setup_chassis | def _setup_chassis(self):
"""
Sets up the router with the corresponding chassis
(create slots and insert default adapters).
"""
self._create_slots(2)
self._slots[0] = self.integrated_adapters[self._chassis]() | python | def _setup_chassis(self):
"""
Sets up the router with the corresponding chassis
(create slots and insert default adapters).
"""
self._create_slots(2)
self._slots[0] = self.integrated_adapters[self._chassis]() | ['def', '_setup_chassis', '(', 'self', ')', ':', 'self', '.', '_create_slots', '(', '2', ')', 'self', '.', '_slots', '[', '0', ']', '=', 'self', '.', 'integrated_adapters', '[', 'self', '.', '_chassis', ']', '(', ')'] | Sets up the router with the corresponding chassis
(create slots and insert default adapters). | ['Sets', 'up', 'the', 'router', 'with', 'the', 'corresponding', 'chassis', '(', 'create', 'slots', 'and', 'insert', 'default', 'adapters', ')', '.'] | train | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/dynamips/nodes/c2600.py#L96-L103 |
6,607 | SeleniumHQ/selenium | py/selenium/webdriver/common/proxy.py | Proxy.add_to_capabilities | def add_to_capabilities(self, capabilities):
"""
Adds proxy information as capability in specified capabilities.
:Args:
- capabilities: The capabilities to which proxy will be added.
"""
proxy_caps = {}
proxy_caps['proxyType'] = self.proxyType['string']
if self.autodetect:
proxy_caps['autodetect'] = self.autodetect
if self.ftpProxy:
proxy_caps['ftpProxy'] = self.ftpProxy
if self.httpProxy:
proxy_caps['httpProxy'] = self.httpProxy
if self.proxyAutoconfigUrl:
proxy_caps['proxyAutoconfigUrl'] = self.proxyAutoconfigUrl
if self.sslProxy:
proxy_caps['sslProxy'] = self.sslProxy
if self.noProxy:
proxy_caps['noProxy'] = self.noProxy
if self.socksProxy:
proxy_caps['socksProxy'] = self.socksProxy
if self.socksUsername:
proxy_caps['socksUsername'] = self.socksUsername
if self.socksPassword:
proxy_caps['socksPassword'] = self.socksPassword
capabilities['proxy'] = proxy_caps | python | def add_to_capabilities(self, capabilities):
"""
Adds proxy information as capability in specified capabilities.
:Args:
- capabilities: The capabilities to which proxy will be added.
"""
proxy_caps = {}
proxy_caps['proxyType'] = self.proxyType['string']
if self.autodetect:
proxy_caps['autodetect'] = self.autodetect
if self.ftpProxy:
proxy_caps['ftpProxy'] = self.ftpProxy
if self.httpProxy:
proxy_caps['httpProxy'] = self.httpProxy
if self.proxyAutoconfigUrl:
proxy_caps['proxyAutoconfigUrl'] = self.proxyAutoconfigUrl
if self.sslProxy:
proxy_caps['sslProxy'] = self.sslProxy
if self.noProxy:
proxy_caps['noProxy'] = self.noProxy
if self.socksProxy:
proxy_caps['socksProxy'] = self.socksProxy
if self.socksUsername:
proxy_caps['socksUsername'] = self.socksUsername
if self.socksPassword:
proxy_caps['socksPassword'] = self.socksPassword
capabilities['proxy'] = proxy_caps | ['def', 'add_to_capabilities', '(', 'self', ',', 'capabilities', ')', ':', 'proxy_caps', '=', '{', '}', 'proxy_caps', '[', "'proxyType'", ']', '=', 'self', '.', 'proxyType', '[', "'string'", ']', 'if', 'self', '.', 'autodetect', ':', 'proxy_caps', '[', "'autodetect'", ']', '=', 'self', '.', 'autodetect', 'if', 'self', '.', 'ftpProxy', ':', 'proxy_caps', '[', "'ftpProxy'", ']', '=', 'self', '.', 'ftpProxy', 'if', 'self', '.', 'httpProxy', ':', 'proxy_caps', '[', "'httpProxy'", ']', '=', 'self', '.', 'httpProxy', 'if', 'self', '.', 'proxyAutoconfigUrl', ':', 'proxy_caps', '[', "'proxyAutoconfigUrl'", ']', '=', 'self', '.', 'proxyAutoconfigUrl', 'if', 'self', '.', 'sslProxy', ':', 'proxy_caps', '[', "'sslProxy'", ']', '=', 'self', '.', 'sslProxy', 'if', 'self', '.', 'noProxy', ':', 'proxy_caps', '[', "'noProxy'", ']', '=', 'self', '.', 'noProxy', 'if', 'self', '.', 'socksProxy', ':', 'proxy_caps', '[', "'socksProxy'", ']', '=', 'self', '.', 'socksProxy', 'if', 'self', '.', 'socksUsername', ':', 'proxy_caps', '[', "'socksUsername'", ']', '=', 'self', '.', 'socksUsername', 'if', 'self', '.', 'socksPassword', ':', 'proxy_caps', '[', "'socksPassword'", ']', '=', 'self', '.', 'socksPassword', 'capabilities', '[', "'proxy'", ']', '=', 'proxy_caps'] | Adds proxy information as capability in specified capabilities.
:Args:
- capabilities: The capabilities to which proxy will be added. | ['Adds', 'proxy', 'information', 'as', 'capability', 'in', 'specified', 'capabilities', '.'] | train | https://github.com/SeleniumHQ/selenium/blob/df40c28b41d4b3953f90eaff84838a9ac052b84a/py/selenium/webdriver/common/proxy.py#L307-L334 |
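
A short sketch of feeding a configured `Proxy` into a capabilities dict via the method above; the proxy address is a placeholder, and the attribute-style setters are the usual way the selenium `Proxy` helper is populated.

```python
from selenium.webdriver.common.proxy import Proxy, ProxyType

proxy = Proxy()
proxy.proxy_type = ProxyType.MANUAL
proxy.http_proxy = "proxy.example.com:8080"     # placeholder proxy address
proxy.ssl_proxy = "proxy.example.com:8080"

capabilities = {}                               # e.g. a copy of DesiredCapabilities.FIREFOX
proxy.add_to_capabilities(capabilities)
print(capabilities["proxy"])                    # proxyType plus the proxies set above
```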
6,608 | fabioz/PyDev.Debugger | third_party/pep8/pycodestyle.py | Checker.build_tokens_line | def build_tokens_line(self):
"""Build a logical line from tokens."""
logical = []
comments = []
length = 0
prev_row = prev_col = mapping = None
for token_type, text, start, end, line in self.tokens:
if token_type in SKIP_TOKENS:
continue
if not mapping:
mapping = [(0, start)]
if token_type == tokenize.COMMENT:
comments.append(text)
continue
if token_type == tokenize.STRING:
text = mute_string(text)
if prev_row:
(start_row, start_col) = start
if prev_row != start_row: # different row
prev_text = self.lines[prev_row - 1][prev_col - 1]
if prev_text == ',' or (prev_text not in '{[(' and
text not in '}])'):
text = ' ' + text
elif prev_col != start_col: # different column
text = line[prev_col:start_col] + text
logical.append(text)
length += len(text)
mapping.append((length, end))
(prev_row, prev_col) = end
self.logical_line = ''.join(logical)
self.noqa = comments and noqa(''.join(comments))
return mapping | python | def build_tokens_line(self):
"""Build a logical line from tokens."""
logical = []
comments = []
length = 0
prev_row = prev_col = mapping = None
for token_type, text, start, end, line in self.tokens:
if token_type in SKIP_TOKENS:
continue
if not mapping:
mapping = [(0, start)]
if token_type == tokenize.COMMENT:
comments.append(text)
continue
if token_type == tokenize.STRING:
text = mute_string(text)
if prev_row:
(start_row, start_col) = start
if prev_row != start_row: # different row
prev_text = self.lines[prev_row - 1][prev_col - 1]
if prev_text == ',' or (prev_text not in '{[(' and
text not in '}])'):
text = ' ' + text
elif prev_col != start_col: # different column
text = line[prev_col:start_col] + text
logical.append(text)
length += len(text)
mapping.append((length, end))
(prev_row, prev_col) = end
self.logical_line = ''.join(logical)
self.noqa = comments and noqa(''.join(comments))
return mapping | ['def', 'build_tokens_line', '(', 'self', ')', ':', 'logical', '=', '[', ']', 'comments', '=', '[', ']', 'length', '=', '0', 'prev_row', '=', 'prev_col', '=', 'mapping', '=', 'None', 'for', 'token_type', ',', 'text', ',', 'start', ',', 'end', ',', 'line', 'in', 'self', '.', 'tokens', ':', 'if', 'token_type', 'in', 'SKIP_TOKENS', ':', 'continue', 'if', 'not', 'mapping', ':', 'mapping', '=', '[', '(', '0', ',', 'start', ')', ']', 'if', 'token_type', '==', 'tokenize', '.', 'COMMENT', ':', 'comments', '.', 'append', '(', 'text', ')', 'continue', 'if', 'token_type', '==', 'tokenize', '.', 'STRING', ':', 'text', '=', 'mute_string', '(', 'text', ')', 'if', 'prev_row', ':', '(', 'start_row', ',', 'start_col', ')', '=', 'start', 'if', 'prev_row', '!=', 'start_row', ':', '# different row', 'prev_text', '=', 'self', '.', 'lines', '[', 'prev_row', '-', '1', ']', '[', 'prev_col', '-', '1', ']', 'if', 'prev_text', '==', "','", 'or', '(', 'prev_text', 'not', 'in', "'{[('", 'and', 'text', 'not', 'in', "'}])'", ')', ':', 'text', '=', "' '", '+', 'text', 'elif', 'prev_col', '!=', 'start_col', ':', '# different column', 'text', '=', 'line', '[', 'prev_col', ':', 'start_col', ']', '+', 'text', 'logical', '.', 'append', '(', 'text', ')', 'length', '+=', 'len', '(', 'text', ')', 'mapping', '.', 'append', '(', '(', 'length', ',', 'end', ')', ')', '(', 'prev_row', ',', 'prev_col', ')', '=', 'end', 'self', '.', 'logical_line', '=', "''", '.', 'join', '(', 'logical', ')', 'self', '.', 'noqa', '=', 'comments', 'and', 'noqa', '(', "''", '.', 'join', '(', 'comments', ')', ')', 'return', 'mapping'] | Build a logical line from tokens. | ['Build', 'a', 'logical', 'line', 'from', 'tokens', '.'] | train | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/pycodestyle.py#L1609-L1640 |
6,609 | HiPERCAM/hcam_widgets | hcam_widgets/widgets.py | ListInt.set | def set(self, num):
"""
Sets current value to num
"""
if self.validate(num) is not None:
self.index = self.allowed.index(num)
IntegerEntry.set(self, num) | python | def set(self, num):
"""
Sets current value to num
"""
if self.validate(num) is not None:
self.index = self.allowed.index(num)
IntegerEntry.set(self, num) | ['def', 'set', '(', 'self', ',', 'num', ')', ':', 'if', 'self', '.', 'validate', '(', 'num', ')', 'is', 'not', 'None', ':', 'self', '.', 'index', '=', 'self', '.', 'allowed', '.', 'index', '(', 'num', ')', 'IntegerEntry', '.', 'set', '(', 'self', ',', 'num', ')'] | Sets current value to num | ['Sets', 'current', 'value', 'to', 'num'] | train | https://github.com/HiPERCAM/hcam_widgets/blob/7219f0d96dd3a8ebe3139c7f542a72c02d02fce8/hcam_widgets/widgets.py#L749-L755 |
6,610 | marshallward/f90nml | f90nml/__init__.py | write | def write(nml, nml_path, force=False, sort=False):
"""Save a namelist to disk using either a file object or its file path.
File object usage:
>>> with open(nml_path, 'w') as nml_file:
>>> f90nml.write(nml, nml_file)
File path usage:
>>> f90nml.write(nml, 'data.nml')
This function is equivalent to the ``write`` function of the ``Namelist``
object ``nml``.
>>> nml.write('data.nml')
By default, ``write`` will not overwrite an existing file. To override
this, use the ``force`` flag.
>>> nml.write('data.nml', force=True)
To alphabetically sort the ``Namelist`` keys, use the ``sort`` flag.
>>> nml.write('data.nml', sort=True)
"""
# Promote dicts to Namelists
if not isinstance(nml, Namelist) and isinstance(nml, dict):
nml_in = Namelist(nml)
else:
nml_in = nml
nml_in.write(nml_path, force=force, sort=sort) | python | def write(nml, nml_path, force=False, sort=False):
"""Save a namelist to disk using either a file object or its file path.
File object usage:
>>> with open(nml_path, 'w') as nml_file:
>>> f90nml.write(nml, nml_file)
File path usage:
>>> f90nml.write(nml, 'data.nml')
This function is equivalent to the ``write`` function of the ``Namelist``
object ``nml``.
>>> nml.write('data.nml')
By default, ``write`` will not overwrite an existing file. To override
this, use the ``force`` flag.
>>> nml.write('data.nml', force=True)
To alphabetically sort the ``Namelist`` keys, use the ``sort`` flag.
>>> nml.write('data.nml', sort=True)
"""
# Promote dicts to Namelists
if not isinstance(nml, Namelist) and isinstance(nml, dict):
nml_in = Namelist(nml)
else:
nml_in = nml
nml_in.write(nml_path, force=force, sort=sort) | ['def', 'write', '(', 'nml', ',', 'nml_path', ',', 'force', '=', 'False', ',', 'sort', '=', 'False', ')', ':', '# Promote dicts to Namelists', 'if', 'not', 'isinstance', '(', 'nml', ',', 'Namelist', ')', 'and', 'isinstance', '(', 'nml', ',', 'dict', ')', ':', 'nml_in', '=', 'Namelist', '(', 'nml', ')', 'else', ':', 'nml_in', '=', 'nml', 'nml_in', '.', 'write', '(', 'nml_path', ',', 'force', '=', 'force', ',', 'sort', '=', 'sort', ')'] | Save a namelist to disk using either a file object or its file path.
File object usage:
>>> with open(nml_path, 'w') as nml_file:
>>> f90nml.write(nml, nml_file)
File path usage:
>>> f90nml.write(nml, 'data.nml')
This function is equivalent to the ``write`` function of the ``Namelist``
object ``nml``.
>>> nml.write('data.nml')
By default, ``write`` will not overwrite an existing file. To override
this, use the ``force`` flag.
>>> nml.write('data.nml', force=True)
To alphabetically sort the ``Namelist`` keys, use the ``sort`` flag.
>>> nml.write('data.nml', sort=True) | ['Save', 'a', 'namelist', 'to', 'disk', 'using', 'either', 'a', 'file', 'object', 'or', 'its', 'file', 'path', '.'] | train | https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/__init__.py#L50-L82 |
6,611 | Equitable/trump | trump/orm.py | SymbolManager.build_view_from_tag | def build_view_from_tag(self, tag):
"""
Build a view of group of Symbols based on their tag.
Parameters
----------
tag : str
Use '%' to enable SQL's "LIKE" functionality.
Note
----
This function is written without SQLAlchemy,
so it only tested on Postgres.
"""
syms = self.search_tag(tag)
names = [sym.name for sym in syms]
subs = ["SELECT indx, '{}' AS symbol, final FROM {}".format(s, s) for s in names]
qry = " UNION ALL ".join(subs)
qry = "CREATE VIEW {} AS {};".format(tag, qry)
self.ses.execute("DROP VIEW IF EXISTS {};".format(tag))
self.ses.commit()
self.ses.execute(qry)
self.ses.commit() | python | def build_view_from_tag(self, tag):
"""
Build a view of group of Symbols based on their tag.
Parameters
----------
tag : str
Use '%' to enable SQL's "LIKE" functionality.
Note
----
This function is written without SQLAlchemy,
so it only tested on Postgres.
"""
syms = self.search_tag(tag)
names = [sym.name for sym in syms]
subs = ["SELECT indx, '{}' AS symbol, final FROM {}".format(s, s) for s in names]
qry = " UNION ALL ".join(subs)
qry = "CREATE VIEW {} AS {};".format(tag, qry)
self.ses.execute("DROP VIEW IF EXISTS {};".format(tag))
self.ses.commit()
self.ses.execute(qry)
self.ses.commit() | ['def', 'build_view_from_tag', '(', 'self', ',', 'tag', ')', ':', 'syms', '=', 'self', '.', 'search_tag', '(', 'tag', ')', 'names', '=', '[', 'sym', '.', 'name', 'for', 'sym', 'in', 'syms', ']', 'subs', '=', '[', '"SELECT indx, \'{}\' AS symbol, final FROM {}"', '.', 'format', '(', 's', ',', 's', ')', 'for', 's', 'in', 'names', ']', 'qry', '=', '" UNION ALL "', '.', 'join', '(', 'subs', ')', 'qry', '=', '"CREATE VIEW {} AS {};"', '.', 'format', '(', 'tag', ',', 'qry', ')', 'self', '.', 'ses', '.', 'execute', '(', '"DROP VIEW IF EXISTS {};"', '.', 'format', '(', 'tag', ')', ')', 'self', '.', 'ses', '.', 'commit', '(', ')', 'self', '.', 'ses', '.', 'execute', '(', 'qry', ')', 'self', '.', 'ses', '.', 'commit', '(', ')'] | Build a view of group of Symbols based on their tag.
Parameters
----------
tag : str
Use '%' to enable SQL's "LIKE" functionality.
Note
----
This function is written without SQLAlchemy,
so it only tested on Postgres. | ['Build', 'a', 'view', 'of', 'group', 'of', 'Symbols', 'based', 'on', 'their', 'tag', '.', 'Parameters', '----------', 'tag', ':', 'str', 'Use', '%', 'to', 'enable', 'SQL', 's', 'LIKE', 'functionality', '.', 'Note', '----', 'This', 'function', 'is', 'written', 'without', 'SQLAlchemy', 'so', 'it', 'only', 'tested', 'on', 'Postgres', '.'] | train | https://github.com/Equitable/trump/blob/a2802692bc642fa32096374159eea7ceca2947b4/trump/orm.py#L565-L595 |
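
A hedged sketch; `sm` is assumed to be a `SymbolManager` bound to a Postgres-backed session, since the method issues raw SQL that the docstring says is only tested on Postgres.

```python
# `sm` is assumed to be an existing SymbolManager on a Postgres database.
sm.build_view_from_tag("energy%")    # '%' enables SQL LIKE matching on the tag
```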
6,612 | mfcloud/python-zvm-sdk | smtLayer/powerVM.py | checkIsReachable | def checkIsReachable(rh):
"""
Check if a virtual machine is reachable.
Input:
Request Handle with the following properties:
function - 'POWERVM'
subfunction - 'ISREACHABLE'
userid - userid of the virtual machine
Output:
Request Handle updated with the results.
overallRC - 0: determined the status, non-zero: some weird failure
while trying to execute a command
on the guest via IUCV
rc - RC returned from execCmdThruIUCV
rs - 0: not reachable, 1: reachable
"""
rh.printSysLog("Enter powerVM.checkIsReachable, userid: " +
rh.userid)
strCmd = "echo 'ping'"
results = execCmdThruIUCV(rh, rh.userid, strCmd)
if results['overallRC'] == 0:
rh.printLn("N", rh.userid + ": reachable")
reachable = 1
else:
# A failure from execCmdThruIUCV is acceptable way of determining
# that the system is unreachable. We won't pass along the
# error message.
rh.printLn("N", rh.userid + ": unreachable")
reachable = 0
rh.updateResults({"rs": reachable})
rh.printSysLog("Exit powerVM.checkIsReachable, rc: 0")
return 0 | python | def checkIsReachable(rh):
"""
Check if a virtual machine is reachable.
Input:
Request Handle with the following properties:
function - 'POWERVM'
subfunction - 'ISREACHABLE'
userid - userid of the virtual machine
Output:
Request Handle updated with the results.
overallRC - 0: determined the status, non-zero: some weird failure
while trying to execute a command
on the guest via IUCV
rc - RC returned from execCmdThruIUCV
rs - 0: not reachable, 1: reachable
"""
rh.printSysLog("Enter powerVM.checkIsReachable, userid: " +
rh.userid)
strCmd = "echo 'ping'"
results = execCmdThruIUCV(rh, rh.userid, strCmd)
if results['overallRC'] == 0:
rh.printLn("N", rh.userid + ": reachable")
reachable = 1
else:
# A failure from execCmdThruIUCV is acceptable way of determining
# that the system is unreachable. We won't pass along the
# error message.
rh.printLn("N", rh.userid + ": unreachable")
reachable = 0
rh.updateResults({"rs": reachable})
rh.printSysLog("Exit powerVM.checkIsReachable, rc: 0")
return 0 | ['def', 'checkIsReachable', '(', 'rh', ')', ':', 'rh', '.', 'printSysLog', '(', '"Enter powerVM.checkIsReachable, userid: "', '+', 'rh', '.', 'userid', ')', 'strCmd', '=', '"echo \'ping\'"', 'results', '=', 'execCmdThruIUCV', '(', 'rh', ',', 'rh', '.', 'userid', ',', 'strCmd', ')', 'if', 'results', '[', "'overallRC'", ']', '==', '0', ':', 'rh', '.', 'printLn', '(', '"N"', ',', 'rh', '.', 'userid', '+', '": reachable"', ')', 'reachable', '=', '1', 'else', ':', '# A failure from execCmdThruIUCV is acceptable way of determining', "# that the system is unreachable. We won't pass along the", '# error message.', 'rh', '.', 'printLn', '(', '"N"', ',', 'rh', '.', 'userid', '+', '": unreachable"', ')', 'reachable', '=', '0', 'rh', '.', 'updateResults', '(', '{', '"rs"', ':', 'reachable', '}', ')', 'rh', '.', 'printSysLog', '(', '"Exit powerVM.checkIsReachable, rc: 0"', ')', 'return', '0'] | Check if a virtual machine is reachable.
Input:
Request Handle with the following properties:
function - 'POWERVM'
subfunction - 'ISREACHABLE'
userid - userid of the virtual machine
Output:
Request Handle updated with the results.
overallRC - 0: determined the status, non-zero: some weird failure
while trying to execute a command
on the guest via IUCV
rc - RC returned from execCmdThruIUCV
rs - 0: not reachable, 1: reachable | ['Check', 'if', 'a', 'virtual', 'machine', 'is', 'reachable', '.'] | train | https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/smtLayer/powerVM.py#L186-L223 |
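
A hedged sketch of the reachability check above; `rh` stands for a request handle already prepared for the POWERVM ISREACHABLE subfunction, whose construction is not shown in the row.

```python
# `rh` is assumed to be a prepared request handle (function POWERVM, subfunction
# ISREACHABLE, userid set). The call always returns 0; reachability is recorded
# through rh.updateResults() as rs == 1 (reachable) or rs == 0 (unreachable).
rc = checkIsReachable(rh)
```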
6,613 | materialsproject/pymatgen | pymatgen/util/convergence.py | print_plot_line | def print_plot_line(function, popt, xs, ys, name, tol=0.05, extra=''):
"""
print the gnuplot command line to plot the x, y data with the fitted function using the popt parameters
"""
idp = id_generator()
f = open('convdat.'+str(idp), mode='w')
for n in range(0, len(ys), 1):
f.write(str(xs[n]) + ' ' + str(ys[n]) + '\n')
f.close()
tol = abs(tol)
line = "plot 'convdat.%s' pointsize 4 lt 0, " % idp
line += '%s lt 3, %s lt 4, %s lt 4, ' % (popt[0], popt[0] - tol, popt[0] + tol)
if function is exponential:
line += "%s + %s * %s ** -x" % (popt[0], popt[1], min(max(1.00001, popt[2]), 1.2))
elif function is reciprocal:
line += "%s + %s / x**%s" % (popt[0], popt[1], min(max(0.5, popt[2]), 6))
elif function is single_reciprocal:
line += "%s + %s / (x - %s)" % (popt[0], popt[1], popt[2])
elif function is simple_reciprocal:
line += "%s + %s / x" % (popt[0], popt[1])
elif function is simple_2reciprocal:
line += "%s + %s / x**2" % (popt[0], popt[1])
elif function is simple_4reciprocal:
line += "%s + %s / x**4" % (popt[0], popt[1])
elif function is simple_5reciprocal:
line += "%s + %s / x**0.5" % (popt[0], popt[1])
else:
print(function, ' no plot ')
with open('plot-fits', mode='a') as f:
f.write('set title "' + name + ' - ' + extra + '"\n')
f.write("set output '" + name + '-' + idp + ".gif'" + '\n')
f.write("set yrange [" + str(popt[0] - 5 * tol) + ':' + str(popt[0] + 5 * tol)+']\n')
f.write(line + '\n')
f.write('pause -1 \n') | python | def print_plot_line(function, popt, xs, ys, name, tol=0.05, extra=''):
"""
print the gnuplot command line to plot the x, y data with the fitted function using the popt parameters
"""
idp = id_generator()
f = open('convdat.'+str(idp), mode='w')
for n in range(0, len(ys), 1):
f.write(str(xs[n]) + ' ' + str(ys[n]) + '\n')
f.close()
tol = abs(tol)
line = "plot 'convdat.%s' pointsize 4 lt 0, " % idp
line += '%s lt 3, %s lt 4, %s lt 4, ' % (popt[0], popt[0] - tol, popt[0] + tol)
if function is exponential:
line += "%s + %s * %s ** -x" % (popt[0], popt[1], min(max(1.00001, popt[2]), 1.2))
elif function is reciprocal:
line += "%s + %s / x**%s" % (popt[0], popt[1], min(max(0.5, popt[2]), 6))
elif function is single_reciprocal:
line += "%s + %s / (x - %s)" % (popt[0], popt[1], popt[2])
elif function is simple_reciprocal:
line += "%s + %s / x" % (popt[0], popt[1])
elif function is simple_2reciprocal:
line += "%s + %s / x**2" % (popt[0], popt[1])
elif function is simple_4reciprocal:
line += "%s + %s / x**4" % (popt[0], popt[1])
elif function is simple_5reciprocal:
line += "%s + %s / x**0.5" % (popt[0], popt[1])
else:
print(function, ' no plot ')
with open('plot-fits', mode='a') as f:
f.write('set title "' + name + ' - ' + extra + '"\n')
f.write("set output '" + name + '-' + idp + ".gif'" + '\n')
f.write("set yrange [" + str(popt[0] - 5 * tol) + ':' + str(popt[0] + 5 * tol)+']\n')
f.write(line + '\n')
f.write('pause -1 \n') | ['def', 'print_plot_line', '(', 'function', ',', 'popt', ',', 'xs', ',', 'ys', ',', 'name', ',', 'tol', '=', '0.05', ',', 'extra', '=', "''", ')', ':', 'idp', '=', 'id_generator', '(', ')', 'f', '=', 'open', '(', "'convdat.'", '+', 'str', '(', 'idp', ')', ',', 'mode', '=', "'w'", ')', 'for', 'n', 'in', 'range', '(', '0', ',', 'len', '(', 'ys', ')', ',', '1', ')', ':', 'f', '.', 'write', '(', 'str', '(', 'xs', '[', 'n', ']', ')', '+', "' '", '+', 'str', '(', 'ys', '[', 'n', ']', ')', '+', "'\\n'", ')', 'f', '.', 'close', '(', ')', 'tol', '=', 'abs', '(', 'tol', ')', 'line', '=', '"plot \'convdat.%s\' pointsize 4 lt 0, "', '%', 'idp', 'line', '+=', "'%s lt 3, %s lt 4, %s lt 4, '", '%', '(', 'popt', '[', '0', ']', ',', 'popt', '[', '0', ']', '-', 'tol', ',', 'popt', '[', '0', ']', '+', 'tol', ')', 'if', 'function', 'is', 'exponential', ':', 'line', '+=', '"%s + %s * %s ** -x"', '%', '(', 'popt', '[', '0', ']', ',', 'popt', '[', '1', ']', ',', 'min', '(', 'max', '(', '1.00001', ',', 'popt', '[', '2', ']', ')', ',', '1.2', ')', ')', 'elif', 'function', 'is', 'reciprocal', ':', 'line', '+=', '"%s + %s / x**%s"', '%', '(', 'popt', '[', '0', ']', ',', 'popt', '[', '1', ']', ',', 'min', '(', 'max', '(', '0.5', ',', 'popt', '[', '2', ']', ')', ',', '6', ')', ')', 'elif', 'function', 'is', 'single_reciprocal', ':', 'line', '+=', '"%s + %s / (x - %s)"', '%', '(', 'popt', '[', '0', ']', ',', 'popt', '[', '1', ']', ',', 'popt', '[', '2', ']', ')', 'elif', 'function', 'is', 'simple_reciprocal', ':', 'line', '+=', '"%s + %s / x"', '%', '(', 'popt', '[', '0', ']', ',', 'popt', '[', '1', ']', ')', 'elif', 'function', 'is', 'simple_2reciprocal', ':', 'line', '+=', '"%s + %s / x**2"', '%', '(', 'popt', '[', '0', ']', ',', 'popt', '[', '1', ']', ')', 'elif', 'function', 'is', 'simple_4reciprocal', ':', 'line', '+=', '"%s + %s / x**4"', '%', '(', 'popt', '[', '0', ']', ',', 'popt', '[', '1', ']', ')', 'elif', 'function', 'is', 'simple_5reciprocal', ':', 'line', '+=', '"%s + %s / x**0.5"', '%', '(', 'popt', '[', '0', ']', ',', 'popt', '[', '1', ']', ')', 'else', ':', 'print', '(', 'function', ',', "' no plot '", ')', 'with', 'open', '(', "'plot-fits'", ',', 'mode', '=', "'a'", ')', 'as', 'f', ':', 'f', '.', 'write', '(', '\'set title "\'', '+', 'name', '+', "' - '", '+', 'extra', '+', '\'"\\n\'', ')', 'f', '.', 'write', '(', '"set output \'"', '+', 'name', '+', "'-'", '+', 'idp', '+', '".gif\'"', '+', "'\\n'", ')', 'f', '.', 'write', '(', '"set yrange ["', '+', 'str', '(', 'popt', '[', '0', ']', '-', '5', '*', 'tol', ')', '+', "':'", '+', 'str', '(', 'popt', '[', '0', ']', '+', '5', '*', 'tol', ')', '+', "']\\n'", ')', 'f', '.', 'write', '(', 'line', '+', "'\\n'", ')', 'f', '.', 'write', '(', "'pause -1 \\n'", ')'] | print the gnuplot command line to plot the x, y data with the fitted function using the popt parameters | ['print', 'the', 'gnuplot', 'command', 'line', 'to', 'plot', 'the', 'x', 'y', 'data', 'with', 'the', 'fitted', 'function', 'using', 'the', 'popt', 'parameters'] | train | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/util/convergence.py#L381-L415 |
6,614 | tensorflow/lucid | lucid/misc/io/serialize_array.py | _normalize_array | def _normalize_array(array, domain=(0, 1)):
"""Given an arbitrary rank-3 NumPy array, produce one representing an image.
This ensures the resulting array has a dtype of uint8 and a domain of 0-255.
Args:
array: NumPy array representing the image
domain: expected range of values in array,
defaults to (0, 1), if explicitly set to None will use the array's
own range of values and normalize them.
Returns:
normalized PIL.Image
"""
# first copy the input so we're never mutating the user's data
array = np.array(array)
# squeeze helps both with batch=1 and B/W and PIL's mode inference
array = np.squeeze(array)
assert len(array.shape) <= 3
assert np.issubdtype(array.dtype, np.number)
assert not np.isnan(array).any()
low, high = np.min(array), np.max(array)
if domain is None:
message = "No domain specified, normalizing from measured (~%.2f, ~%.2f)"
log.debug(message, low, high)
domain = (low, high)
# clip values if domain was specified and array contains values outside of it
if low < domain[0] or high > domain[1]:
message = "Clipping domain from (~{:.2f}, ~{:.2f}) to (~{:.2f}, ~{:.2f})."
log.info(message.format(low, high, domain[0], domain[1]))
array = array.clip(*domain)
min_value, max_value = np.iinfo(np.uint8).min, np.iinfo(np.uint8).max # 0, 255
# convert signed to unsigned if needed
if np.issubdtype(array.dtype, np.inexact):
offset = domain[0]
if offset != 0:
array -= offset
log.debug("Converting inexact array by subtracting -%.2f.", offset)
scalar = max_value / (domain[1] - domain[0])
if scalar != 1:
array *= scalar
log.debug("Converting inexact array by scaling by %.2f.", scalar)
return array.clip(min_value, max_value).astype(np.uint8) | python | def _normalize_array(array, domain=(0, 1)):
"""Given an arbitrary rank-3 NumPy array, produce one representing an image.
This ensures the resulting array has a dtype of uint8 and a domain of 0-255.
Args:
array: NumPy array representing the image
domain: expected range of values in array,
defaults to (0, 1), if explicitly set to None will use the array's
own range of values and normalize them.
Returns:
normalized PIL.Image
"""
# first copy the input so we're never mutating the user's data
array = np.array(array)
# squeeze helps both with batch=1 and B/W and PIL's mode inference
array = np.squeeze(array)
assert len(array.shape) <= 3
assert np.issubdtype(array.dtype, np.number)
assert not np.isnan(array).any()
low, high = np.min(array), np.max(array)
if domain is None:
message = "No domain specified, normalizing from measured (~%.2f, ~%.2f)"
log.debug(message, low, high)
domain = (low, high)
# clip values if domain was specified and array contains values outside of it
if low < domain[0] or high > domain[1]:
message = "Clipping domain from (~{:.2f}, ~{:.2f}) to (~{:.2f}, ~{:.2f})."
log.info(message.format(low, high, domain[0], domain[1]))
array = array.clip(*domain)
min_value, max_value = np.iinfo(np.uint8).min, np.iinfo(np.uint8).max # 0, 255
# convert signed to unsigned if needed
if np.issubdtype(array.dtype, np.inexact):
offset = domain[0]
if offset != 0:
array -= offset
log.debug("Converting inexact array by subtracting -%.2f.", offset)
scalar = max_value / (domain[1] - domain[0])
if scalar != 1:
array *= scalar
log.debug("Converting inexact array by scaling by %.2f.", scalar)
return array.clip(min_value, max_value).astype(np.uint8) | ['def', '_normalize_array', '(', 'array', ',', 'domain', '=', '(', '0', ',', '1', ')', ')', ':', "# first copy the input so we're never mutating the user's data", 'array', '=', 'np', '.', 'array', '(', 'array', ')', "# squeeze helps both with batch=1 and B/W and PIL's mode inference", 'array', '=', 'np', '.', 'squeeze', '(', 'array', ')', 'assert', 'len', '(', 'array', '.', 'shape', ')', '<=', '3', 'assert', 'np', '.', 'issubdtype', '(', 'array', '.', 'dtype', ',', 'np', '.', 'number', ')', 'assert', 'not', 'np', '.', 'isnan', '(', 'array', ')', '.', 'any', '(', ')', 'low', ',', 'high', '=', 'np', '.', 'min', '(', 'array', ')', ',', 'np', '.', 'max', '(', 'array', ')', 'if', 'domain', 'is', 'None', ':', 'message', '=', '"No domain specified, normalizing from measured (~%.2f, ~%.2f)"', 'log', '.', 'debug', '(', 'message', ',', 'low', ',', 'high', ')', 'domain', '=', '(', 'low', ',', 'high', ')', '# clip values if domain was specified and array contains values outside of it', 'if', 'low', '<', 'domain', '[', '0', ']', 'or', 'high', '>', 'domain', '[', '1', ']', ':', 'message', '=', '"Clipping domain from (~{:.2f}, ~{:.2f}) to (~{:.2f}, ~{:.2f})."', 'log', '.', 'info', '(', 'message', '.', 'format', '(', 'low', ',', 'high', ',', 'domain', '[', '0', ']', ',', 'domain', '[', '1', ']', ')', ')', 'array', '=', 'array', '.', 'clip', '(', '*', 'domain', ')', 'min_value', ',', 'max_value', '=', 'np', '.', 'iinfo', '(', 'np', '.', 'uint8', ')', '.', 'min', ',', 'np', '.', 'iinfo', '(', 'np', '.', 'uint8', ')', '.', 'max', '# 0, 255', '# convert signed to unsigned if needed', 'if', 'np', '.', 'issubdtype', '(', 'array', '.', 'dtype', ',', 'np', '.', 'inexact', ')', ':', 'offset', '=', 'domain', '[', '0', ']', 'if', 'offset', '!=', '0', ':', 'array', '-=', 'offset', 'log', '.', 'debug', '(', '"Converting inexact array by subtracting -%.2f."', ',', 'offset', ')', 'scalar', '=', 'max_value', '/', '(', 'domain', '[', '1', ']', '-', 'domain', '[', '0', ']', ')', 'if', 'scalar', '!=', '1', ':', 'array', '*=', 'scalar', 'log', '.', 'debug', '(', '"Converting inexact array by scaling by %.2f."', ',', 'scalar', ')', 'return', 'array', '.', 'clip', '(', 'min_value', ',', 'max_value', ')', '.', 'astype', '(', 'np', '.', 'uint8', ')'] | Given an arbitrary rank-3 NumPy array, produce one representing an image.
This ensures the resulting array has a dtype of uint8 and a domain of 0-255.
Args:
array: NumPy array representing the image
domain: expected range of values in array,
defaults to (0, 1), if explicitly set to None will use the array's
own range of values and normalize them.
Returns:
normalized PIL.Image | ['Given', 'an', 'arbitrary', 'rank', '-', '3', 'NumPy', 'array', 'produce', 'one', 'representing', 'an', 'image', '.'] | train | https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/io/serialize_array.py#L31-L77 |
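
A small runnable sketch of the normalization helper above applied to a random float image in the default (0, 1) domain.

```python
import numpy as np
from lucid.misc.io.serialize_array import _normalize_array

float_img = np.random.uniform(size=(64, 64, 3))   # values already in the (0, 1) domain
byte_img = _normalize_array(float_img)            # uint8 array scaled to 0..255
print(byte_img.dtype, byte_img.min(), byte_img.max())
```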
6,615 | hthiery/python-fritzhome | pyfritzhome/cli.py | main | def main(args=None):
"""The main function."""
parser = argparse.ArgumentParser(
description='Fritz!Box Smarthome CLI tool.')
parser.add_argument('-v', action='store_true', dest='verbose',
help='be more verbose')
parser.add_argument('-f', '--fritzbox', type=str, dest='host',
help='Fritz!Box IP address', default='fritz.box')
parser.add_argument('-u', '--user', type=str, dest='user',
help='Username')
parser.add_argument('-p', '--password', type=str, dest='password',
help='Username')
parser.add_argument('-a', '--ain', type=str, dest='ain',
help='Actor Identification', default=None)
parser.add_argument('-V', '--version', action='version',
version='{version}'.format(version=__version__),
help='Print version')
_sub = parser.add_subparsers(title='Commands')
# list all devices
subparser = _sub.add_parser('list', help='List all available devices')
subparser.set_defaults(func=list_all)
# device
subparser = _sub.add_parser('device', help='Device/Actor commands')
_sub_switch = subparser.add_subparsers()
# device name
subparser = _sub_switch.add_parser('name', help='get the device name')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=device_name)
# device presence
subparser = _sub_switch.add_parser('present',
help='get the device presence')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=device_presence)
# device stats
subparser = _sub_switch.add_parser('stats',
help='get the device statistics')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=device_statistics)
# switch
subparser = _sub.add_parser('switch', help='Switch commands')
_sub_switch = subparser.add_subparsers()
# switch get
subparser = _sub_switch.add_parser('get', help='get state')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=switch_get)
# switch on
subparser = _sub_switch.add_parser('on', help='set on state')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=switch_on)
# switch off
subparser = _sub_switch.add_parser('off', help='set off state')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=switch_off)
# switch toggle
subparser = _sub_switch.add_parser('toggle', help='set off state')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=switch_toggle)
args = parser.parse_args(args)
logging.basicConfig()
if args.verbose:
logging.getLogger('pyfritzhome').setLevel(logging.DEBUG)
fritzbox = None
try:
fritzbox = Fritzhome(host=args.host, user=args.user,
password=args.password)
fritzbox.login()
args.func(fritzbox, args)
finally:
if fritzbox is not None:
fritzbox.logout() | python | def main(args=None):
"""The main function."""
parser = argparse.ArgumentParser(
description='Fritz!Box Smarthome CLI tool.')
parser.add_argument('-v', action='store_true', dest='verbose',
help='be more verbose')
parser.add_argument('-f', '--fritzbox', type=str, dest='host',
help='Fritz!Box IP address', default='fritz.box')
parser.add_argument('-u', '--user', type=str, dest='user',
help='Username')
parser.add_argument('-p', '--password', type=str, dest='password',
help='Username')
parser.add_argument('-a', '--ain', type=str, dest='ain',
help='Actor Identification', default=None)
parser.add_argument('-V', '--version', action='version',
version='{version}'.format(version=__version__),
help='Print version')
_sub = parser.add_subparsers(title='Commands')
# list all devices
subparser = _sub.add_parser('list', help='List all available devices')
subparser.set_defaults(func=list_all)
# device
subparser = _sub.add_parser('device', help='Device/Actor commands')
_sub_switch = subparser.add_subparsers()
# device name
subparser = _sub_switch.add_parser('name', help='get the device name')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=device_name)
# device presence
subparser = _sub_switch.add_parser('present',
help='get the device presence')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=device_presence)
# device stats
subparser = _sub_switch.add_parser('stats',
help='get the device statistics')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=device_statistics)
# switch
subparser = _sub.add_parser('switch', help='Switch commands')
_sub_switch = subparser.add_subparsers()
# switch get
subparser = _sub_switch.add_parser('get', help='get state')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=switch_get)
# switch on
subparser = _sub_switch.add_parser('on', help='set on state')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=switch_on)
# switch off
subparser = _sub_switch.add_parser('off', help='set off state')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=switch_off)
# switch toggle
subparser = _sub_switch.add_parser('toggle', help='set off state')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=switch_toggle)
args = parser.parse_args(args)
logging.basicConfig()
if args.verbose:
logging.getLogger('pyfritzhome').setLevel(logging.DEBUG)
fritzbox = None
try:
fritzbox = Fritzhome(host=args.host, user=args.user,
password=args.password)
fritzbox.login()
args.func(fritzbox, args)
finally:
if fritzbox is not None:
fritzbox.logout() | ['def', 'main', '(', 'args', '=', 'None', ')', ':', 'parser', '=', 'argparse', '.', 'ArgumentParser', '(', 'description', '=', "'Fritz!Box Smarthome CLI tool.'", ')', 'parser', '.', 'add_argument', '(', "'-v'", ',', 'action', '=', "'store_true'", ',', 'dest', '=', "'verbose'", ',', 'help', '=', "'be more verbose'", ')', 'parser', '.', 'add_argument', '(', "'-f'", ',', "'--fritzbox'", ',', 'type', '=', 'str', ',', 'dest', '=', "'host'", ',', 'help', '=', "'Fritz!Box IP address'", ',', 'default', '=', "'fritz.box'", ')', 'parser', '.', 'add_argument', '(', "'-u'", ',', "'--user'", ',', 'type', '=', 'str', ',', 'dest', '=', "'user'", ',', 'help', '=', "'Username'", ')', 'parser', '.', 'add_argument', '(', "'-p'", ',', "'--password'", ',', 'type', '=', 'str', ',', 'dest', '=', "'password'", ',', 'help', '=', "'Username'", ')', 'parser', '.', 'add_argument', '(', "'-a'", ',', "'--ain'", ',', 'type', '=', 'str', ',', 'dest', '=', "'ain'", ',', 'help', '=', "'Actor Identification'", ',', 'default', '=', 'None', ')', 'parser', '.', 'add_argument', '(', "'-V'", ',', "'--version'", ',', 'action', '=', "'version'", ',', 'version', '=', "'{version}'", '.', 'format', '(', 'version', '=', '__version__', ')', ',', 'help', '=', "'Print version'", ')', '_sub', '=', 'parser', '.', 'add_subparsers', '(', 'title', '=', "'Commands'", ')', '# list all devices', 'subparser', '=', '_sub', '.', 'add_parser', '(', "'list'", ',', 'help', '=', "'List all available devices'", ')', 'subparser', '.', 'set_defaults', '(', 'func', '=', 'list_all', ')', '# device', 'subparser', '=', '_sub', '.', 'add_parser', '(', "'device'", ',', 'help', '=', "'Device/Actor commands'", ')', '_sub_switch', '=', 'subparser', '.', 'add_subparsers', '(', ')', '# device name', 'subparser', '=', '_sub_switch', '.', 'add_parser', '(', "'name'", ',', 'help', '=', "'get the device name'", ')', 'subparser', '.', 'add_argument', '(', "'ain'", ',', 'type', '=', 'str', ',', 'metavar', '=', '"AIN"', ',', 'help', '=', "'Actor Identification'", ')', 'subparser', '.', 'set_defaults', '(', 'func', '=', 'device_name', ')', '# device presence', 'subparser', '=', '_sub_switch', '.', 'add_parser', '(', "'present'", ',', 'help', '=', "'get the device presence'", ')', 'subparser', '.', 'add_argument', '(', "'ain'", ',', 'type', '=', 'str', ',', 'metavar', '=', '"AIN"', ',', 'help', '=', "'Actor Identification'", ')', 'subparser', '.', 'set_defaults', '(', 'func', '=', 'device_presence', ')', '# device stats', 'subparser', '=', '_sub_switch', '.', 'add_parser', '(', "'stats'", ',', 'help', '=', "'get the device statistics'", ')', 'subparser', '.', 'add_argument', '(', "'ain'", ',', 'type', '=', 'str', ',', 'metavar', '=', '"AIN"', ',', 'help', '=', "'Actor Identification'", ')', 'subparser', '.', 'set_defaults', '(', 'func', '=', 'device_statistics', ')', '# switch', 'subparser', '=', '_sub', '.', 'add_parser', '(', "'switch'", ',', 'help', '=', "'Switch commands'", ')', '_sub_switch', '=', 'subparser', '.', 'add_subparsers', '(', ')', '# switch get', 'subparser', '=', '_sub_switch', '.', 'add_parser', '(', "'get'", ',', 'help', '=', "'get state'", ')', 'subparser', '.', 'add_argument', '(', "'ain'", ',', 'type', '=', 'str', ',', 'metavar', '=', '"AIN"', ',', 'help', '=', "'Actor Identification'", ')', 'subparser', '.', 'set_defaults', '(', 'func', '=', 'switch_get', ')', '# switch on', 'subparser', '=', '_sub_switch', '.', 'add_parser', '(', "'on'", ',', 'help', '=', "'set on state'", ')', 'subparser', '.', 'add_argument', '(', "'ain'", ',', 
'type', '=', 'str', ',', 'metavar', '=', '"AIN"', ',', 'help', '=', "'Actor Identification'", ')', 'subparser', '.', 'set_defaults', '(', 'func', '=', 'switch_on', ')', '# switch off', 'subparser', '=', '_sub_switch', '.', 'add_parser', '(', "'off'", ',', 'help', '=', "'set off state'", ')', 'subparser', '.', 'add_argument', '(', "'ain'", ',', 'type', '=', 'str', ',', 'metavar', '=', '"AIN"', ',', 'help', '=', "'Actor Identification'", ')', 'subparser', '.', 'set_defaults', '(', 'func', '=', 'switch_off', ')', '# switch toggle', 'subparser', '=', '_sub_switch', '.', 'add_parser', '(', "'toggle'", ',', 'help', '=', "'set off state'", ')', 'subparser', '.', 'add_argument', '(', "'ain'", ',', 'type', '=', 'str', ',', 'metavar', '=', '"AIN"', ',', 'help', '=', "'Actor Identification'", ')', 'subparser', '.', 'set_defaults', '(', 'func', '=', 'switch_toggle', ')', 'args', '=', 'parser', '.', 'parse_args', '(', 'args', ')', 'logging', '.', 'basicConfig', '(', ')', 'if', 'args', '.', 'verbose', ':', 'logging', '.', 'getLogger', '(', "'pyfritzhome'", ')', '.', 'setLevel', '(', 'logging', '.', 'DEBUG', ')', 'fritzbox', '=', 'None', 'try', ':', 'fritzbox', '=', 'Fritzhome', '(', 'host', '=', 'args', '.', 'host', ',', 'user', '=', 'args', '.', 'user', ',', 'password', '=', 'args', '.', 'password', ')', 'fritzbox', '.', 'login', '(', ')', 'args', '.', 'func', '(', 'fritzbox', ',', 'args', ')', 'finally', ':', 'if', 'fritzbox', 'is', 'not', 'None', ':', 'fritzbox', '.', 'logout', '(', ')'] | The main function. | ['The', 'main', 'function', '.'] | train | https://github.com/hthiery/python-fritzhome/blob/c74bd178d08a305028f316f7da35202da3526f61/pyfritzhome/cli.py#L100-L190 |
6,616 | mardix/flask-cloudy | flask_cloudy.py | Object.download_url | def download_url(self, timeout=60, name=None):
"""
Trigger a browse download
:param timeout: int - Time in seconds to expire the download
:param name: str - for LOCAL only, to rename the file being downloaded
:return: str
"""
if "local" in self.driver.name.lower():
return url_for(SERVER_ENDPOINT,
object_name=self.name,
dl=1,
name=name,
_external=True)
else:
driver_name = self.driver.name.lower()
expires = (datetime.datetime.now()
+ datetime.timedelta(seconds=timeout)).strftime("%s")
if 's3' in driver_name or 'google' in driver_name:
s2s = "GET\n\n\n{expires}\n/{object_name}"\
.format(expires=expires, object_name=self.path)
h = hmac.new(self.driver.secret.encode('utf-8'), s2s.encode('utf-8'), hashlib.sha1)
s = base64.encodestring(h.digest()).strip()
_keyIdName = "AWSAccessKeyId" if "s3" in driver_name else "GoogleAccessId"
params = {
_keyIdName: self.driver.key,
"Expires": expires,
"Signature": s
}
urlkv = urlencode(params)
return "%s?%s" % (self.secure_url, urlkv)
elif 'cloudfiles' in driver_name:
return self.driver.ex_get_object_temp_url(self._obj,
method="GET",
timeout=expires)
else:
raise NotImplemented("This provider '%s' doesn't support or "
"doesn't have a signed url "
"implemented yet" % self.provider_name) | python | def download_url(self, timeout=60, name=None):
"""
Trigger a browse download
:param timeout: int - Time in seconds to expire the download
:param name: str - for LOCAL only, to rename the file being downloaded
:return: str
"""
if "local" in self.driver.name.lower():
return url_for(SERVER_ENDPOINT,
object_name=self.name,
dl=1,
name=name,
_external=True)
else:
driver_name = self.driver.name.lower()
expires = (datetime.datetime.now()
+ datetime.timedelta(seconds=timeout)).strftime("%s")
if 's3' in driver_name or 'google' in driver_name:
s2s = "GET\n\n\n{expires}\n/{object_name}"\
.format(expires=expires, object_name=self.path)
h = hmac.new(self.driver.secret.encode('utf-8'), s2s.encode('utf-8'), hashlib.sha1)
s = base64.encodestring(h.digest()).strip()
_keyIdName = "AWSAccessKeyId" if "s3" in driver_name else "GoogleAccessId"
params = {
_keyIdName: self.driver.key,
"Expires": expires,
"Signature": s
}
urlkv = urlencode(params)
return "%s?%s" % (self.secure_url, urlkv)
elif 'cloudfiles' in driver_name:
return self.driver.ex_get_object_temp_url(self._obj,
method="GET",
timeout=expires)
else:
raise NotImplemented("This provider '%s' doesn't support or "
"doesn't have a signed url "
"implemented yet" % self.provider_name) | ['def', 'download_url', '(', 'self', ',', 'timeout', '=', '60', ',', 'name', '=', 'None', ')', ':', 'if', '"local"', 'in', 'self', '.', 'driver', '.', 'name', '.', 'lower', '(', ')', ':', 'return', 'url_for', '(', 'SERVER_ENDPOINT', ',', 'object_name', '=', 'self', '.', 'name', ',', 'dl', '=', '1', ',', 'name', '=', 'name', ',', '_external', '=', 'True', ')', 'else', ':', 'driver_name', '=', 'self', '.', 'driver', '.', 'name', '.', 'lower', '(', ')', 'expires', '=', '(', 'datetime', '.', 'datetime', '.', 'now', '(', ')', '+', 'datetime', '.', 'timedelta', '(', 'seconds', '=', 'timeout', ')', ')', '.', 'strftime', '(', '"%s"', ')', 'if', "'s3'", 'in', 'driver_name', 'or', "'google'", 'in', 'driver_name', ':', 's2s', '=', '"GET\\n\\n\\n{expires}\\n/{object_name}"', '.', 'format', '(', 'expires', '=', 'expires', ',', 'object_name', '=', 'self', '.', 'path', ')', 'h', '=', 'hmac', '.', 'new', '(', 'self', '.', 'driver', '.', 'secret', '.', 'encode', '(', "'utf-8'", ')', ',', 's2s', '.', 'encode', '(', "'utf-8'", ')', ',', 'hashlib', '.', 'sha1', ')', 's', '=', 'base64', '.', 'encodestring', '(', 'h', '.', 'digest', '(', ')', ')', '.', 'strip', '(', ')', '_keyIdName', '=', '"AWSAccessKeyId"', 'if', '"s3"', 'in', 'driver_name', 'else', '"GoogleAccessId"', 'params', '=', '{', '_keyIdName', ':', 'self', '.', 'driver', '.', 'key', ',', '"Expires"', ':', 'expires', ',', '"Signature"', ':', 's', '}', 'urlkv', '=', 'urlencode', '(', 'params', ')', 'return', '"%s?%s"', '%', '(', 'self', '.', 'secure_url', ',', 'urlkv', ')', 'elif', "'cloudfiles'", 'in', 'driver_name', ':', 'return', 'self', '.', 'driver', '.', 'ex_get_object_temp_url', '(', 'self', '.', '_obj', ',', 'method', '=', '"GET"', ',', 'timeout', '=', 'expires', ')', 'else', ':', 'raise', 'NotImplemented', '(', '"This provider \'%s\' doesn\'t support or "', '"doesn\'t have a signed url "', '"implemented yet"', '%', 'self', '.', 'provider_name', ')'] | Trigger a browse download
:param timeout: int - Time in seconds to expire the download
:param name: str - for LOCAL only, to rename the file being downloaded
:return: str | ['Trigger', 'a', 'browse', 'download', ':', 'param', 'timeout', ':', 'int', '-', 'Time', 'in', 'seconds', 'to', 'expire', 'the', 'download', ':', 'param', 'name', ':', 'str', '-', 'for', 'LOCAL', 'only', 'to', 'rename', 'the', 'file', 'being', 'downloaded', ':', 'return', ':', 'str'] | train | https://github.com/mardix/flask-cloudy/blob/8085d8fbbafec6c358f0d307bfcb795de50d4acb/flask_cloudy.py#L626-L666 |
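A hedged usage sketch for download_url above; the Storage construction, credentials and object name are illustrative assumptions, not taken from the source:
from flask_cloudy import Storage

storage = Storage(provider="S3", key="MY_KEY", secret="MY_SECRET", container="my-bucket")  # hypothetical setup
obj = storage.get("reports/summary.pdf")       # Object wrapping a stored file
signed_url = obj.download_url(timeout=120)     # signed URL that expires roughly 120 seconds from now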
6,617 | PyThaiNLP/pythainlp | pythainlp/tokenize/tcc.py | tcc | def tcc(text: str) -> str:
"""
TCC generator, generates Thai Character Clusters
:param str text: text to be tokenized to character clusters
:return: subword (character cluster)
"""
if not text or not isinstance(text, str):
return ""
p = 0
while p < len(text):
m = PAT_TCC.match(text[p:])
if m:
n = m.span()[1]
else:
n = 1
yield text[p : p + n]
p += n | python | def tcc(text: str) -> str:
"""
TCC generator, generates Thai Character Clusters
:param str text: text to be tokenized to character clusters
:return: subword (character cluster)
"""
if not text or not isinstance(text, str):
return ""
p = 0
while p < len(text):
m = PAT_TCC.match(text[p:])
if m:
n = m.span()[1]
else:
n = 1
yield text[p : p + n]
p += n | ['def', 'tcc', '(', 'text', ':', 'str', ')', '->', 'str', ':', 'if', 'not', 'text', 'or', 'not', 'isinstance', '(', 'text', ',', 'str', ')', ':', 'return', '""', 'p', '=', '0', 'while', 'p', '<', 'len', '(', 'text', ')', ':', 'm', '=', 'PAT_TCC', '.', 'match', '(', 'text', '[', 'p', ':', ']', ')', 'if', 'm', ':', 'n', '=', 'm', '.', 'span', '(', ')', '[', '1', ']', 'else', ':', 'n', '=', '1', 'yield', 'text', '[', 'p', ':', 'p', '+', 'n', ']', 'p', '+=', 'n'] | TCC generator, generates Thai Character Clusters
:param str text: text to be tokenized to character clusters
:return: subword (character cluster) | ['TCC', 'generator', 'generates', 'Thai', 'Character', 'Clusters', ':', 'param', 'str', 'text', ':', 'text', 'to', 'be', 'tokenized', 'to', 'character', 'clusters', ':', 'return', ':', 'subword', '(', 'character', 'cluster', ')'] | train | https://github.com/PyThaiNLP/pythainlp/blob/e9a300b8a99dfd1a67a955e7c06f62e4afe0fbca/pythainlp/tokenize/tcc.py#L52-L69 |
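A small usage sketch for the tcc generator above; the only property relied on is that the yielded clusters are contiguous slices of the input:
from pythainlp.tokenize.tcc import tcc

text = "ประเทศไทย"
clusters = list(tcc(text))          # consume the generator into a list of character clusters
assert "".join(clusters) == text    # clusters re-join to the original string exactly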
6,618 | materialsproject/pymatgen-db | matgendb/builders/incr.py | CollectionTracker.save | def save(self, mark):
"""Save a position in this collection.
:param mark: The position to save
:type mark: Mark
:raises: DBError, NoTrackingCollection
"""
self._check_exists()
obj = mark.as_dict()
try:
# Make a 'filter' to find/update existing record, which uses
# the field name and operation (but not the position).
filt = {k: obj[k] for k in (mark.FLD_FLD, mark.FLD_OP)}
_log.debug("save: upsert-spec={} upsert-obj={}".format(filt, obj))
self._track.update(filt, obj, upsert=True)
except pymongo.errors.PyMongoError as err:
raise DBError("{}".format(err)) | python | def save(self, mark):
"""Save a position in this collection.
:param mark: The position to save
:type mark: Mark
:raises: DBError, NoTrackingCollection
"""
self._check_exists()
obj = mark.as_dict()
try:
# Make a 'filter' to find/update existing record, which uses
# the field name and operation (but not the position).
filt = {k: obj[k] for k in (mark.FLD_FLD, mark.FLD_OP)}
_log.debug("save: upsert-spec={} upsert-obj={}".format(filt, obj))
self._track.update(filt, obj, upsert=True)
except pymongo.errors.PyMongoError as err:
raise DBError("{}".format(err)) | ['def', 'save', '(', 'self', ',', 'mark', ')', ':', 'self', '.', '_check_exists', '(', ')', 'obj', '=', 'mark', '.', 'as_dict', '(', ')', 'try', ':', "# Make a 'filter' to find/update existing record, which uses", '# the field name and operation (but not the position).', 'filt', '=', '{', 'k', ':', 'obj', '[', 'k', ']', 'for', 'k', 'in', '(', 'mark', '.', 'FLD_FLD', ',', 'mark', '.', 'FLD_OP', ')', '}', '_log', '.', 'debug', '(', '"save: upsert-spec={} upsert-obj={}"', '.', 'format', '(', 'filt', ',', 'obj', ')', ')', 'self', '.', '_track', '.', 'update', '(', 'filt', ',', 'obj', ',', 'upsert', '=', 'True', ')', 'except', 'pymongo', '.', 'errors', '.', 'PyMongoError', 'as', 'err', ':', 'raise', 'DBError', '(', '"{}"', '.', 'format', '(', 'err', ')', ')'] | Save a position in this collection.
:param mark: The position to save
:type mark: Mark
:raises: DBError, NoTrackingCollection | ['Save', 'a', 'position', 'in', 'this', 'collection', '.'] | train | https://github.com/materialsproject/pymatgen-db/blob/02e4351c2cea431407644f49193e8bf43ed39b9a/matgendb/builders/incr.py#L355-L371 |
6,619 | pycontribs/pyrax | pyrax/base_identity.py | BaseIdentity.unauthenticate | def unauthenticate(self):
"""
Clears out any credentials, tokens, and service catalog info.
"""
self.username = ""
self.password = ""
self.tenant_id = ""
self.tenant_name = ""
self.token = ""
self.expires = None
self.region = ""
self._creds_file = None
self.api_key = ""
self.services = utils.DotDict()
self.regions = utils.DotDict()
self.authenticated = False | python | def unauthenticate(self):
"""
Clears out any credentials, tokens, and service catalog info.
"""
self.username = ""
self.password = ""
self.tenant_id = ""
self.tenant_name = ""
self.token = ""
self.expires = None
self.region = ""
self._creds_file = None
self.api_key = ""
self.services = utils.DotDict()
self.regions = utils.DotDict()
self.authenticated = False | ['def', 'unauthenticate', '(', 'self', ')', ':', 'self', '.', 'username', '=', '""', 'self', '.', 'password', '=', '""', 'self', '.', 'tenant_id', '=', '""', 'self', '.', 'tenant_name', '=', '""', 'self', '.', 'token', '=', '""', 'self', '.', 'expires', '=', 'None', 'self', '.', 'region', '=', '""', 'self', '.', '_creds_file', '=', 'None', 'self', '.', 'api_key', '=', '""', 'self', '.', 'services', '=', 'utils', '.', 'DotDict', '(', ')', 'self', '.', 'regions', '=', 'utils', '.', 'DotDict', '(', ')', 'self', '.', 'authenticated', '=', 'False'] | Clears out any credentials, tokens, and service catalog info. | ['Clears', 'out', 'any', 'credentials', 'tokens', 'and', 'service', 'catalog', 'info', '.'] | train | https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/base_identity.py#L694-L709 |
6,620 | jonathf/chaospy | chaospy/distributions/sampler/sequences/halton.py | create_halton_samples | def create_halton_samples(order, dim=1, burnin=-1, primes=()):
"""
Create Halton sequence.
For ``dim == 1`` the sequence falls back to Van Der Corput sequence.
Args:
order (int):
The order of the Halton sequence. Defines the number of samples.
dim (int):
The number of dimensions in the Halton sequence.
burnin (int):
Skip the first ``burnin`` samples. If negative, the maximum of
``primes`` is used.
primes (tuple):
The (non-)prime base to calculate values along each axis. If
empty, growing prime values starting from 2 will be used.
Returns (numpy.ndarray):
Halton sequence with ``shape == (dim, order)``.
"""
primes = list(primes)
if not primes:
prime_order = 10*dim
while len(primes) < dim:
primes = create_primes(prime_order)
prime_order *= 2
primes = primes[:dim]
assert len(primes) == dim, "not enough primes"
if burnin < 0:
burnin = max(primes)
out = numpy.empty((dim, order))
indices = [idx+burnin for idx in range(order)]
for dim_ in range(dim):
out[dim_] = create_van_der_corput_samples(
indices, number_base=primes[dim_])
return out | python | def create_halton_samples(order, dim=1, burnin=-1, primes=()):
"""
Create Halton sequence.
For ``dim == 1`` the sequence falls back to Van Der Corput sequence.
Args:
order (int):
The order of the Halton sequence. Defines the number of samples.
dim (int):
The number of dimensions in the Halton sequence.
burnin (int):
Skip the first ``burnin`` samples. If negative, the maximum of
``primes`` is used.
primes (tuple):
The (non-)prime base to calculate values along each axis. If
empty, growing prime values starting from 2 will be used.
Returns (numpy.ndarray):
Halton sequence with ``shape == (dim, order)``.
"""
primes = list(primes)
if not primes:
prime_order = 10*dim
while len(primes) < dim:
primes = create_primes(prime_order)
prime_order *= 2
primes = primes[:dim]
assert len(primes) == dim, "not enough primes"
if burnin < 0:
burnin = max(primes)
out = numpy.empty((dim, order))
indices = [idx+burnin for idx in range(order)]
for dim_ in range(dim):
out[dim_] = create_van_der_corput_samples(
indices, number_base=primes[dim_])
return out | ['def', 'create_halton_samples', '(', 'order', ',', 'dim', '=', '1', ',', 'burnin', '=', '-', '1', ',', 'primes', '=', '(', ')', ')', ':', 'primes', '=', 'list', '(', 'primes', ')', 'if', 'not', 'primes', ':', 'prime_order', '=', '10', '*', 'dim', 'while', 'len', '(', 'primes', ')', '<', 'dim', ':', 'primes', '=', 'create_primes', '(', 'prime_order', ')', 'prime_order', '*=', '2', 'primes', '=', 'primes', '[', ':', 'dim', ']', 'assert', 'len', '(', 'primes', ')', '==', 'dim', ',', '"not enough primes"', 'if', 'burnin', '<', '0', ':', 'burnin', '=', 'max', '(', 'primes', ')', 'out', '=', 'numpy', '.', 'empty', '(', '(', 'dim', ',', 'order', ')', ')', 'indices', '=', '[', 'idx', '+', 'burnin', 'for', 'idx', 'in', 'range', '(', 'order', ')', ']', 'for', 'dim_', 'in', 'range', '(', 'dim', ')', ':', 'out', '[', 'dim_', ']', '=', 'create_van_der_corput_samples', '(', 'indices', ',', 'number_base', '=', 'primes', '[', 'dim_', ']', ')', 'return', 'out'] | Create Halton sequence.
For ``dim == 1`` the sequence falls back to Van Der Corput sequence.
Args:
order (int):
The order of the Halton sequence. Defines the number of samples.
dim (int):
The number of dimensions in the Halton sequence.
burnin (int):
Skip the first ``burnin`` samples. If negative, the maximum of
``primes`` is used.
primes (tuple):
The (non-)prime base to calculate values along each axis. If
empty, growing prime values starting from 2 will be used.
Returns (numpy.ndarray):
Halton sequence with ``shape == (dim, order)``. | ['Create', 'Halton', 'sequence', '.'] | train | https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/distributions/sampler/sequences/halton.py#L34-L72 |
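A quick call sketch for create_halton_samples, relying only on the documented shape contract:
import numpy
from chaospy.distributions.sampler.sequences.halton import create_halton_samples

samples = create_halton_samples(order=5, dim=2)
assert samples.shape == (2, 5)                    # shape == (dim, order) per the docstring
assert numpy.all((samples > 0) & (samples < 1))   # Halton points fall inside the unit hypercube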
6,621 | gwpy/gwpy | gwpy/types/io/hdf5.py | write_hdf5_array | def write_hdf5_array(array, h5g, path=None, attrs=None,
append=False, overwrite=False,
compression='gzip', **kwargs):
"""Write the ``array`` to an `h5py.Dataset`
Parameters
----------
array : `gwpy.types.Array`
the data object to write
h5g : `str`, `h5py.Group`
a file path to write to, or an `h5py.Group` in which to create
a new dataset
path : `str`, optional
the path inside the group at which to create the new dataset,
defaults to ``array.name``
attrs : `dict`, optional
extra metadata to write into `h5py.Dataset.attrs`, on top of
the default metadata
append : `bool`, default: `False`
if `True`, write new dataset to existing file, otherwise an
exception will be raised if the output file exists (only used if
``f`` is `str`)
overwrite : `bool`, default: `False`
if `True`, overwrite an existing dataset in an existing file,
otherwise an exception will be raised if a dataset exists with
the given name (only used if ``f`` is `str`)
compression : `str`, `int`, optional
compression option to pass to :meth:`h5py.Group.create_dataset`
**kwargs
other keyword arguments for :meth:`h5py.Group.create_dataset`
Returns
-------
datasets : `h5py.Dataset`
the newly created dataset
"""
if path is None:
path = array.name
if path is None:
raise ValueError("Cannot determine HDF5 path for %s, "
"please set ``name`` attribute, or pass ``path=`` "
"keyword when writing" % type(array).__name__)
# create dataset
dset = io_hdf5.create_dataset(h5g, path, overwrite=overwrite,
data=array.value, compression=compression,
**kwargs)
# write default metadata
write_array_metadata(dset, array)
# allow caller to specify their own metadata dict
if attrs:
for key in attrs:
dset.attrs[key] = attrs[key]
return dset | python | def write_hdf5_array(array, h5g, path=None, attrs=None,
append=False, overwrite=False,
compression='gzip', **kwargs):
"""Write the ``array`` to an `h5py.Dataset`
Parameters
----------
array : `gwpy.types.Array`
the data object to write
h5g : `str`, `h5py.Group`
a file path to write to, or an `h5py.Group` in which to create
a new dataset
path : `str`, optional
the path inside the group at which to create the new dataset,
defaults to ``array.name``
attrs : `dict`, optional
extra metadata to write into `h5py.Dataset.attrs`, on top of
the default metadata
append : `bool`, default: `False`
if `True`, write new dataset to existing file, otherwise an
exception will be raised if the output file exists (only used if
``f`` is `str`)
overwrite : `bool`, default: `False`
if `True`, overwrite an existing dataset in an existing file,
otherwise an exception will be raised if a dataset exists with
the given name (only used if ``f`` is `str`)
compression : `str`, `int`, optional
compression option to pass to :meth:`h5py.Group.create_dataset`
**kwargs
other keyword arguments for :meth:`h5py.Group.create_dataset`
Returns
-------
datasets : `h5py.Dataset`
the newly created dataset
"""
if path is None:
path = array.name
if path is None:
raise ValueError("Cannot determine HDF5 path for %s, "
"please set ``name`` attribute, or pass ``path=`` "
"keyword when writing" % type(array).__name__)
# create dataset
dset = io_hdf5.create_dataset(h5g, path, overwrite=overwrite,
data=array.value, compression=compression,
**kwargs)
# write default metadata
write_array_metadata(dset, array)
# allow caller to specify their own metadata dict
if attrs:
for key in attrs:
dset.attrs[key] = attrs[key]
return dset | ['def', 'write_hdf5_array', '(', 'array', ',', 'h5g', ',', 'path', '=', 'None', ',', 'attrs', '=', 'None', ',', 'append', '=', 'False', ',', 'overwrite', '=', 'False', ',', 'compression', '=', "'gzip'", ',', '*', '*', 'kwargs', ')', ':', 'if', 'path', 'is', 'None', ':', 'path', '=', 'array', '.', 'name', 'if', 'path', 'is', 'None', ':', 'raise', 'ValueError', '(', '"Cannot determine HDF5 path for %s, "', '"please set ``name`` attribute, or pass ``path=`` "', '"keyword when writing"', '%', 'type', '(', 'array', ')', '.', '__name__', ')', '# create dataset', 'dset', '=', 'io_hdf5', '.', 'create_dataset', '(', 'h5g', ',', 'path', ',', 'overwrite', '=', 'overwrite', ',', 'data', '=', 'array', '.', 'value', ',', 'compression', '=', 'compression', ',', '*', '*', 'kwargs', ')', '# write default metadata', 'write_array_metadata', '(', 'dset', ',', 'array', ')', '# allow caller to specify their own metadata dict', 'if', 'attrs', ':', 'for', 'key', 'in', 'attrs', ':', 'dset', '.', 'attrs', '[', 'key', ']', '=', 'attrs', '[', 'key', ']', 'return', 'dset'] | Write the ``array`` to an `h5py.Dataset`
Parameters
----------
array : `gwpy.types.Array`
the data object to write
h5g : `str`, `h5py.Group`
a file path to write to, or an `h5py.Group` in which to create
a new dataset
path : `str`, optional
the path inside the group at which to create the new dataset,
defaults to ``array.name``
attrs : `dict`, optional
extra metadata to write into `h5py.Dataset.attrs`, on top of
the default metadata
append : `bool`, default: `False`
if `True`, write new dataset to existing file, otherwise an
exception will be raised if the output file exists (only used if
``f`` is `str`)
overwrite : `bool`, default: `False`
if `True`, overwrite an existing dataset in an existing file,
otherwise an exception will be raised if a dataset exists with
the given name (only used if ``f`` is `str`)
compression : `str`, `int`, optional
compression option to pass to :meth:`h5py.Group.create_dataset`
**kwargs
other keyword arguments for :meth:`h5py.Group.create_dataset`
Returns
-------
datasets : `h5py.Dataset`
the newly created dataset | ['Write', 'the', 'array', 'to', 'an', 'h5py', '.', 'Dataset'] | train | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/io/hdf5.py#L135-L198 |
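A hedged usage sketch for the write_hdf5_array function above; the TimeSeries construction is an assumption (any gwpy Array subclass should behave the same way):
import h5py
from gwpy.timeseries import TimeSeries

data = TimeSeries([1.0, 2.0, 3.0, 4.0], sample_rate=4, name="demo")
with h5py.File("demo.h5", "w") as h5f:
    dset = write_hdf5_array(data, h5f, path="demo", attrs={"detector": "X1"})
    print(dset.attrs["detector"])    # extra metadata written on top of the defaults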
6,622 | iskandr/serializable | serializable/helpers.py | to_dict | def to_dict(obj):
"""
If the value isn't a primitive scalar or collection then it needs to
either implement to_dict (instances of Serializable) or have member
data matching each required arg of __init__.
"""
if isinstance(obj, dict):
return obj
elif hasattr(obj, "to_dict"):
return obj.to_dict()
try:
return simple_object_to_dict(obj)
except:
raise ValueError(
"Cannot convert %s : %s to dictionary" % (
obj, type(obj))) | python | def to_dict(obj):
"""
If the value isn't a primitive scalar or collection then it needs to
either implement to_dict (instances of Serializable) or have member
data matching each required arg of __init__.
"""
if isinstance(obj, dict):
return obj
elif hasattr(obj, "to_dict"):
return obj.to_dict()
try:
return simple_object_to_dict(obj)
except:
raise ValueError(
"Cannot convert %s : %s to dictionary" % (
obj, type(obj))) | ['def', 'to_dict', '(', 'obj', ')', ':', 'if', 'isinstance', '(', 'obj', ',', 'dict', ')', ':', 'return', 'obj', 'elif', 'hasattr', '(', 'obj', ',', '"to_dict"', ')', ':', 'return', 'obj', '.', 'to_dict', '(', ')', 'try', ':', 'return', 'simple_object_to_dict', '(', 'obj', ')', 'except', ':', 'raise', 'ValueError', '(', '"Cannot convert %s : %s to dictionary"', '%', '(', 'obj', ',', 'type', '(', 'obj', ')', ')', ')'] | If value wasn't isn't a primitive scalar or collection then it needs to
either implement to_dict (instances of Serializable) or has member
data matching each required arg of __init__. | ['If', 'value', 'wasn', 't', 'isn', 't', 'a', 'primitive', 'scalar', 'or', 'collection', 'then', 'it', 'needs', 'to', 'either', 'implement', 'to_dict', '(', 'instances', 'of', 'Serializable', ')', 'or', 'has', 'member', 'data', 'matching', 'each', 'required', 'arg', 'of', '__init__', '.'] | train | https://github.com/iskandr/serializable/blob/6807dfd582567b3bda609910806b7429d8d53b44/serializable/helpers.py#L229-L244 |
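A minimal sketch of the dispatch order the to_dict function above describes (the Point class is hypothetical):
class Point(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y

to_dict({"a": 1})      # dicts pass straight through
to_dict(Point(1, 2))   # no to_dict method, so it falls back to simple_object_to_dict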
6,623 | Unity-Technologies/ml-agents | ml-agents/mlagents/trainers/barracuda.py | rnn | def rnn(name, input, state, kernel, bias, new_state, number_of_gates = 2):
''' - Ht = f(Xt*Wi + Ht_1*Ri + Wbi + Rbi)
'''
nn = Build(name)
nn.tanh(
nn.mad(kernel=kernel, bias=bias,
x=nn.concat(input, state)),
out=new_state);
return nn.layers; | python | def rnn(name, input, state, kernel, bias, new_state, number_of_gates = 2):
''' - Ht = f(Xt*Wi + Ht_1*Ri + Wbi + Rbi)
'''
nn = Build(name)
nn.tanh(
nn.mad(kernel=kernel, bias=bias,
x=nn.concat(input, state)),
out=new_state);
return nn.layers; | ['def', 'rnn', '(', 'name', ',', 'input', ',', 'state', ',', 'kernel', ',', 'bias', ',', 'new_state', ',', 'number_of_gates', '=', '2', ')', ':', 'nn', '=', 'Build', '(', 'name', ')', 'nn', '.', 'tanh', '(', 'nn', '.', 'mad', '(', 'kernel', '=', 'kernel', ',', 'bias', '=', 'bias', ',', 'x', '=', 'nn', '.', 'concat', '(', 'input', ',', 'state', ')', ')', ',', 'out', '=', 'new_state', ')', 'return', 'nn', '.', 'layers'] | - Ht = f(Xt*Wi + Ht_1*Ri + Wbi + Rbi) | ['-', 'Ht', '=', 'f', '(', 'Xt', '*', 'Wi', '+', 'Ht_1', '*', 'Ri', '+', 'Wbi', '+', 'Rbi', ')'] | train | https://github.com/Unity-Technologies/ml-agents/blob/37d139af636e4a2351751fbf0f2fca5a9ed7457f/ml-agents/mlagents/trainers/barracuda.py#L309-L318 |
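The builder above encodes the vanilla RNN update Ht = tanh([Xt, Ht-1]·W + b); a numpy sketch of the same arithmetic (names are illustrative, not part of the source):
import numpy as np

def rnn_step(x_t, h_prev, kernel, bias):
    # concat(input, state) -> mad(kernel, bias) -> tanh, mirroring the builder calls
    return np.tanh(np.concatenate([x_t, h_prev]) @ kernel + bias)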
6,624 | instaloader/instaloader | instaloader/instaloader.py | Instaloader.download_pic | def download_pic(self, filename: str, url: str, mtime: datetime,
filename_suffix: Optional[str] = None, _attempt: int = 1) -> bool:
"""Downloads and saves picture with given url under given directory with given timestamp.
Returns true, if file was actually downloaded, i.e. updated."""
urlmatch = re.search('\\.[a-z0-9]*\\?', url)
file_extension = url[-3:] if urlmatch is None else urlmatch.group(0)[1:-1]
if filename_suffix is not None:
filename += '_' + filename_suffix
filename += '.' + file_extension
# A post is considered "commited" if the json file exists and is not malformed.
if self.commit_mode:
if self._committed and os.path.isfile(filename):
self.context.log(filename + ' exists', end=' ', flush=True)
return False
else:
if os.path.isfile(filename):
self.context.log(filename + ' exists', end=' ', flush=True)
return False
self.context.get_and_write_raw(url, filename)
os.utime(filename, (datetime.now().timestamp(), mtime.timestamp()))
return True | python | def download_pic(self, filename: str, url: str, mtime: datetime,
filename_suffix: Optional[str] = None, _attempt: int = 1) -> bool:
"""Downloads and saves picture with given url under given directory with given timestamp.
Returns true, if file was actually downloaded, i.e. updated."""
urlmatch = re.search('\\.[a-z0-9]*\\?', url)
file_extension = url[-3:] if urlmatch is None else urlmatch.group(0)[1:-1]
if filename_suffix is not None:
filename += '_' + filename_suffix
filename += '.' + file_extension
# A post is considered "commited" if the json file exists and is not malformed.
if self.commit_mode:
if self._committed and os.path.isfile(filename):
self.context.log(filename + ' exists', end=' ', flush=True)
return False
else:
if os.path.isfile(filename):
self.context.log(filename + ' exists', end=' ', flush=True)
return False
self.context.get_and_write_raw(url, filename)
os.utime(filename, (datetime.now().timestamp(), mtime.timestamp()))
return True | ['def', 'download_pic', '(', 'self', ',', 'filename', ':', 'str', ',', 'url', ':', 'str', ',', 'mtime', ':', 'datetime', ',', 'filename_suffix', ':', 'Optional', '[', 'str', ']', '=', 'None', ',', '_attempt', ':', 'int', '=', '1', ')', '->', 'bool', ':', 'urlmatch', '=', 're', '.', 'search', '(', "'\\\\.[a-z0-9]*\\\\?'", ',', 'url', ')', 'file_extension', '=', 'url', '[', '-', '3', ':', ']', 'if', 'urlmatch', 'is', 'None', 'else', 'urlmatch', '.', 'group', '(', '0', ')', '[', '1', ':', '-', '1', ']', 'if', 'filename_suffix', 'is', 'not', 'None', ':', 'filename', '+=', "'_'", '+', 'filename_suffix', 'filename', '+=', "'.'", '+', 'file_extension', '# A post is considered "commited" if the json file exists and is not malformed.', 'if', 'self', '.', 'commit_mode', ':', 'if', 'self', '.', '_committed', 'and', 'os', '.', 'path', '.', 'isfile', '(', 'filename', ')', ':', 'self', '.', 'context', '.', 'log', '(', 'filename', '+', "' exists'", ',', 'end', '=', "' '", ',', 'flush', '=', 'True', ')', 'return', 'False', 'else', ':', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'filename', ')', ':', 'self', '.', 'context', '.', 'log', '(', 'filename', '+', "' exists'", ',', 'end', '=', "' '", ',', 'flush', '=', 'True', ')', 'return', 'False', 'self', '.', 'context', '.', 'get_and_write_raw', '(', 'url', ',', 'filename', ')', 'os', '.', 'utime', '(', 'filename', ',', '(', 'datetime', '.', 'now', '(', ')', '.', 'timestamp', '(', ')', ',', 'mtime', '.', 'timestamp', '(', ')', ')', ')', 'return', 'True'] | Downloads and saves picture with given url under given directory with given timestamp.
Returns true, if file was actually downloaded, i.e. updated. | ['Downloads', 'and', 'saves', 'picture', 'with', 'given', 'url', 'under', 'given', 'directory', 'with', 'given', 'timestamp', '.', 'Returns', 'true', 'if', 'file', 'was', 'actually', 'downloaded', 'i', '.', 'e', '.', 'updated', '.'] | train | https://github.com/instaloader/instaloader/blob/87d877e650cd8020b04b8b51be120599a441fd5b/instaloader/instaloader.py#L209-L229 |
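A hedged call sketch for download_pic; the URL, filename and timestamp are placeholders, and the ".jpg" extension is parsed out of the query URL as in the code above:
from datetime import datetime
from instaloader import Instaloader

L = Instaloader()
L.download_pic("awesome_pic",
               url="https://example.com/image.jpg?token=abc",
               mtime=datetime(2019, 1, 1))   # writes awesome_pic.jpg and back-dates its mtime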
6,625 | serge-sans-paille/pythran | pythran/analyses/aliases.py | Aliases.visit_Call | def visit_Call(self, node):
'''
Resulting node alias to the return_alias of called function,
if the function is already known by Pythran (i.e. it's an Intrinsic)
or if Pythran already computed its ``return_alias`` behavior.
>>> from pythran import passmanager
>>> pm = passmanager.PassManager('demo')
>>> fun = """
... def f(a): return a
... def foo(b): c = f(b)"""
>>> module = ast.parse(fun)
The ``f`` function creates aliasing between
the returned value and its first argument.
>>> result = pm.gather(Aliases, module)
>>> Aliases.dump(result, filter=ast.Call)
f(b) => ['b']
This also works with intrinsics, e.g ``dict.setdefault`` which
may create alias between its third argument and the return value.
>>> fun = 'def foo(a, d): __builtin__.dict.setdefault(d, 0, a)'
>>> module = ast.parse(fun)
>>> result = pm.gather(Aliases, module)
>>> Aliases.dump(result, filter=ast.Call)
__builtin__.dict.setdefault(d, 0, a) => ['<unbound-value>', 'a']
Note that complex cases can arise, when one of the formal parameters
is already known to alias to various values:
>>> fun = """
... def f(a, b): return a and b
... def foo(A, B, C, D): return f(A or B, C or D)"""
>>> module = ast.parse(fun)
>>> result = pm.gather(Aliases, module)
>>> Aliases.dump(result, filter=ast.Call)
f((A or B), (C or D)) => ['A', 'B', 'C', 'D']
'''
self.generic_visit(node)
f = node.func
# special handler for bind functions
if isinstance(f, ast.Attribute) and f.attr == "partial":
return self.add(node, {node})
else:
return_alias = self.call_return_alias(node)
# expand collected aliases
all_aliases = set()
for value in return_alias:
# no translation
if isinstance(value, (ContainerOf, ast.FunctionDef,
Intrinsic)):
all_aliases.add(value)
elif value in self.result:
all_aliases.update(self.result[value])
else:
try:
ap = Aliases.access_path(value)
all_aliases.update(self.aliases.get(ap, ()))
except NotImplementedError:
# should we do something better here?
all_aliases.add(value)
return self.add(node, all_aliases) | python | def visit_Call(self, node):
'''
Resulting node alias to the return_alias of called function,
if the function is already known by Pythran (i.e. it's an Intrinsic)
or if Pythran already computed its ``return_alias`` behavior.
>>> from pythran import passmanager
>>> pm = passmanager.PassManager('demo')
>>> fun = """
... def f(a): return a
... def foo(b): c = f(b)"""
>>> module = ast.parse(fun)
The ``f`` function creates aliasing between
the returned value and its first argument.
>>> result = pm.gather(Aliases, module)
>>> Aliases.dump(result, filter=ast.Call)
f(b) => ['b']
This also works with intrinsics, e.g ``dict.setdefault`` which
may create alias between its third argument and the return value.
>>> fun = 'def foo(a, d): __builtin__.dict.setdefault(d, 0, a)'
>>> module = ast.parse(fun)
>>> result = pm.gather(Aliases, module)
>>> Aliases.dump(result, filter=ast.Call)
__builtin__.dict.setdefault(d, 0, a) => ['<unbound-value>', 'a']
Note that complex cases can arise, when one of the formal parameters
is already known to alias to various values:
>>> fun = """
... def f(a, b): return a and b
... def foo(A, B, C, D): return f(A or B, C or D)"""
>>> module = ast.parse(fun)
>>> result = pm.gather(Aliases, module)
>>> Aliases.dump(result, filter=ast.Call)
f((A or B), (C or D)) => ['A', 'B', 'C', 'D']
'''
self.generic_visit(node)
f = node.func
# special handler for bind functions
if isinstance(f, ast.Attribute) and f.attr == "partial":
return self.add(node, {node})
else:
return_alias = self.call_return_alias(node)
# expand collected aliases
all_aliases = set()
for value in return_alias:
# no translation
if isinstance(value, (ContainerOf, ast.FunctionDef,
Intrinsic)):
all_aliases.add(value)
elif value in self.result:
all_aliases.update(self.result[value])
else:
try:
ap = Aliases.access_path(value)
all_aliases.update(self.aliases.get(ap, ()))
except NotImplementedError:
# should we do something better here?
all_aliases.add(value)
return self.add(node, all_aliases) | ['def', 'visit_Call', '(', 'self', ',', 'node', ')', ':', 'self', '.', 'generic_visit', '(', 'node', ')', 'f', '=', 'node', '.', 'func', '# special handler for bind functions', 'if', 'isinstance', '(', 'f', ',', 'ast', '.', 'Attribute', ')', 'and', 'f', '.', 'attr', '==', '"partial"', ':', 'return', 'self', '.', 'add', '(', 'node', ',', '{', 'node', '}', ')', 'else', ':', 'return_alias', '=', 'self', '.', 'call_return_alias', '(', 'node', ')', '# expand collected aliases', 'all_aliases', '=', 'set', '(', ')', 'for', 'value', 'in', 'return_alias', ':', '# no translation', 'if', 'isinstance', '(', 'value', ',', '(', 'ContainerOf', ',', 'ast', '.', 'FunctionDef', ',', 'Intrinsic', ')', ')', ':', 'all_aliases', '.', 'add', '(', 'value', ')', 'elif', 'value', 'in', 'self', '.', 'result', ':', 'all_aliases', '.', 'update', '(', 'self', '.', 'result', '[', 'value', ']', ')', 'else', ':', 'try', ':', 'ap', '=', 'Aliases', '.', 'access_path', '(', 'value', ')', 'all_aliases', '.', 'update', '(', 'self', '.', 'aliases', '.', 'get', '(', 'ap', ',', '(', ')', ')', ')', 'except', 'NotImplementedError', ':', '# should we do something better here?', 'all_aliases', '.', 'add', '(', 'value', ')', 'return', 'self', '.', 'add', '(', 'node', ',', 'all_aliases', ')'] | Resulting node alias to the return_alias of called function,
if the function is already known by Pythran (i.e. it's an Intrinsic)
or if Pythran already computed its ``return_alias`` behavior.
>>> from pythran import passmanager
>>> pm = passmanager.PassManager('demo')
>>> fun = """
... def f(a): return a
... def foo(b): c = f(b)"""
>>> module = ast.parse(fun)
The ``f`` function creates aliasing between
the returned value and its first argument.
>>> result = pm.gather(Aliases, module)
>>> Aliases.dump(result, filter=ast.Call)
f(b) => ['b']
This also works with intrinsics, e.g ``dict.setdefault`` which
may create alias between its third argument and the return value.
>>> fun = 'def foo(a, d): __builtin__.dict.setdefault(d, 0, a)'
>>> module = ast.parse(fun)
>>> result = pm.gather(Aliases, module)
>>> Aliases.dump(result, filter=ast.Call)
__builtin__.dict.setdefault(d, 0, a) => ['<unbound-value>', 'a']
Note that complex cases can arise, when one of the formal parameters
is already known to alias to various values:
>>> fun = """
... def f(a, b): return a and b
... def foo(A, B, C, D): return f(A or B, C or D)"""
>>> module = ast.parse(fun)
>>> result = pm.gather(Aliases, module)
>>> Aliases.dump(result, filter=ast.Call)
f((A or B), (C or D)) => ['A', 'B', 'C', 'D'] | ['Resulting', 'node', 'alias', 'to', 'the', 'return_alias', 'of', 'called', 'function', 'if', 'the', 'function', 'is', 'already', 'known', 'by', 'Pythran', '(', 'i', '.', 'e', '.', 'it', 's', 'an', 'Intrinsic', ')', 'or', 'if', 'Pythran', 'already', 'computed', 'it', 's', 'return_alias', 'behavior', '.'] | train | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/analyses/aliases.py#L317-L380 |
6,626 | DLR-RM/RAFCON | source/rafcon/gui/mygaphas/tools.py | ConnectionTool._handle_temporary_connection | def _handle_temporary_connection(self, old_sink, new_sink, of_target=True):
"""Connect connection to new_sink
If new_sink is set, the connection origin or target will be set to new_sink. The connection to old_sink is
being removed.
:param gaphas.aspect.ConnectionSink old_sink: Old sink (if existing)
:param gaphas.aspect.ConnectionSink new_sink: New sink (if existing)
:param bool of_target: Whether the origin or target will be reconnected
:return:
"""
def sink_set_and_differs(sink_a, sink_b):
if not sink_a:
return False
if not sink_b:
return True
if sink_a.port != sink_b.port:
return True
return False
if sink_set_and_differs(old_sink, new_sink):
sink_port_v = old_sink.port.port_v
self._disconnect_temporarily(sink_port_v, target=of_target)
if sink_set_and_differs(new_sink, old_sink):
sink_port_v = new_sink.port.port_v
self._connect_temporarily(sink_port_v, target=of_target) | python | def _handle_temporary_connection(self, old_sink, new_sink, of_target=True):
"""Connect connection to new_sink
If new_sink is set, the connection origin or target will be set to new_sink. The connection to old_sink is
being removed.
:param gaphas.aspect.ConnectionSink old_sink: Old sink (if existing)
:param gaphas.aspect.ConnectionSink new_sink: New sink (if existing)
:param bool of_target: Whether the origin or target will be reconnected
:return:
"""
def sink_set_and_differs(sink_a, sink_b):
if not sink_a:
return False
if not sink_b:
return True
if sink_a.port != sink_b.port:
return True
return False
if sink_set_and_differs(old_sink, new_sink):
sink_port_v = old_sink.port.port_v
self._disconnect_temporarily(sink_port_v, target=of_target)
if sink_set_and_differs(new_sink, old_sink):
sink_port_v = new_sink.port.port_v
self._connect_temporarily(sink_port_v, target=of_target) | ['def', '_handle_temporary_connection', '(', 'self', ',', 'old_sink', ',', 'new_sink', ',', 'of_target', '=', 'True', ')', ':', 'def', 'sink_set_and_differs', '(', 'sink_a', ',', 'sink_b', ')', ':', 'if', 'not', 'sink_a', ':', 'return', 'False', 'if', 'not', 'sink_b', ':', 'return', 'True', 'if', 'sink_a', '.', 'port', '!=', 'sink_b', '.', 'port', ':', 'return', 'True', 'return', 'False', 'if', 'sink_set_and_differs', '(', 'old_sink', ',', 'new_sink', ')', ':', 'sink_port_v', '=', 'old_sink', '.', 'port', '.', 'port_v', 'self', '.', '_disconnect_temporarily', '(', 'sink_port_v', ',', 'target', '=', 'of_target', ')', 'if', 'sink_set_and_differs', '(', 'new_sink', ',', 'old_sink', ')', ':', 'sink_port_v', '=', 'new_sink', '.', 'port', '.', 'port_v', 'self', '.', '_connect_temporarily', '(', 'sink_port_v', ',', 'target', '=', 'of_target', ')'] | Connect connection to new_sink
If new_sink is set, the connection origin or target will be set to new_sink. The connection to old_sink is
being removed.
:param gaphas.aspect.ConnectionSink old_sink: Old sink (if existing)
:param gaphas.aspect.ConnectionSink new_sink: New sink (if existing)
:param bool of_target: Whether the origin or target will be reconnected
:return: | ['Connect', 'connection', 'to', 'new_sink'] | train | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/mygaphas/tools.py#L601-L628 |
6,627 | cocaine/cocaine-tools | cocaine/tools/dispatch.py | access_view | def access_view(name, **kwargs):
"""
Shows ACL for the specified service.
"""
ctx = Context(**kwargs)
ctx.execute_action('access:view', **{
'unicorn': ctx.repo.create_secure_service('unicorn'),
'service': name,
}) | python | def access_view(name, **kwargs):
"""
Shows ACL for the specified service.
"""
ctx = Context(**kwargs)
ctx.execute_action('access:view', **{
'unicorn': ctx.repo.create_secure_service('unicorn'),
'service': name,
}) | ['def', 'access_view', '(', 'name', ',', '*', '*', 'kwargs', ')', ':', 'ctx', '=', 'Context', '(', '*', '*', 'kwargs', ')', 'ctx', '.', 'execute_action', '(', "'access:view'", ',', '*', '*', '{', "'unicorn'", ':', 'ctx', '.', 'repo', '.', 'create_secure_service', '(', "'unicorn'", ')', ',', "'service'", ':', 'name', ',', '}', ')'] | Shows ACL for the specified service. | ['Shows', 'ACL', 'for', 'the', 'specified', 'service', '.'] | train | https://github.com/cocaine/cocaine-tools/blob/d8834f8e04ca42817d5f4e368d471484d4b3419f/cocaine/tools/dispatch.py#L1915-L1923 |
6,628 | hannes-brt/cudnn-python-wrappers | libcudnn.py | cudnnSetPooling2dDescriptor | def cudnnSetPooling2dDescriptor(poolingDesc, mode, windowHeight, windowWidth,
verticalPadding, horizontalPadding, verticalStride, horizontalStride):
""""
Initialize a 2D pooling descriptor.
This function initializes a previously created pooling descriptor object.
Parameters
----------
poolingDesc : cudnnPoolingDescriptor
Handle to a previously created pooling descriptor.
mode : cudnnPoolingMode
Enumerant to specify the pooling mode.
windowHeight : int
Height of the pooling window.
windowWidth : int
Width of the pooling window.
verticalPadding: int
Size of vertical padding.
horizontalPadding: int
Size of horizontal padding.
verticalStride : int
Pooling vertical stride.
horizontalStride : int
Pooling horizontal stride.
"""
status = _libcudnn.cudnnSetPooling2dDescriptor(poolingDesc, mode, windowHeight,
windowWidth, verticalPadding, horizontalPadding,
verticalStride, horizontalStride)
cudnnCheckStatus(status) | python | def cudnnSetPooling2dDescriptor(poolingDesc, mode, windowHeight, windowWidth,
verticalPadding, horizontalPadding, verticalStride, horizontalStride):
""""
Initialize a 2D pooling descriptor.
This function initializes a previously created pooling descriptor object.
Parameters
----------
poolingDesc : cudnnPoolingDescriptor
Handle to a previously created pooling descriptor.
mode : cudnnPoolingMode
Enumerant to specify the pooling mode.
windowHeight : int
Height of the pooling window.
windowWidth : int
Width of the pooling window.
verticalPadding: int
Size of vertical padding.
horizontalPadding: int
Size of horizontal padding.
verticalStride : int
Pooling vertical stride.
horizontalStride : int
Pooling horizontal stride.
"""
status = _libcudnn.cudnnSetPooling2dDescriptor(poolingDesc, mode, windowHeight,
windowWidth, verticalPadding, horizontalPadding,
verticalStride, horizontalStride)
cudnnCheckStatus(status) | ['def', 'cudnnSetPooling2dDescriptor', '(', 'poolingDesc', ',', 'mode', ',', 'windowHeight', ',', 'windowWidth', ',', 'verticalPadding', ',', 'horizontalPadding', ',', 'verticalStride', ',', 'horizontalStride', ')', ':', 'status', '=', '_libcudnn', '.', 'cudnnSetPooling2dDescriptor', '(', 'poolingDesc', ',', 'mode', ',', 'windowHeight', ',', 'windowWidth', ',', 'verticalPadding', ',', 'horizontalPadding', ',', 'verticalStride', ',', 'horizontalStride', ')', 'cudnnCheckStatus', '(', 'status', ')'] | Initialize a 2D pooling descriptor.
This function initializes a previously created pooling descriptor object.
Parameters
----------
poolingDesc : cudnnPoolingDescriptor
Handle to a previously created pooling descriptor.
mode : cudnnPoolingMode
Enumerant to specify the pooling mode.
windowHeight : int
Height of the pooling window.
windowWidth : int
Width of the pooling window.
verticalPadding: int
Size of vertical padding.
horizontalPadding: int
Size of horizontal padding.
verticalStride : int
Pooling vertical stride.
horizontalStride : int
Pooling horizontal stride. | ['Initialize', 'a', '2D', 'pooling', 'descriptor', '.'] | train | https://github.com/hannes-brt/cudnn-python-wrappers/blob/55aab1242924c2fd43db150cf2ccc2a3df958dd5/libcudnn.py#L1621-L1651 |
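A hedged configuration sketch, assuming the wrapper's companion helpers (cudnnCreatePoolingDescriptor and the cudnnPoolingMode mapping) from the same module:
import libcudnn

pooling_desc = libcudnn.cudnnCreatePoolingDescriptor()
libcudnn.cudnnSetPooling2dDescriptor(
    pooling_desc,
    libcudnn.cudnnPoolingMode['CUDNN_POOLING_MAX'],  # pooling mode enumerant
    2, 2,    # windowHeight, windowWidth
    0, 0,    # verticalPadding, horizontalPadding
    2, 2)    # verticalStride, horizontalStride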
6,629 | senaite/senaite.core | bika/lims/browser/analyses/view.py | AnalysesView.is_uncertainty_edition_allowed | def is_uncertainty_edition_allowed(self, analysis_brain):
"""Checks if the edition of the uncertainty field is allowed
:param analysis_brain: Brain that represents an analysis
:return: True if the user can edit the result field, otherwise False
"""
# Only allow to edit the uncertainty if result edition is allowed
if not self.is_result_edition_allowed(analysis_brain):
return False
# Get the analysis object
obj = api.get_object(analysis_brain)
# Manual setting of uncertainty is not allowed
if not obj.getAllowManualUncertainty():
return False
# Result is a detection limit -> uncertainty setting makes no sense!
if obj.getDetectionLimitOperand() in [LDL, UDL]:
return False
return True | python | def is_uncertainty_edition_allowed(self, analysis_brain):
"""Checks if the edition of the uncertainty field is allowed
:param analysis_brain: Brain that represents an analysis
:return: True if the user can edit the result field, otherwise False
"""
# Only allow to edit the uncertainty if result edition is allowed
if not self.is_result_edition_allowed(analysis_brain):
return False
# Get the analysis object
obj = api.get_object(analysis_brain)
# Manual setting of uncertainty is not allowed
if not obj.getAllowManualUncertainty():
return False
# Result is a detection limit -> uncertainty setting makes no sense!
if obj.getDetectionLimitOperand() in [LDL, UDL]:
return False
return True | ['def', 'is_uncertainty_edition_allowed', '(', 'self', ',', 'analysis_brain', ')', ':', '# Only allow to edit the uncertainty if result edition is allowed', 'if', 'not', 'self', '.', 'is_result_edition_allowed', '(', 'analysis_brain', ')', ':', 'return', 'False', '# Get the ananylsis object', 'obj', '=', 'api', '.', 'get_object', '(', 'analysis_brain', ')', '# Manual setting of uncertainty is not allowed', 'if', 'not', 'obj', '.', 'getAllowManualUncertainty', '(', ')', ':', 'return', 'False', '# Result is a detection limit -> uncertainty setting makes no sense!', 'if', 'obj', '.', 'getDetectionLimitOperand', '(', ')', 'in', '[', 'LDL', ',', 'UDL', ']', ':', 'return', 'False', 'return', 'True'] | Checks if the edition of the uncertainty field is allowed
:param analysis_brain: Brain that represents an analysis
:return: True if the user can edit the result field, otherwise False | ['Checks', 'if', 'the', 'edition', 'of', 'the', 'uncertainty', 'field', 'is', 'allowed'] | train | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/analyses/view.py#L323-L345 |
6,630 | jeremyschulman/halutz | halutz/client.py | Client.load_swagger_spec | def load_swagger_spec(self, filepath=None):
"""
Loads the origin_spec from a local JSON file. If `filepath`
is not provided, then the class `file_spec` format will be used
to create the file-path value.
"""
if filepath is True or filepath is None:
filepath = self.file_spec.format(server=self.server)
return json.load(open(filepath)) | python | def load_swagger_spec(self, filepath=None):
"""
Loads the origin_spec from a local JSON file. If `filepath`
is not provided, then the class `file_spec` format will be used
to create the file-path value.
"""
if filepath is True or filepath is None:
filepath = self.file_spec.format(server=self.server)
return json.load(open(filepath)) | ['def', 'load_swagger_spec', '(', 'self', ',', 'filepath', '=', 'None', ')', ':', 'if', 'filepath', 'is', 'True', 'or', 'filepath', 'is', 'None', ':', 'filepath', '=', 'self', '.', 'file_spec', '.', 'format', '(', 'server', '=', 'self', '.', 'server', ')', 'return', 'json', '.', 'load', '(', 'open', '(', 'filepath', ')', ')'] | Loads the origin_spec from a local JSON file. If `filepath`
is not provided, then the class `file_spec` format will be used
to create the file-path value. | ['Loads', 'the', 'origin_spec', 'from', 'a', 'local', 'JSON', 'file', '.', 'If', 'filepath', 'is', 'not', 'provided', 'then', 'the', 'class', 'file_spec', 'format', 'will', 'be', 'used', 'to', 'create', 'the', 'file', '-', 'path', 'value', '.'] | train | https://github.com/jeremyschulman/halutz/blob/6bb398dc99bf723daabd9eda02494a11252ee109/halutz/client.py#L98-L107 |
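A hedged usage sketch; the Client construction is a hypothetical assumption, only load_swagger_spec itself comes from the row:
client = Client(server="api.example.com")                 # hypothetical client setup
spec = client.load_swagger_spec()                         # path derived from file_spec and server
spec = client.load_swagger_spec("swagger.local.json")     # or load an explicit JSON file
print(spec.get("swagger"), spec.get("basePath"))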
6,631 | 20c/twentyc.tools | twentyc/tools/config.py | dict_conf | def dict_conf(filename):
"""
Return dict object for *.conf file
"""
f, ext = os.path.splitext(filename)
ext = ext.lower()
if ext == "conf" or ext == "ini":
# python config via config parser
config = ConfigParser()
config.optionxform=str
config.read(filename)
rv = {}
for section in config.sections():
rv[section] = {}
for key,value in config.items(section):
rv[section][key] = value.strip('"').strip("'").decode("string_escape")
return rv
else:
# other type of config, use munge
if munge_config:
src = munge_config.parse_url(filename)
return src.cls().load(open(filename)).get("vodka")
else:
raise Exception("'%s' type of config encountered, install munge" % ext) | python | def dict_conf(filename):
"""
Return dict object for *.conf file
"""
f, ext = os.path.splitext(filename)
ext = ext.lower()
if ext == "conf" or ext == "ini":
# python config via config parser
config = ConfigParser()
config.optionxform=str
config.read(filename)
rv = {}
for section in config.sections():
rv[section] = {}
for key,value in config.items(section):
rv[section][key] = value.strip('"').strip("'").decode("string_escape")
return rv
else:
# other type of config, use munge
if munge_config:
src = munge_config.parse_url(filename)
return src.cls().load(open(filename)).get("vodka")
else:
raise Exception("'%s' type of config encountered, install munge" % ext) | ['def', 'dict_conf', '(', 'filename', ')', ':', 'f', ',', 'ext', '=', 'os', '.', 'path', '.', 'splitext', '(', 'filename', ')', 'ext', '=', 'ext', '.', 'lower', '(', ')', 'if', 'ext', '==', '"conf"', 'or', 'ext', '==', '"ini"', ':', '# python config via config parser', 'config', '=', 'ConfigParser', '(', ')', 'config', '.', 'optionxform', '=', 'str', 'config', '.', 'read', '(', 'filename', ')', 'rv', '=', '{', '}', 'for', 'section', 'in', 'config', '.', 'sections', '(', ')', ':', 'rv', '[', 'section', ']', '=', '{', '}', 'for', 'key', ',', 'value', 'in', 'config', '.', 'items', '(', 'section', ')', ':', 'rv', '[', 'section', ']', '[', 'key', ']', '=', 'value', '.', 'strip', '(', '\'"\'', ')', '.', 'strip', '(', '"\'"', ')', '.', 'decode', '(', '"string_escape"', ')', 'return', 'rv', 'else', ':', '# other type of config, use munge', 'if', 'munge_config', ':', 'src', '=', 'munge_config', '.', 'parse_url', '(', 'filename', ')', 'return', 'src', '.', 'cls', '(', ')', '.', 'load', '(', 'open', '(', 'filename', ')', ')', '.', 'get', '(', '"vodka"', ')', 'else', ':', 'raise', 'Exception', '(', '"\'%s\' type of config encountered, install munge"', '%', 'ext', ')'] | Return dict object for *.conf file | ['Return', 'dict', 'object', 'for', '*', '.', 'conf', 'file'] | train | https://github.com/20c/twentyc.tools/blob/f8f681e64f58d449bfc32646ba8bcc57db90a233/twentyc/tools/config.py#L9-L35 |
6,632 | pepkit/peppy | peppy/utils.py | fetch_samples | def fetch_samples(proj, selector_attribute=None, selector_include=None, selector_exclude=None):
"""
Collect samples of particular protocol(s).
Protocols can't be both positively selected for and negatively
selected against. That is, it makes no sense and is not allowed to
specify both selector_include and selector_exclude protocols. On the other hand, if
neither is provided, all of the Project's Samples are returned.
If selector_include is specified, Samples without a protocol will be excluded,
but if selector_exclude is specified, protocol-less Samples will be included.
:param Project proj: the Project with Samples to fetch
:param str selector_attribute: the sample selector_attribute to select for
:param Iterable[str] | str selector_include: protocol(s) of interest;
if specified, a Sample must
:param Iterable[str] | str selector_exclude: protocol(s) to exclude
:return list[Sample]: Collection of this Project's samples with
protocol that either matches one of those in selector_include, or either
lacks a protocol or does not match one of those in selector_exclude
:raise TypeError: if both selector_include and selector_exclude protocols are
specified; TypeError since it's basically providing two arguments
when only one is accepted, so remain consistent with vanilla Python2
"""
if selector_attribute is None or (not selector_include and not selector_exclude):
# Simple; keep all samples. In this case, this function simply
# offers a list rather than an iterator.
return list(proj.samples)
# At least one of the samples has to have the specified attribute
if proj.samples and not any([hasattr(i, selector_attribute) for i in proj.samples]):
raise AttributeError("The Project samples do not have the attribute '{attr}'"
.format(attr=selector_attribute))
# Intersection between selector_include and selector_exclude is nonsense user error.
if selector_include and selector_exclude:
raise TypeError("Specify only selector_include or selector_exclude parameter, "
"not both.")
# Ensure that we're working with sets.
def make_set(items):
if isinstance(items, str):
items = [items]
return items
# Use the attr check here rather than exception block in case the
# hypothetical AttributeError would occur; we want such
# an exception to arise, not to catch it as if the Sample lacks "protocol"
if not selector_include:
# Loose; keep all samples not in the selector_exclude.
def keep(s):
return not hasattr(s, selector_attribute) or \
getattr(s, selector_attribute) not in make_set(selector_exclude)
else:
# Strict; keep only samples in the selector_include.
def keep(s):
return hasattr(s, selector_attribute) and \
getattr(s, selector_attribute) in make_set(selector_include)
return list(filter(keep, proj.samples)) | python | def fetch_samples(proj, selector_attribute=None, selector_include=None, selector_exclude=None):
"""
Collect samples of particular protocol(s).
Protocols can't be both positively selected for and negatively
selected against. That is, it makes no sense and is not allowed to
specify both selector_include and selector_exclude protocols. On the other hand, if
neither is provided, all of the Project's Samples are returned.
If selector_include is specified, Samples without a protocol will be excluded,
but if selector_exclude is specified, protocol-less Samples will be included.
:param Project proj: the Project with Samples to fetch
:param str selector_attribute: the sample selector_attribute to select for
:param Iterable[str] | str selector_include: protocol(s) of interest;
if specified, a Sample must
:param Iterable[str] | str selector_exclude: protocol(s) to exclude
:return list[Sample]: Collection of this Project's samples with
protocol that either matches one of those in selector_include, or either
lacks a protocol or does not match one of those in selector_exclude
:raise TypeError: if both selector_include and selector_exclude protocols are
specified; TypeError since it's basically providing two arguments
when only one is accepted, so remain consistent with vanilla Python2
"""
if selector_attribute is None or (not selector_include and not selector_exclude):
# Simple; keep all samples. In this case, this function simply
# offers a list rather than an iterator.
return list(proj.samples)
# At least one of the samples has to have the specified attribute
if proj.samples and not any([hasattr(i, selector_attribute) for i in proj.samples]):
raise AttributeError("The Project samples do not have the attribute '{attr}'"
.format(attr=selector_attribute))
# Intersection between selector_include and selector_exclude is nonsense user error.
if selector_include and selector_exclude:
raise TypeError("Specify only selector_include or selector_exclude parameter, "
"not both.")
# Ensure that we're working with sets.
def make_set(items):
if isinstance(items, str):
items = [items]
return items
# Use the attr check here rather than exception block in case the
# hypothetical AttributeError would occur; we want such
# an exception to arise, not to catch it as if the Sample lacks "protocol"
if not selector_include:
# Loose; keep all samples not in the selector_exclude.
def keep(s):
return not hasattr(s, selector_attribute) or \
getattr(s, selector_attribute) not in make_set(selector_exclude)
else:
# Strict; keep only samples in the selector_include.
def keep(s):
return hasattr(s, selector_attribute) and \
getattr(s, selector_attribute) in make_set(selector_include)
return list(filter(keep, proj.samples)) | ['def', 'fetch_samples', '(', 'proj', ',', 'selector_attribute', '=', 'None', ',', 'selector_include', '=', 'None', ',', 'selector_exclude', '=', 'None', ')', ':', 'if', 'selector_attribute', 'is', 'None', 'or', '(', 'not', 'selector_include', 'and', 'not', 'selector_exclude', ')', ':', '# Simple; keep all samples. In this case, this function simply', '# offers a list rather than an iterator.', 'return', 'list', '(', 'proj', '.', 'samples', ')', '# At least one of the samples has to have the specified attribute', 'if', 'proj', '.', 'samples', 'and', 'not', 'any', '(', '[', 'hasattr', '(', 'i', ',', 'selector_attribute', ')', 'for', 'i', 'in', 'proj', '.', 'samples', ']', ')', ':', 'raise', 'AttributeError', '(', '"The Project samples do not have the attribute \'{attr}\'"', '.', 'format', '(', 'attr', '=', 'selector_attribute', ')', ')', '# Intersection between selector_include and selector_exclude is nonsense user error.', 'if', 'selector_include', 'and', 'selector_exclude', ':', 'raise', 'TypeError', '(', '"Specify only selector_include or selector_exclude parameter, "', '"not both."', ')', "# Ensure that we're working with sets.", 'def', 'make_set', '(', 'items', ')', ':', 'if', 'isinstance', '(', 'items', ',', 'str', ')', ':', 'items', '=', '[', 'items', ']', 'return', 'items', '# Use the attr check here rather than exception block in case the', '# hypothetical AttributeError would occur; we want such', '# an exception to arise, not to catch it as if the Sample lacks "protocol"', 'if', 'not', 'selector_include', ':', '# Loose; keep all samples not in the selector_exclude.', 'def', 'keep', '(', 's', ')', ':', 'return', 'not', 'hasattr', '(', 's', ',', 'selector_attribute', ')', 'or', 'getattr', '(', 's', ',', 'selector_attribute', ')', 'not', 'in', 'make_set', '(', 'selector_exclude', ')', 'else', ':', '# Strict; keep only samples in the selector_include.', 'def', 'keep', '(', 's', ')', ':', 'return', 'hasattr', '(', 's', ',', 'selector_attribute', ')', 'and', 'getattr', '(', 's', ',', 'selector_attribute', ')', 'in', 'make_set', '(', 'selector_include', ')', 'return', 'list', '(', 'filter', '(', 'keep', ',', 'proj', '.', 'samples', ')', ')'] | Collect samples of particular protocol(s).
Protocols can't be both positively selected for and negatively
selected against. That is, it makes no sense and is not allowed to
specify both selector_include and selector_exclude protocols. On the other hand, if
neither is provided, all of the Project's Samples are returned.
If selector_include is specified, Samples without a protocol will be excluded,
but if selector_exclude is specified, protocol-less Samples will be included.
:param Project proj: the Project with Samples to fetch
    :param str selector_attribute: the sample attribute to select for
    :param Iterable[str] | str selector_include: protocol(s) of interest;
        if specified, a Sample must match one of them to be kept
    :param Iterable[str] | str selector_exclude: protocol(s) to exclude
:return list[Sample]: Collection of this Project's samples with
protocol that either matches one of those in selector_include, or either
lacks a protocol or does not match one of those in selector_exclude
:raise TypeError: if both selector_include and selector_exclude protocols are
specified; TypeError since it's basically providing two arguments
when only one is accepted, so remain consistent with vanilla Python2 | ['Collect', 'samples', 'of', 'particular', 'protocol', '(', 's', ')', '.'] | train | https://github.com/pepkit/peppy/blob/f0f725e1557936b81c86573a77400e6f8da78f05/peppy/utils.py#L153-L211 |
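A minimal usage sketch for the fetch_samples record above. The Project/Sample stand-ins built with SimpleNamespace are illustrative assumptions, not peppy classes; only the selector semantics follow the code shown.

from types import SimpleNamespace
from peppy.utils import fetch_samples

samples = [SimpleNamespace(name="s1", protocol="RNA-seq"),
           SimpleNamespace(name="s2", protocol="ATAC-seq"),
           SimpleNamespace(name="s3")]                     # no 'protocol' attribute
proj = SimpleNamespace(samples=samples)

# selector_include keeps only matching samples; protocol-less samples are dropped
rna_only = fetch_samples(proj, selector_attribute="protocol", selector_include="RNA-seq")
# selector_exclude drops matching samples but keeps protocol-less ones
no_atac = fetch_samples(proj, selector_attribute="protocol", selector_exclude="ATAC-seq")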
6,633 | Nachtfeuer/pipeline | spline/tools/condition.py | TokensCompressor.__end_of_list | def __end_of_list(self, ast_token):
"""Handle end of a list."""
self.list_level -= 1
if self.list_level == 0:
if self.list_entry is not None:
self.final_ast_tokens.append(self.list_entry)
self.list_entry = None
self.final_ast_tokens.append(ast_token) | python | def __end_of_list(self, ast_token):
"""Handle end of a list."""
self.list_level -= 1
if self.list_level == 0:
if self.list_entry is not None:
self.final_ast_tokens.append(self.list_entry)
self.list_entry = None
self.final_ast_tokens.append(ast_token) | ['def', '__end_of_list', '(', 'self', ',', 'ast_token', ')', ':', 'self', '.', 'list_level', '-=', '1', 'if', 'self', '.', 'list_level', '==', '0', ':', 'if', 'self', '.', 'list_entry', 'is', 'not', 'None', ':', 'self', '.', 'final_ast_tokens', '.', 'append', '(', 'self', '.', 'list_entry', ')', 'self', '.', 'list_entry', '=', 'None', 'self', '.', 'final_ast_tokens', '.', 'append', '(', 'ast_token', ')'] | Handle end of a list. | ['Handle', 'end', 'of', 'a', 'list', '.'] | train | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/condition.py#L47-L54 |
6,634 | googlefonts/ufo2ft | Lib/ufo2ft/maxContextCalc.py | maxCtxSubtable | def maxCtxSubtable(maxCtx, tag, lookupType, st):
"""Calculate usMaxContext based on a single lookup table (and an existing
max value).
"""
# single positioning, single / multiple substitution
if (tag == 'GPOS' and lookupType == 1) or (
tag == 'GSUB' and lookupType in (1, 2, 3)):
maxCtx = max(maxCtx, 1)
# pair positioning
elif tag == 'GPOS' and lookupType == 2:
maxCtx = max(maxCtx, 2)
# ligatures
elif tag == 'GSUB' and lookupType == 4:
for ligatures in st.ligatures.values():
for ligature in ligatures:
maxCtx = max(maxCtx, ligature.CompCount)
# context
elif (tag == 'GPOS' and lookupType == 7) or (
tag == 'GSUB' and lookupType == 5):
maxCtx = maxCtxContextualSubtable(
maxCtx, st, 'Pos' if tag == 'GPOS' else 'Sub')
# chained context
elif (tag == 'GPOS' and lookupType == 8) or (
tag == 'GSUB' and lookupType == 6):
maxCtx = maxCtxContextualSubtable(
maxCtx, st, 'Pos' if tag == 'GPOS' else 'Sub', 'Chain')
# extensions
elif (tag == 'GPOS' and lookupType == 9) or (
tag == 'GSUB' and lookupType == 7):
maxCtx = maxCtxSubtable(
maxCtx, tag, st.ExtensionLookupType, st.ExtSubTable)
# reverse-chained context
elif tag == 'GSUB' and lookupType == 8:
maxCtx = maxCtxContextualRule(maxCtx, st, 'Reverse')
return maxCtx | python | def maxCtxSubtable(maxCtx, tag, lookupType, st):
"""Calculate usMaxContext based on a single lookup table (and an existing
max value).
"""
# single positioning, single / multiple substitution
if (tag == 'GPOS' and lookupType == 1) or (
tag == 'GSUB' and lookupType in (1, 2, 3)):
maxCtx = max(maxCtx, 1)
# pair positioning
elif tag == 'GPOS' and lookupType == 2:
maxCtx = max(maxCtx, 2)
# ligatures
elif tag == 'GSUB' and lookupType == 4:
for ligatures in st.ligatures.values():
for ligature in ligatures:
maxCtx = max(maxCtx, ligature.CompCount)
# context
elif (tag == 'GPOS' and lookupType == 7) or (
tag == 'GSUB' and lookupType == 5):
maxCtx = maxCtxContextualSubtable(
maxCtx, st, 'Pos' if tag == 'GPOS' else 'Sub')
# chained context
elif (tag == 'GPOS' and lookupType == 8) or (
tag == 'GSUB' and lookupType == 6):
maxCtx = maxCtxContextualSubtable(
maxCtx, st, 'Pos' if tag == 'GPOS' else 'Sub', 'Chain')
# extensions
elif (tag == 'GPOS' and lookupType == 9) or (
tag == 'GSUB' and lookupType == 7):
maxCtx = maxCtxSubtable(
maxCtx, tag, st.ExtensionLookupType, st.ExtSubTable)
# reverse-chained context
elif tag == 'GSUB' and lookupType == 8:
maxCtx = maxCtxContextualRule(maxCtx, st, 'Reverse')
return maxCtx | ['def', 'maxCtxSubtable', '(', 'maxCtx', ',', 'tag', ',', 'lookupType', ',', 'st', ')', ':', '# single positioning, single / multiple substitution', 'if', '(', 'tag', '==', "'GPOS'", 'and', 'lookupType', '==', '1', ')', 'or', '(', 'tag', '==', "'GSUB'", 'and', 'lookupType', 'in', '(', '1', ',', '2', ',', '3', ')', ')', ':', 'maxCtx', '=', 'max', '(', 'maxCtx', ',', '1', ')', '# pair positioning', 'elif', 'tag', '==', "'GPOS'", 'and', 'lookupType', '==', '2', ':', 'maxCtx', '=', 'max', '(', 'maxCtx', ',', '2', ')', '# ligatures', 'elif', 'tag', '==', "'GSUB'", 'and', 'lookupType', '==', '4', ':', 'for', 'ligatures', 'in', 'st', '.', 'ligatures', '.', 'values', '(', ')', ':', 'for', 'ligature', 'in', 'ligatures', ':', 'maxCtx', '=', 'max', '(', 'maxCtx', ',', 'ligature', '.', 'CompCount', ')', '# context', 'elif', '(', 'tag', '==', "'GPOS'", 'and', 'lookupType', '==', '7', ')', 'or', '(', 'tag', '==', "'GSUB'", 'and', 'lookupType', '==', '5', ')', ':', 'maxCtx', '=', 'maxCtxContextualSubtable', '(', 'maxCtx', ',', 'st', ',', "'Pos'", 'if', 'tag', '==', "'GPOS'", 'else', "'Sub'", ')', '# chained context', 'elif', '(', 'tag', '==', "'GPOS'", 'and', 'lookupType', '==', '8', ')', 'or', '(', 'tag', '==', "'GSUB'", 'and', 'lookupType', '==', '6', ')', ':', 'maxCtx', '=', 'maxCtxContextualSubtable', '(', 'maxCtx', ',', 'st', ',', "'Pos'", 'if', 'tag', '==', "'GPOS'", 'else', "'Sub'", ',', "'Chain'", ')', '# extensions', 'elif', '(', 'tag', '==', "'GPOS'", 'and', 'lookupType', '==', '9', ')', 'or', '(', 'tag', '==', "'GSUB'", 'and', 'lookupType', '==', '7', ')', ':', 'maxCtx', '=', 'maxCtxSubtable', '(', 'maxCtx', ',', 'tag', ',', 'st', '.', 'ExtensionLookupType', ',', 'st', '.', 'ExtSubTable', ')', '# reverse-chained context', 'elif', 'tag', '==', "'GSUB'", 'and', 'lookupType', '==', '8', ':', 'maxCtx', '=', 'maxCtxContextualRule', '(', 'maxCtx', ',', 'st', ',', "'Reverse'", ')', 'return', 'maxCtx'] | Calculate usMaxContext based on a single lookup table (and an existing
max value). | ['Calculate', 'usMaxContext', 'based', 'on', 'a', 'single', 'lookup', 'table', '(', 'and', 'an', 'existing', 'max', 'value', ')', '.'] | train | https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/maxContextCalc.py#L22-L64 |
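A hedged sketch of how maxCtxSubtable above could be driven over a compiled font with fontTools; "MyFont.ttf" is a placeholder and the traversal is an assumption about how the surrounding module walks the lookup lists.

from fontTools.ttLib import TTFont
from ufo2ft.maxContextCalc import maxCtxSubtable

font = TTFont("MyFont.ttf")          # placeholder path
max_ctx = 0
for tag in ("GSUB", "GPOS"):
    if tag not in font:
        continue
    table = font[tag].table
    if table.LookupList is None:
        continue
    for lookup in table.LookupList.Lookup:
        for subtable in lookup.SubTable:
            max_ctx = maxCtxSubtable(max_ctx, tag, lookup.LookupType, subtable)
print(max_ctx)                       # candidate value for the OS/2 usMaxContext field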
6,635 | JamesRamm/archook | archook/archook.py | locate_arcgis | def locate_arcgis():
'''
Find the path to the ArcGIS Desktop installation.
Keys to check:
    HKLM/SOFTWARE/ESRI/ArcGIS 'RealVersion' - will give the version, then we can use
that to go to
HKLM/SOFTWARE/ESRI/DesktopXX.X 'InstallDir'. Where XX.X is the version
We may need to check HKLM/SOFTWARE/Wow6432Node/ESRI instead
'''
try:
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
'SOFTWARE\\Wow6432Node\\ESRI\\ArcGIS', 0)
version = _winreg.QueryValueEx(key, "RealVersion")[0][:4]
key_string = "SOFTWARE\\Wow6432Node\\ESRI\\Desktop{0}".format(version)
desktop_key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
key_string, 0)
install_dir = _winreg.QueryValueEx(desktop_key, "InstallDir")[0]
return install_dir
except WindowsError:
raise ImportError("Could not locate the ArcGIS directory on this machine") | python | def locate_arcgis():
'''
Find the path to the ArcGIS Desktop installation.
Keys to check:
    HKLM/SOFTWARE/ESRI/ArcGIS 'RealVersion' - will give the version, then we can use
that to go to
HKLM/SOFTWARE/ESRI/DesktopXX.X 'InstallDir'. Where XX.X is the version
We may need to check HKLM/SOFTWARE/Wow6432Node/ESRI instead
'''
try:
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
'SOFTWARE\\Wow6432Node\\ESRI\\ArcGIS', 0)
version = _winreg.QueryValueEx(key, "RealVersion")[0][:4]
key_string = "SOFTWARE\\Wow6432Node\\ESRI\\Desktop{0}".format(version)
desktop_key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
key_string, 0)
install_dir = _winreg.QueryValueEx(desktop_key, "InstallDir")[0]
return install_dir
except WindowsError:
raise ImportError("Could not locate the ArcGIS directory on this machine") | ['def', 'locate_arcgis', '(', ')', ':', 'try', ':', 'key', '=', '_winreg', '.', 'OpenKey', '(', '_winreg', '.', 'HKEY_LOCAL_MACHINE', ',', "'SOFTWARE\\\\Wow6432Node\\\\ESRI\\\\ArcGIS'", ',', '0', ')', 'version', '=', '_winreg', '.', 'QueryValueEx', '(', 'key', ',', '"RealVersion"', ')', '[', '0', ']', '[', ':', '4', ']', 'key_string', '=', '"SOFTWARE\\\\Wow6432Node\\\\ESRI\\\\Desktop{0}"', '.', 'format', '(', 'version', ')', 'desktop_key', '=', '_winreg', '.', 'OpenKey', '(', '_winreg', '.', 'HKEY_LOCAL_MACHINE', ',', 'key_string', ',', '0', ')', 'install_dir', '=', '_winreg', '.', 'QueryValueEx', '(', 'desktop_key', ',', '"InstallDir"', ')', '[', '0', ']', 'return', 'install_dir', 'except', 'WindowsError', ':', 'raise', 'ImportError', '(', '"Could not locate the ArcGIS directory on this machine"', ')'] | Find the path to the ArcGIS Desktop installation.
Keys to check:
    HKLM/SOFTWARE/ESRI/ArcGIS 'RealVersion' - will give the version, then we can use
that to go to
HKLM/SOFTWARE/ESRI/DesktopXX.X 'InstallDir'. Where XX.X is the version
We may need to check HKLM/SOFTWARE/Wow6432Node/ESRI instead | ['Find', 'the', 'path', 'to', 'the', 'ArcGIS', 'Desktop', 'installation', '.'] | train | https://github.com/JamesRamm/archook/blob/4cfe26802d9bd9a892f80c5a186e91a2ed142a7e/archook/archook.py#L12-L37 |
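A hedged usage sketch for locate_arcgis above: extend sys.path with directories derived from the detected install directory so arcpy can be imported. The subdirectory names are assumptions about a typical ArcGIS Desktop layout and may vary by version.

import os
import sys
from archook.archook import locate_arcgis

install_dir = locate_arcgis()                       # raises ImportError if not found
for sub in ("bin", "arcpy", "scripts"):             # assumed layout
    sys.path.append(os.path.join(install_dir, sub))
import arcpy                                        # only works where ArcGIS Desktop is installed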
6,636 | mikedh/trimesh | trimesh/creation.py | validate_polygon | def validate_polygon(obj):
"""
Make sure an input can be returned as a valid polygon.
Parameters
-------------
obj : shapely.geometry.Polygon, str (wkb), or (n, 2) float
Object which might be a polygon
Returns
------------
polygon : shapely.geometry.Polygon
Valid polygon object
Raises
-------------
ValueError
If a valid finite- area polygon isn't available
"""
if isinstance(obj, Polygon):
polygon = obj
elif util.is_shape(obj, (-1, 2)):
polygon = Polygon(obj)
elif util.is_string(obj):
polygon = load_wkb(obj)
else:
raise ValueError('Input not a polygon!')
if (not polygon.is_valid or
polygon.area < tol.zero):
raise ValueError('Polygon is zero- area or invalid!')
return polygon | python | def validate_polygon(obj):
"""
Make sure an input can be returned as a valid polygon.
Parameters
-------------
obj : shapely.geometry.Polygon, str (wkb), or (n, 2) float
Object which might be a polygon
Returns
------------
polygon : shapely.geometry.Polygon
Valid polygon object
Raises
-------------
ValueError
If a valid finite- area polygon isn't available
"""
if isinstance(obj, Polygon):
polygon = obj
elif util.is_shape(obj, (-1, 2)):
polygon = Polygon(obj)
elif util.is_string(obj):
polygon = load_wkb(obj)
else:
raise ValueError('Input not a polygon!')
if (not polygon.is_valid or
polygon.area < tol.zero):
raise ValueError('Polygon is zero- area or invalid!')
return polygon | ['def', 'validate_polygon', '(', 'obj', ')', ':', 'if', 'isinstance', '(', 'obj', ',', 'Polygon', ')', ':', 'polygon', '=', 'obj', 'elif', 'util', '.', 'is_shape', '(', 'obj', ',', '(', '-', '1', ',', '2', ')', ')', ':', 'polygon', '=', 'Polygon', '(', 'obj', ')', 'elif', 'util', '.', 'is_string', '(', 'obj', ')', ':', 'polygon', '=', 'load_wkb', '(', 'obj', ')', 'else', ':', 'raise', 'ValueError', '(', "'Input not a polygon!'", ')', 'if', '(', 'not', 'polygon', '.', 'is_valid', 'or', 'polygon', '.', 'area', '<', 'tol', '.', 'zero', ')', ':', 'raise', 'ValueError', '(', "'Polygon is zero- area or invalid!'", ')', 'return', 'polygon'] | Make sure an input can be returned as a valid polygon.
Parameters
-------------
obj : shapely.geometry.Polygon, str (wkb), or (n, 2) float
Object which might be a polygon
Returns
------------
polygon : shapely.geometry.Polygon
Valid polygon object
Raises
-------------
ValueError
If a valid finite- area polygon isn't available | ['Make', 'sure', 'an', 'input', 'can', 'be', 'returned', 'as', 'a', 'valid', 'polygon', '.'] | train | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/creation.py#L31-L62 |
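A small illustration of the accepted inputs for validate_polygon above; shapely must be installed and the coordinates are arbitrary.

from shapely.geometry import Polygon
from trimesh.creation import validate_polygon

square = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
p1 = validate_polygon(square)            # (n, 2) coordinates are wrapped in a Polygon
p2 = validate_polygon(Polygon(square))   # an existing Polygon passes straight through
# a WKB string is also accepted (decoded via load_wkb); degenerate or invalid
# input raises ValueError rather than returning a zero-area polygon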
6,637 | pyQode/pyqode.core | pyqode/core/widgets/interactive.py | InteractiveConsole.stop_process | def stop_process(self):
"""
Stop the process (by killing it).
"""
if self.process is not None:
self._user_stop = True
self.process.kill()
self.setReadOnly(True)
self._running = False | python | def stop_process(self):
"""
Stop the process (by killing it).
"""
if self.process is not None:
self._user_stop = True
self.process.kill()
self.setReadOnly(True)
self._running = False | ['def', 'stop_process', '(', 'self', ')', ':', 'if', 'self', '.', 'process', 'is', 'not', 'None', ':', 'self', '.', '_user_stop', '=', 'True', 'self', '.', 'process', '.', 'kill', '(', ')', 'self', '.', 'setReadOnly', '(', 'True', ')', 'self', '.', '_running', '=', 'False'] | Stop the process (by killing it). | ['Stop', 'the', 'process', '(', 'by', 'killing', 'it', ')', '.'] | train | https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/widgets/interactive.py#L320-L328 |
6,638 | saltstack/salt | salt/modules/solr.py | _format_url | def _format_url(handler, host=None, core_name=None, extra=None):
'''
PRIVATE METHOD
Formats the URL based on parameters, and if cores are used or not
handler : str
The request handler to hit.
host : str (None)
The solr host to query. __opts__['host'] is default
core_name : str (None)
The name of the solr core if using cores. Leave this blank if you
are not using cores or if you want to check all cores.
extra : list<str> ([])
A list of name value pairs in string format. e.g. ['name=value']
Return: str
Fully formatted URL (http://<host>:<port>/solr/<handler>?wt=json&<extra>)
'''
extra = [] if extra is None else extra
if _get_none_or_value(host) is None or host == 'None':
host = __salt__['config.option']('solr.host')
port = __salt__['config.option']('solr.port')
baseurl = __salt__['config.option']('solr.baseurl')
if _get_none_or_value(core_name) is None:
if not extra:
return "http://{0}:{1}{2}/{3}?wt=json".format(
host, port, baseurl, handler)
else:
return "http://{0}:{1}{2}/{3}?wt=json&{4}".format(
host, port, baseurl, handler, "&".join(extra))
else:
if not extra:
return "http://{0}:{1}{2}/{3}/{4}?wt=json".format(
host, port, baseurl, core_name, handler)
else:
return "http://{0}:{1}{2}/{3}/{4}?wt=json&{5}".format(
host, port, baseurl, core_name, handler, "&".join(extra)) | python | def _format_url(handler, host=None, core_name=None, extra=None):
'''
PRIVATE METHOD
Formats the URL based on parameters, and if cores are used or not
handler : str
The request handler to hit.
host : str (None)
The solr host to query. __opts__['host'] is default
core_name : str (None)
The name of the solr core if using cores. Leave this blank if you
are not using cores or if you want to check all cores.
extra : list<str> ([])
A list of name value pairs in string format. e.g. ['name=value']
Return: str
Fully formatted URL (http://<host>:<port>/solr/<handler>?wt=json&<extra>)
'''
extra = [] if extra is None else extra
if _get_none_or_value(host) is None or host == 'None':
host = __salt__['config.option']('solr.host')
port = __salt__['config.option']('solr.port')
baseurl = __salt__['config.option']('solr.baseurl')
if _get_none_or_value(core_name) is None:
if not extra:
return "http://{0}:{1}{2}/{3}?wt=json".format(
host, port, baseurl, handler)
else:
return "http://{0}:{1}{2}/{3}?wt=json&{4}".format(
host, port, baseurl, handler, "&".join(extra))
else:
if not extra:
return "http://{0}:{1}{2}/{3}/{4}?wt=json".format(
host, port, baseurl, core_name, handler)
else:
return "http://{0}:{1}{2}/{3}/{4}?wt=json&{5}".format(
host, port, baseurl, core_name, handler, "&".join(extra)) | ['def', '_format_url', '(', 'handler', ',', 'host', '=', 'None', ',', 'core_name', '=', 'None', ',', 'extra', '=', 'None', ')', ':', 'extra', '=', '[', ']', 'if', 'extra', 'is', 'None', 'else', 'extra', 'if', '_get_none_or_value', '(', 'host', ')', 'is', 'None', 'or', 'host', '==', "'None'", ':', 'host', '=', '__salt__', '[', "'config.option'", ']', '(', "'solr.host'", ')', 'port', '=', '__salt__', '[', "'config.option'", ']', '(', "'solr.port'", ')', 'baseurl', '=', '__salt__', '[', "'config.option'", ']', '(', "'solr.baseurl'", ')', 'if', '_get_none_or_value', '(', 'core_name', ')', 'is', 'None', ':', 'if', 'not', 'extra', ':', 'return', '"http://{0}:{1}{2}/{3}?wt=json"', '.', 'format', '(', 'host', ',', 'port', ',', 'baseurl', ',', 'handler', ')', 'else', ':', 'return', '"http://{0}:{1}{2}/{3}?wt=json&{4}"', '.', 'format', '(', 'host', ',', 'port', ',', 'baseurl', ',', 'handler', ',', '"&"', '.', 'join', '(', 'extra', ')', ')', 'else', ':', 'if', 'not', 'extra', ':', 'return', '"http://{0}:{1}{2}/{3}/{4}?wt=json"', '.', 'format', '(', 'host', ',', 'port', ',', 'baseurl', ',', 'core_name', ',', 'handler', ')', 'else', ':', 'return', '"http://{0}:{1}{2}/{3}/{4}?wt=json&{5}"', '.', 'format', '(', 'host', ',', 'port', ',', 'baseurl', ',', 'core_name', ',', 'handler', ',', '"&"', '.', 'join', '(', 'extra', ')', ')'] | PRIVATE METHOD
Formats the URL based on parameters, and if cores are used or not
handler : str
The request handler to hit.
host : str (None)
The solr host to query. __opts__['host'] is default
core_name : str (None)
The name of the solr core if using cores. Leave this blank if you
are not using cores or if you want to check all cores.
extra : list<str> ([])
A list of name value pairs in string format. e.g. ['name=value']
Return: str
Fully formatted URL (http://<host>:<port>/solr/<handler>?wt=json&<extra>) | ['PRIVATE', 'METHOD', 'Formats', 'the', 'URL', 'based', 'on', 'parameters', 'and', 'if', 'cores', 'are', 'used', 'or', 'not'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/solr.py#L197-L233 |
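Because _format_url above reads its defaults from Salt's __salt__ configuration, a standalone illustration of the URL shapes it produces is easier to follow; the host, port and core names below are made up.

host, port, baseurl = "localhost", "8983", "/solr"
handler, core, extra = "admin/cores", "collection1", ["action=STATUS"]

# no core: http://<host>:<port><baseurl>/<handler>?wt=json&<extra>
print("http://{0}:{1}{2}/{3}?wt=json&{4}".format(host, port, baseurl, handler, "&".join(extra)))
# with core: http://<host>:<port><baseurl>/<core>/<handler>?wt=json&<extra>
print("http://{0}:{1}{2}/{3}/{4}?wt=json&{5}".format(host, port, baseurl, core, handler, "&".join(extra)))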
6,639 | voicecom/pgtool | pgtool/pgtool.py | cmd_move | def cmd_move(db=None):
"""Rename a database within a server.
When used with --force, an existing database with the same name as DEST is replaced, the original is renamed out of
place in the form DEST_old_YYYYMMDD (unless --no-backup is specified).
"""
if db is None:
db = connect()
pg_move_extended(db, args.src, args.dest) | python | def cmd_move(db=None):
"""Rename a database within a server.
When used with --force, an existing database with the same name as DEST is replaced, the original is renamed out of
place in the form DEST_old_YYYYMMDD (unless --no-backup is specified).
"""
if db is None:
db = connect()
pg_move_extended(db, args.src, args.dest) | ['def', 'cmd_move', '(', 'db', '=', 'None', ')', ':', 'if', 'db', 'is', 'None', ':', 'db', '=', 'connect', '(', ')', 'pg_move_extended', '(', 'db', ',', 'args', '.', 'src', ',', 'args', '.', 'dest', ')'] | Rename a database within a server.
When used with --force, an existing database with the same name as DEST is replaced, the original is renamed out of
place in the form DEST_old_YYYYMMDD (unless --no-backup is specified). | ['Rename', 'a', 'database', 'within', 'a', 'server', '.'] | train | https://github.com/voicecom/pgtool/blob/36b8682bfca614d784fe58451e0cbc41315bc72e/pgtool/pgtool.py#L309-L318 |
6,640 | ArchiveTeam/wpull | wpull/processor/ftp.py | FTPProcessorSession._handle_response | def _handle_response(self, request: Request, response: Response):
'''Process a response.'''
self._item_session.update_record_value(status_code=response.reply.code)
is_listing = isinstance(response, ListingResponse)
if is_listing and not self._processor.fetch_params.remove_listing or \
not is_listing:
filename = self._file_writer_session.save_document(response)
action = self._result_rule.handle_document(self._item_session, filename)
else:
self._file_writer_session.discard_document(response)
action = self._result_rule.handle_no_document(self._item_session)
if isinstance(response, ListingResponse):
self._add_listing_links(response)
return action | python | def _handle_response(self, request: Request, response: Response):
'''Process a response.'''
self._item_session.update_record_value(status_code=response.reply.code)
is_listing = isinstance(response, ListingResponse)
if is_listing and not self._processor.fetch_params.remove_listing or \
not is_listing:
filename = self._file_writer_session.save_document(response)
action = self._result_rule.handle_document(self._item_session, filename)
else:
self._file_writer_session.discard_document(response)
action = self._result_rule.handle_no_document(self._item_session)
if isinstance(response, ListingResponse):
self._add_listing_links(response)
return action | ['def', '_handle_response', '(', 'self', ',', 'request', ':', 'Request', ',', 'response', ':', 'Response', ')', ':', 'self', '.', '_item_session', '.', 'update_record_value', '(', 'status_code', '=', 'response', '.', 'reply', '.', 'code', ')', 'is_listing', '=', 'isinstance', '(', 'response', ',', 'ListingResponse', ')', 'if', 'is_listing', 'and', 'not', 'self', '.', '_processor', '.', 'fetch_params', '.', 'remove_listing', 'or', 'not', 'is_listing', ':', 'filename', '=', 'self', '.', '_file_writer_session', '.', 'save_document', '(', 'response', ')', 'action', '=', 'self', '.', '_result_rule', '.', 'handle_document', '(', 'self', '.', '_item_session', ',', 'filename', ')', 'else', ':', 'self', '.', '_file_writer_session', '.', 'discard_document', '(', 'response', ')', 'action', '=', 'self', '.', '_result_rule', '.', 'handle_no_document', '(', 'self', '.', '_item_session', ')', 'if', 'isinstance', '(', 'response', ',', 'ListingResponse', ')', ':', 'self', '.', '_add_listing_links', '(', 'response', ')', 'return', 'action'] | Process a response. | ['Process', 'a', 'response', '.'] | train | https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/processor/ftp.py#L377-L393 |
6,641 | tradenity/python-sdk | tradenity/resources/table_rate_shipping.py | TableRateShipping.get_table_rate_shipping_by_id | def get_table_rate_shipping_by_id(cls, table_rate_shipping_id, **kwargs):
"""Find TableRateShipping
Return single instance of TableRateShipping by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_table_rate_shipping_by_id(table_rate_shipping_id, async=True)
>>> result = thread.get()
:param async bool
:param str table_rate_shipping_id: ID of tableRateShipping to return (required)
:return: TableRateShipping
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_table_rate_shipping_by_id_with_http_info(table_rate_shipping_id, **kwargs)
else:
(data) = cls._get_table_rate_shipping_by_id_with_http_info(table_rate_shipping_id, **kwargs)
return data | python | def get_table_rate_shipping_by_id(cls, table_rate_shipping_id, **kwargs):
"""Find TableRateShipping
Return single instance of TableRateShipping by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_table_rate_shipping_by_id(table_rate_shipping_id, async=True)
>>> result = thread.get()
:param async bool
:param str table_rate_shipping_id: ID of tableRateShipping to return (required)
:return: TableRateShipping
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_table_rate_shipping_by_id_with_http_info(table_rate_shipping_id, **kwargs)
else:
(data) = cls._get_table_rate_shipping_by_id_with_http_info(table_rate_shipping_id, **kwargs)
return data | ['def', 'get_table_rate_shipping_by_id', '(', 'cls', ',', 'table_rate_shipping_id', ',', '*', '*', 'kwargs', ')', ':', 'kwargs', '[', "'_return_http_data_only'", ']', '=', 'True', 'if', 'kwargs', '.', 'get', '(', "'async'", ')', ':', 'return', 'cls', '.', '_get_table_rate_shipping_by_id_with_http_info', '(', 'table_rate_shipping_id', ',', '*', '*', 'kwargs', ')', 'else', ':', '(', 'data', ')', '=', 'cls', '.', '_get_table_rate_shipping_by_id_with_http_info', '(', 'table_rate_shipping_id', ',', '*', '*', 'kwargs', ')', 'return', 'data'] | Find TableRateShipping
Return single instance of TableRateShipping by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_table_rate_shipping_by_id(table_rate_shipping_id, async=True)
>>> result = thread.get()
:param async bool
:param str table_rate_shipping_id: ID of tableRateShipping to return (required)
:return: TableRateShipping
If the method is called asynchronously,
returns the request thread. | ['Find', 'TableRateShipping'] | train | https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/table_rate_shipping.py#L660-L680 |
6,642 | dmwilcox/vcard-tools | vcardtools/vcf_merge.py | VcardFieldsEqual | def VcardFieldsEqual(field1, field2):
"""Handle comparing vCard fields where inputs are lists of components.
Handle parameters? Are any used aside from 'TYPE'?
Note: force cast to string to compare sub-objects like Name and Address
"""
field1_vals = set([ str(f.value) for f in field1 ])
field2_vals = set([ str(f.value) for f in field2 ])
if field1_vals == field2_vals:
return True
else:
return False | python | def VcardFieldsEqual(field1, field2):
"""Handle comparing vCard fields where inputs are lists of components.
Handle parameters? Are any used aside from 'TYPE'?
Note: force cast to string to compare sub-objects like Name and Address
"""
field1_vals = set([ str(f.value) for f in field1 ])
field2_vals = set([ str(f.value) for f in field2 ])
if field1_vals == field2_vals:
return True
else:
return False | ['def', 'VcardFieldsEqual', '(', 'field1', ',', 'field2', ')', ':', 'field1_vals', '=', 'set', '(', '[', 'str', '(', 'f', '.', 'value', ')', 'for', 'f', 'in', 'field1', ']', ')', 'field2_vals', '=', 'set', '(', '[', 'str', '(', 'f', '.', 'value', ')', 'for', 'f', 'in', 'field2', ']', ')', 'if', 'field1_vals', '==', 'field2_vals', ':', 'return', 'True', 'else', ':', 'return', 'False'] | Handle comparing vCard fields where inputs are lists of components.
Handle parameters? Are any used aside from 'TYPE'?
Note: force cast to string to compare sub-objects like Name and Address | ['Handle', 'comparing', 'vCard', 'fields', 'where', 'inputs', 'are', 'lists', 'of', 'components', '.'] | train | https://github.com/dmwilcox/vcard-tools/blob/1b0f62a0f4c128c7a212ecdca34ff2acb746b262/vcardtools/vcf_merge.py#L33-L44 |
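A minimal sketch of the set-based comparison in VcardFieldsEqual above; the SimpleNamespace objects stand in for parsed vCard components (anything exposing .value), which is an assumption made for illustration.

from types import SimpleNamespace
from vcardtools.vcf_merge import VcardFieldsEqual

tel_a = [SimpleNamespace(value="+1 555 0100"), SimpleNamespace(value="+1 555 0101")]
tel_b = [SimpleNamespace(value="+1 555 0101"), SimpleNamespace(value="+1 555 0100")]

print(VcardFieldsEqual(tel_a, tel_b))      # True: same values, order ignored
print(VcardFieldsEqual(tel_a, tel_b[:1]))  # False: the value sets differ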
6,643 | rueckstiess/mtools | mtools/util/log2code.py | Log2CodeConverter._variable_parts | def _variable_parts(self, line, codeline):
"""Return variable parts of the codeline, given the static parts."""
var_subs = []
# codeline has pattern and then has the outputs in different versions
if codeline:
var_subs = self._find_variable(codeline.pattern, line)
else:
# make variable part of the line string without all the other stuff
line_str = self._strip_datetime(self._strip_counters(line))
var_subs = [line_str.strip()]
return var_subs | python | def _variable_parts(self, line, codeline):
"""Return variable parts of the codeline, given the static parts."""
var_subs = []
# codeline has pattern and then has the outputs in different versions
if codeline:
var_subs = self._find_variable(codeline.pattern, line)
else:
# make variable part of the line string without all the other stuff
line_str = self._strip_datetime(self._strip_counters(line))
var_subs = [line_str.strip()]
return var_subs | ['def', '_variable_parts', '(', 'self', ',', 'line', ',', 'codeline', ')', ':', 'var_subs', '=', '[', ']', '# codeline has pattern and then has the outputs in different versions', 'if', 'codeline', ':', 'var_subs', '=', 'self', '.', '_find_variable', '(', 'codeline', '.', 'pattern', ',', 'line', ')', 'else', ':', '# make variable part of the line string without all the other stuff', 'line_str', '=', 'self', '.', '_strip_datetime', '(', 'self', '.', '_strip_counters', '(', 'line', ')', ')', 'var_subs', '=', '[', 'line_str', '.', 'strip', '(', ')', ']', 'return', 'var_subs'] | Return variable parts of the codeline, given the static parts. | ['Return', 'variable', 'parts', 'of', 'the', 'codeline', 'given', 'the', 'static', 'parts', '.'] | train | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/log2code.py#L134-L144 |
6,644 | gem/oq-engine | openquake/hmtk/seismicity/utils.py | bootstrap_histogram_1D | def bootstrap_histogram_1D(
values, intervals, uncertainties=None,
normalisation=False, number_bootstraps=None, boundaries=None):
'''
Bootstrap samples a set of vectors
:param numpy.ndarray values:
The data values
:param numpy.ndarray intervals:
The bin edges
:param numpy.ndarray uncertainties:
The standard deviations of each observation
:param bool normalisation:
If True then returns the histogram as a density function
:param int number_bootstraps:
Number of bootstraps
:param tuple boundaries:
(Lower, Upper) bounds on the data
:param returns:
1-D histogram of data
'''
if not number_bootstraps or np.all(np.fabs(uncertainties < PRECISION)):
# No bootstraps or all uncertaintes are zero - return ordinary
# histogram
#output = np.histogram(values, intervals)[0]
output = hmtk_histogram_1D(values, intervals)
if normalisation:
output = output / float(np.sum(output))
else:
output = output
return output
else:
temp_hist = np.zeros([len(intervals) - 1, number_bootstraps],
dtype=float)
for iloc in range(0, number_bootstraps):
sample = sample_truncated_gaussian_vector(values,
uncertainties,
boundaries)
#output = np.histogram(sample, intervals)[0]
output = hmtk_histogram_1D(sample, intervals)
temp_hist[:, iloc] = output
output = np.sum(temp_hist, axis=1)
if normalisation:
output = output / float(np.sum(output))
else:
output = output / float(number_bootstraps)
return output | python | def bootstrap_histogram_1D(
values, intervals, uncertainties=None,
normalisation=False, number_bootstraps=None, boundaries=None):
'''
Bootstrap samples a set of vectors
:param numpy.ndarray values:
The data values
:param numpy.ndarray intervals:
The bin edges
:param numpy.ndarray uncertainties:
The standard deviations of each observation
:param bool normalisation:
If True then returns the histogram as a density function
:param int number_bootstraps:
Number of bootstraps
:param tuple boundaries:
(Lower, Upper) bounds on the data
:param returns:
1-D histogram of data
'''
if not number_bootstraps or np.all(np.fabs(uncertainties < PRECISION)):
# No bootstraps or all uncertaintes are zero - return ordinary
# histogram
#output = np.histogram(values, intervals)[0]
output = hmtk_histogram_1D(values, intervals)
if normalisation:
output = output / float(np.sum(output))
else:
output = output
return output
else:
temp_hist = np.zeros([len(intervals) - 1, number_bootstraps],
dtype=float)
for iloc in range(0, number_bootstraps):
sample = sample_truncated_gaussian_vector(values,
uncertainties,
boundaries)
#output = np.histogram(sample, intervals)[0]
output = hmtk_histogram_1D(sample, intervals)
temp_hist[:, iloc] = output
output = np.sum(temp_hist, axis=1)
if normalisation:
output = output / float(np.sum(output))
else:
output = output / float(number_bootstraps)
return output | ['def', 'bootstrap_histogram_1D', '(', 'values', ',', 'intervals', ',', 'uncertainties', '=', 'None', ',', 'normalisation', '=', 'False', ',', 'number_bootstraps', '=', 'None', ',', 'boundaries', '=', 'None', ')', ':', 'if', 'not', 'number_bootstraps', 'or', 'np', '.', 'all', '(', 'np', '.', 'fabs', '(', 'uncertainties', '<', 'PRECISION', ')', ')', ':', '# No bootstraps or all uncertaintes are zero - return ordinary', '# histogram', '#output = np.histogram(values, intervals)[0]', 'output', '=', 'hmtk_histogram_1D', '(', 'values', ',', 'intervals', ')', 'if', 'normalisation', ':', 'output', '=', 'output', '/', 'float', '(', 'np', '.', 'sum', '(', 'output', ')', ')', 'else', ':', 'output', '=', 'output', 'return', 'output', 'else', ':', 'temp_hist', '=', 'np', '.', 'zeros', '(', '[', 'len', '(', 'intervals', ')', '-', '1', ',', 'number_bootstraps', ']', ',', 'dtype', '=', 'float', ')', 'for', 'iloc', 'in', 'range', '(', '0', ',', 'number_bootstraps', ')', ':', 'sample', '=', 'sample_truncated_gaussian_vector', '(', 'values', ',', 'uncertainties', ',', 'boundaries', ')', '#output = np.histogram(sample, intervals)[0]', 'output', '=', 'hmtk_histogram_1D', '(', 'sample', ',', 'intervals', ')', 'temp_hist', '[', ':', ',', 'iloc', ']', '=', 'output', 'output', '=', 'np', '.', 'sum', '(', 'temp_hist', ',', 'axis', '=', '1', ')', 'if', 'normalisation', ':', 'output', '=', 'output', '/', 'float', '(', 'np', '.', 'sum', '(', 'output', ')', ')', 'else', ':', 'output', '=', 'output', '/', 'float', '(', 'number_bootstraps', ')', 'return', 'output'] | Bootstrap samples a set of vectors
:param numpy.ndarray values:
The data values
:param numpy.ndarray intervals:
The bin edges
:param numpy.ndarray uncertainties:
The standard deviations of each observation
:param bool normalisation:
If True then returns the histogram as a density function
:param int number_bootstraps:
Number of bootstraps
:param tuple boundaries:
(Lower, Upper) bounds on the data
:param returns:
1-D histogram of data | ['Bootstrap', 'samples', 'a', 'set', 'of', 'vectors'] | train | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/seismicity/utils.py#L435-L483 |
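A hedged usage sketch for bootstrap_histogram_1D above using synthetic magnitudes; the bin edges, sigma and bootstrap count are arbitrary choices.

import numpy as np
from openquake.hmtk.seismicity.utils import bootstrap_histogram_1D

mags = np.array([4.1, 4.3, 4.7, 5.2, 5.6, 6.0, 6.1])
bins = np.arange(4.0, 6.6, 0.5)

counts = bootstrap_histogram_1D(mags, bins)                      # plain histogram
density = bootstrap_histogram_1D(mags, bins, normalisation=True) # sums to 1.0

# with per-observation uncertainties, the histogram is averaged over resamples
sigmas = np.full_like(mags, 0.1)
smoothed = bootstrap_histogram_1D(mags, bins, uncertainties=sigmas,
                                  number_bootstraps=100)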
6,645 | pytroll/posttroll | posttroll/subscriber.py | Subscriber._add_hook | def _add_hook(self, socket, callback):
"""Generic hook. The passed socket has to be "receive only".
"""
self._hooks.append(socket)
self._hooks_cb[socket] = callback
if self.poller:
self.poller.register(socket, POLLIN) | python | def _add_hook(self, socket, callback):
"""Generic hook. The passed socket has to be "receive only".
"""
self._hooks.append(socket)
self._hooks_cb[socket] = callback
if self.poller:
self.poller.register(socket, POLLIN) | ['def', '_add_hook', '(', 'self', ',', 'socket', ',', 'callback', ')', ':', 'self', '.', '_hooks', '.', 'append', '(', 'socket', ')', 'self', '.', '_hooks_cb', '[', 'socket', ']', '=', 'callback', 'if', 'self', '.', 'poller', ':', 'self', '.', 'poller', '.', 'register', '(', 'socket', ',', 'POLLIN', ')'] | Generic hook. The passed socket has to be "receive only". | ['Generic', 'hook', '.', 'The', 'passed', 'socket', 'has', 'to', 'be', 'receive', 'only', '.'] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/subscriber.py#L166-L172 |
6,646 | LettError/MutatorMath | Lib/mutatorMath/ufo/document.py | DesignSpaceDocumentWriter.writeInfo | def writeInfo(self, location=None, masters=None):
""" Write font into the current instance.
Note: the masters attribute is ignored at the moment.
"""
if self.currentInstance is None:
return
infoElement = ET.Element("info")
if location is not None:
locationElement = self._makeLocationElement(location)
infoElement.append(locationElement)
self.currentInstance.append(infoElement) | python | def writeInfo(self, location=None, masters=None):
""" Write font into the current instance.
Note: the masters attribute is ignored at the moment.
"""
if self.currentInstance is None:
return
infoElement = ET.Element("info")
if location is not None:
locationElement = self._makeLocationElement(location)
infoElement.append(locationElement)
self.currentInstance.append(infoElement) | ['def', 'writeInfo', '(', 'self', ',', 'location', '=', 'None', ',', 'masters', '=', 'None', ')', ':', 'if', 'self', '.', 'currentInstance', 'is', 'None', ':', 'return', 'infoElement', '=', 'ET', '.', 'Element', '(', '"info"', ')', 'if', 'location', 'is', 'not', 'None', ':', 'locationElement', '=', 'self', '.', '_makeLocationElement', '(', 'location', ')', 'infoElement', '.', 'append', '(', 'locationElement', ')', 'self', '.', 'currentInstance', '.', 'append', '(', 'infoElement', ')'] | Write font into the current instance.
Note: the masters attribute is ignored at the moment. | ['Write', 'font', 'into', 'the', 'current', 'instance', '.', 'Note', ':', 'the', 'masters', 'attribute', 'is', 'ignored', 'at', 'the', 'moment', '.'] | train | https://github.com/LettError/MutatorMath/blob/10318fc4e7c9cee9df6130826829baea3054a42b/Lib/mutatorMath/ufo/document.py#L291-L301 |
6,647 | knagra/farnsworth | managers/forms.py | ManagerForm.clean | def clean(self):
''' TinyMCE adds a placeholder <br> if no data is inserted. In this case, remove it. '''
cleaned_data = super(ManagerForm, self).clean()
compensation = cleaned_data.get("compensation")
duties = cleaned_data.get("duties")
if compensation == '<br data-mce-bogus="1">':
cleaned_data["compensation"] = ""
if duties == '<br data-mce-bogus="1">':
cleaned_data["duties"] = ""
return cleaned_data | python | def clean(self):
''' TinyMCE adds a placeholder <br> if no data is inserted. In this case, remove it. '''
cleaned_data = super(ManagerForm, self).clean()
compensation = cleaned_data.get("compensation")
duties = cleaned_data.get("duties")
if compensation == '<br data-mce-bogus="1">':
cleaned_data["compensation"] = ""
if duties == '<br data-mce-bogus="1">':
cleaned_data["duties"] = ""
return cleaned_data | ['def', 'clean', '(', 'self', ')', ':', 'cleaned_data', '=', 'super', '(', 'ManagerForm', ',', 'self', ')', '.', 'clean', '(', ')', 'compensation', '=', 'cleaned_data', '.', 'get', '(', '"compensation"', ')', 'duties', '=', 'cleaned_data', '.', 'get', '(', '"duties"', ')', 'if', 'compensation', '==', '\'<br data-mce-bogus="1">\'', ':', 'cleaned_data', '[', '"compensation"', ']', '=', '""', 'if', 'duties', '==', '\'<br data-mce-bogus="1">\'', ':', 'cleaned_data', '[', '"duties"', ']', '=', '""', 'return', 'cleaned_data'] | TinyMCE adds a placeholder <br> if no data is inserted. In this case, remove it. | ['TinyMCE', 'adds', 'a', 'placeholder', '<br', '>', 'if', 'no', 'data', 'is', 'inserted', '.', 'In', 'this', 'case', 'remove', 'it', '.'] | train | https://github.com/knagra/farnsworth/blob/1b6589f0d9fea154f0a1e2231ed906764ed26d26/managers/forms.py#L50-L59 |
6,648 | OCHA-DAP/hdx-python-api | src/hdx/data/user.py | User.email_users | def email_users(users, subject, text_body, html_body=None, sender=None, configuration=None, **kwargs):
# type: (List['User'], str, str, Optional[str], Optional[str], Optional[Configuration], Any) -> None
"""Email a list of users
Args:
users (List[User]): List of users
subject (str): Email subject
text_body (str): Plain text email body
html_body (str): HTML email body
sender (Optional[str]): Email sender. Defaults to SMTP username.
configuration (Optional[Configuration]): HDX configuration. Defaults to configuration of first user in list.
**kwargs: See below
mail_options (List): Mail options (see smtplib documentation)
rcpt_options (List): Recipient options (see smtplib documentation)
Returns:
None
"""
if not users:
raise ValueError('No users supplied')
recipients = list()
for user in users:
recipients.append(user.data['email'])
if configuration is None:
configuration = users[0].configuration
configuration.emailer().send(recipients, subject, text_body, html_body=html_body, sender=sender, **kwargs) | python | def email_users(users, subject, text_body, html_body=None, sender=None, configuration=None, **kwargs):
# type: (List['User'], str, str, Optional[str], Optional[str], Optional[Configuration], Any) -> None
"""Email a list of users
Args:
users (List[User]): List of users
subject (str): Email subject
text_body (str): Plain text email body
html_body (str): HTML email body
sender (Optional[str]): Email sender. Defaults to SMTP username.
configuration (Optional[Configuration]): HDX configuration. Defaults to configuration of first user in list.
**kwargs: See below
mail_options (List): Mail options (see smtplib documentation)
rcpt_options (List): Recipient options (see smtplib documentation)
Returns:
None
"""
if not users:
raise ValueError('No users supplied')
recipients = list()
for user in users:
recipients.append(user.data['email'])
if configuration is None:
configuration = users[0].configuration
configuration.emailer().send(recipients, subject, text_body, html_body=html_body, sender=sender, **kwargs) | ['def', 'email_users', '(', 'users', ',', 'subject', ',', 'text_body', ',', 'html_body', '=', 'None', ',', 'sender', '=', 'None', ',', 'configuration', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', "# type: (List['User'], str, str, Optional[str], Optional[str], Optional[Configuration], Any) -> None", 'if', 'not', 'users', ':', 'raise', 'ValueError', '(', "'No users supplied'", ')', 'recipients', '=', 'list', '(', ')', 'for', 'user', 'in', 'users', ':', 'recipients', '.', 'append', '(', 'user', '.', 'data', '[', "'email'", ']', ')', 'if', 'configuration', 'is', 'None', ':', 'configuration', '=', 'users', '[', '0', ']', '.', 'configuration', 'configuration', '.', 'emailer', '(', ')', '.', 'send', '(', 'recipients', ',', 'subject', ',', 'text_body', ',', 'html_body', '=', 'html_body', ',', 'sender', '=', 'sender', ',', '*', '*', 'kwargs', ')'] | Email a list of users
Args:
users (List[User]): List of users
subject (str): Email subject
text_body (str): Plain text email body
html_body (str): HTML email body
sender (Optional[str]): Email sender. Defaults to SMTP username.
configuration (Optional[Configuration]): HDX configuration. Defaults to configuration of first user in list.
**kwargs: See below
mail_options (List): Mail options (see smtplib documentation)
rcpt_options (List): Recipient options (see smtplib documentation)
Returns:
None | ['Email', 'a', 'list', 'of', 'users'] | train | https://github.com/OCHA-DAP/hdx-python-api/blob/212440f54f73805826a16db77dbcb6033b18a313/src/hdx/data/user.py#L184-L209 |
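A hedged sketch for email_users above; it assumes an HDX Configuration has already been created and SMTP settings are configured, and the recipients, subject and bodies are placeholders.

from hdx.data.user import User

recipients = User.get_all_users()          # or any list of User objects
User.email_users(recipients,
                 subject="Dataset refreshed",
                 text_body="The dataset has been refreshed.",
                 html_body="<p>The dataset has been refreshed.</p>")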
6,649 | ontio/ontology-python-sdk | ontology/wallet/wallet_manager.py | WalletManager.get_account_by_b58_address | def get_account_by_b58_address(self, b58_address: str, password: str) -> Account:
"""
:param b58_address: a base58 encode address.
:param password: a password which is used to decrypt the encrypted private key.
:return:
"""
acct = self.get_account_data_by_b58_address(b58_address)
n = self.wallet_in_mem.scrypt.n
salt = base64.b64decode(acct.salt)
private_key = Account.get_gcm_decoded_private_key(acct.key, password, b58_address, salt, n, self.scheme)
return Account(private_key, self.scheme) | python | def get_account_by_b58_address(self, b58_address: str, password: str) -> Account:
"""
:param b58_address: a base58 encode address.
:param password: a password which is used to decrypt the encrypted private key.
:return:
"""
acct = self.get_account_data_by_b58_address(b58_address)
n = self.wallet_in_mem.scrypt.n
salt = base64.b64decode(acct.salt)
private_key = Account.get_gcm_decoded_private_key(acct.key, password, b58_address, salt, n, self.scheme)
return Account(private_key, self.scheme) | ['def', 'get_account_by_b58_address', '(', 'self', ',', 'b58_address', ':', 'str', ',', 'password', ':', 'str', ')', '->', 'Account', ':', 'acct', '=', 'self', '.', 'get_account_data_by_b58_address', '(', 'b58_address', ')', 'n', '=', 'self', '.', 'wallet_in_mem', '.', 'scrypt', '.', 'n', 'salt', '=', 'base64', '.', 'b64decode', '(', 'acct', '.', 'salt', ')', 'private_key', '=', 'Account', '.', 'get_gcm_decoded_private_key', '(', 'acct', '.', 'key', ',', 'password', ',', 'b58_address', ',', 'salt', ',', 'n', ',', 'self', '.', 'scheme', ')', 'return', 'Account', '(', 'private_key', ',', 'self', '.', 'scheme', ')'] | :param b58_address: a base58 encode address.
:param password: a password which is used to decrypt the encrypted private key.
:return: | [':', 'param', 'b58_address', ':', 'a', 'base58', 'encode', 'address', '.', ':', 'param', 'password', ':', 'a', 'password', 'which', 'is', 'used', 'to', 'decrypt', 'the', 'encrypted', 'private', 'key', '.', ':', 'return', ':'] | train | https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/wallet/wallet_manager.py#L422-L432 |
6,650 | testing-cabal/mock | mock/mock.py | _patch.start | def start(self):
"""Activate a patch, returning any created mock."""
result = self.__enter__()
self._active_patches.append(self)
return result | python | def start(self):
"""Activate a patch, returning any created mock."""
result = self.__enter__()
self._active_patches.append(self)
return result | ['def', 'start', '(', 'self', ')', ':', 'result', '=', 'self', '.', '__enter__', '(', ')', 'self', '.', '_active_patches', '.', 'append', '(', 'self', ')', 'return', 'result'] | Activate a patch, returning any created mock. | ['Activate', 'a', 'patch', 'returning', 'any', 'created', 'mock', '.'] | train | https://github.com/testing-cabal/mock/blob/2f356b28d42a1fd0057c9d8763d3a2cac2284165/mock/mock.py#L1497-L1501 |
6,651 | PyThaiNLP/pythainlp | pythainlp/transliterate/royin.py | romanize | def romanize(text: str) -> str:
"""
Rendering Thai words in the Latin alphabet or "romanization",
using the Royal Thai General System of Transcription (RTGS),
which is the official system published by the Royal Institute of Thailand.
ถอดเสียงภาษาไทยเป็นอักษรละติน
:param str text: Thai text to be romanized
:return: A string of Thai words rendered in the Latin alphabet.
"""
words = word_tokenize(text)
romanized_words = [_romanize(word) for word in words]
return "".join(romanized_words) | python | def romanize(text: str) -> str:
"""
Rendering Thai words in the Latin alphabet or "romanization",
using the Royal Thai General System of Transcription (RTGS),
which is the official system published by the Royal Institute of Thailand.
ถอดเสียงภาษาไทยเป็นอักษรละติน
:param str text: Thai text to be romanized
:return: A string of Thai words rendered in the Latin alphabet.
"""
words = word_tokenize(text)
romanized_words = [_romanize(word) for word in words]
return "".join(romanized_words) | ['def', 'romanize', '(', 'text', ':', 'str', ')', '->', 'str', ':', 'words', '=', 'word_tokenize', '(', 'text', ')', 'romanized_words', '=', '[', '_romanize', '(', 'word', ')', 'for', 'word', 'in', 'words', ']', 'return', '""', '.', 'join', '(', 'romanized_words', ')'] | Rendering Thai words in the Latin alphabet or "romanization",
using the Royal Thai General System of Transcription (RTGS),
which is the official system published by the Royal Institute of Thailand.
ถอดเสียงภาษาไทยเป็นอักษรละติน
:param str text: Thai text to be romanized
:return: A string of Thai words rendered in the Latin alphabet. | ['Rendering', 'Thai', 'words', 'in', 'the', 'Latin', 'alphabet', 'or', 'romanization', 'using', 'the', 'Royal', 'Thai', 'General', 'System', 'of', 'Transcription', '(', 'RTGS', ')', 'which', 'is', 'the', 'official', 'system', 'published', 'by', 'the', 'Royal', 'Institute', 'of', 'Thailand', '.', 'ถอดเสียงภาษาไทยเป็นอักษรละติน', ':', 'param', 'str', 'text', ':', 'Thai', 'text', 'to', 'be', 'romanized', ':', 'return', ':', 'A', 'string', 'of', 'Thai', 'words', 'rendered', 'in', 'the', 'Latin', 'alphabet', '.'] | train | https://github.com/PyThaiNLP/pythainlp/blob/e9a300b8a99dfd1a67a955e7c06f62e4afe0fbca/pythainlp/transliterate/royin.py#L199-L211 |
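A hedged usage sketch; the public wrapper in pythainlp.transliterate selects this RTGS ("royin") implementation by default, and the expected outputs shown are approximate.

from pythainlp.transliterate import romanize

print(romanize("แมว"))        # e.g. "maeo"
print(romanize("ภาษาไทย"))    # the text is word-tokenized first, then each word is romanized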
6,652 | msfrank/cifparser | cifparser/grammar.py | parse_line | def parse_line(text):
"""
:param text:
:type text: str
:return:
"""
indent,text = calculate_indent(text)
results = line_parser.parseString(text, parseAll=True).asList()
return indent,results[0] | python | def parse_line(text):
"""
:param text:
:type text: str
:return:
"""
indent,text = calculate_indent(text)
results = line_parser.parseString(text, parseAll=True).asList()
return indent,results[0] | ['def', 'parse_line', '(', 'text', ')', ':', 'indent', ',', 'text', '=', 'calculate_indent', '(', 'text', ')', 'results', '=', 'line_parser', '.', 'parseString', '(', 'text', ',', 'parseAll', '=', 'True', ')', '.', 'asList', '(', ')', 'return', 'indent', ',', 'results', '[', '0', ']'] | :param text:
:type text: str
:return: | [':', 'param', 'text', ':', ':', 'type', 'text', ':', 'str', ':', 'return', ':'] | train | https://github.com/msfrank/cifparser/blob/ecd899ba2e7b990e2cec62b115742d830e7e4384/cifparser/grammar.py#L73-L81 |
6,653 | cloudant/python-cloudant | src/cloudant/design_document.py | DesignDocument.update_list_function | def update_list_function(self, list_name, list_func):
"""
Modifies/overwrites an existing list function in the
locally cached DesignDocument indexes dictionary.
:param str list_name: Name used to identify the list function.
:param str list_func: Javascript list function.
"""
if self.get_list_function(list_name) is None:
raise CloudantArgumentError(113, list_name)
self.lists.__setitem__(list_name, codify(list_func)) | python | def update_list_function(self, list_name, list_func):
"""
Modifies/overwrites an existing list function in the
locally cached DesignDocument indexes dictionary.
:param str list_name: Name used to identify the list function.
:param str list_func: Javascript list function.
"""
if self.get_list_function(list_name) is None:
raise CloudantArgumentError(113, list_name)
self.lists.__setitem__(list_name, codify(list_func)) | ['def', 'update_list_function', '(', 'self', ',', 'list_name', ',', 'list_func', ')', ':', 'if', 'self', '.', 'get_list_function', '(', 'list_name', ')', 'is', 'None', ':', 'raise', 'CloudantArgumentError', '(', '113', ',', 'list_name', ')', 'self', '.', 'lists', '.', '__setitem__', '(', 'list_name', ',', 'codify', '(', 'list_func', ')', ')'] | Modifies/overwrites an existing list function in the
locally cached DesignDocument indexes dictionary.
:param str list_name: Name used to identify the list function.
:param str list_func: Javascript list function. | ['Modifies', '/', 'overwrites', 'an', 'existing', 'list', 'function', 'in', 'the', 'locally', 'cached', 'DesignDocument', 'indexes', 'dictionary', '.'] | train | https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/design_document.py#L399-L410 |
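A hedged sketch for update_list_function above; the server URL, credentials, database and design document names are placeholders, and the design document must already contain a list function with that name (otherwise CloudantArgumentError is raised).

from cloudant.client import CouchDB
from cloudant.design_document import DesignDocument

client = CouchDB("admin", "pass", url="http://localhost:5984", connect=True)
db = client["mydb"]

ddoc = DesignDocument(db, "_design/render")
ddoc.fetch()
ddoc.update_list_function(
    "to_html",
    "function (head, req) { provides('html', function () { return '<ul></ul>'; }); }")
ddoc.save()
client.disconnect()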
6,654 | Azure/azure-cli-extensions | src/alias/azext_alias/hooks.py | enable_aliases_autocomplete | def enable_aliases_autocomplete(_, **kwargs):
"""
Enable aliases autocomplete by injecting aliases into Azure CLI tab completion list.
"""
external_completions = kwargs.get('external_completions', [])
prefix = kwargs.get('cword_prefix', [])
cur_commands = kwargs.get('comp_words', [])
alias_table = get_alias_table()
# Transform aliases if they are in current commands,
# so parser can get the correct subparser when chaining aliases
_transform_cur_commands(cur_commands, alias_table=alias_table)
for alias, alias_command in filter_aliases(alias_table):
if alias.startswith(prefix) and alias.strip() != prefix and _is_autocomplete_valid(cur_commands, alias_command):
# Only autocomplete the first word because alias is space-delimited
external_completions.append(alias)
# Append spaces if necessary (https://github.com/kislyuk/argcomplete/blob/master/argcomplete/__init__.py#L552-L559)
prequote = kwargs.get('cword_prequote', '')
continuation_chars = "=/:"
if len(external_completions) == 1 and external_completions[0][-1] not in continuation_chars and not prequote:
external_completions[0] += ' ' | python | def enable_aliases_autocomplete(_, **kwargs):
"""
Enable aliases autocomplete by injecting aliases into Azure CLI tab completion list.
"""
external_completions = kwargs.get('external_completions', [])
prefix = kwargs.get('cword_prefix', [])
cur_commands = kwargs.get('comp_words', [])
alias_table = get_alias_table()
# Transform aliases if they are in current commands,
# so parser can get the correct subparser when chaining aliases
_transform_cur_commands(cur_commands, alias_table=alias_table)
for alias, alias_command in filter_aliases(alias_table):
if alias.startswith(prefix) and alias.strip() != prefix and _is_autocomplete_valid(cur_commands, alias_command):
# Only autocomplete the first word because alias is space-delimited
external_completions.append(alias)
# Append spaces if necessary (https://github.com/kislyuk/argcomplete/blob/master/argcomplete/__init__.py#L552-L559)
prequote = kwargs.get('cword_prequote', '')
continuation_chars = "=/:"
if len(external_completions) == 1 and external_completions[0][-1] not in continuation_chars and not prequote:
external_completions[0] += ' ' | ['def', 'enable_aliases_autocomplete', '(', '_', ',', '*', '*', 'kwargs', ')', ':', 'external_completions', '=', 'kwargs', '.', 'get', '(', "'external_completions'", ',', '[', ']', ')', 'prefix', '=', 'kwargs', '.', 'get', '(', "'cword_prefix'", ',', '[', ']', ')', 'cur_commands', '=', 'kwargs', '.', 'get', '(', "'comp_words'", ',', '[', ']', ')', 'alias_table', '=', 'get_alias_table', '(', ')', '# Transform aliases if they are in current commands,', '# so parser can get the correct subparser when chaining aliases', '_transform_cur_commands', '(', 'cur_commands', ',', 'alias_table', '=', 'alias_table', ')', 'for', 'alias', ',', 'alias_command', 'in', 'filter_aliases', '(', 'alias_table', ')', ':', 'if', 'alias', '.', 'startswith', '(', 'prefix', ')', 'and', 'alias', '.', 'strip', '(', ')', '!=', 'prefix', 'and', '_is_autocomplete_valid', '(', 'cur_commands', ',', 'alias_command', ')', ':', '# Only autocomplete the first word because alias is space-delimited', 'external_completions', '.', 'append', '(', 'alias', ')', '# Append spaces if necessary (https://github.com/kislyuk/argcomplete/blob/master/argcomplete/__init__.py#L552-L559)', 'prequote', '=', 'kwargs', '.', 'get', '(', "'cword_prequote'", ',', "''", ')', 'continuation_chars', '=', '"=/:"', 'if', 'len', '(', 'external_completions', ')', '==', '1', 'and', 'external_completions', '[', '0', ']', '[', '-', '1', ']', 'not', 'in', 'continuation_chars', 'and', 'not', 'prequote', ':', 'external_completions', '[', '0', ']', '+=', "' '"] | Enable aliases autocomplete by injecting aliases into Azure CLI tab completion list. | ['Enable', 'aliases', 'autocomplete', 'by', 'injecting', 'aliases', 'into', 'Azure', 'CLI', 'tab', 'completion', 'list', '.'] | train | https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/alias/azext_alias/hooks.py#L54-L75 |
6,655 | titusjan/argos | argos/repo/repotreemodel.py | RepoTreeModel.itemData | def itemData(self, treeItem, column, role=Qt.DisplayRole):
""" Returns the data stored under the given role for the item. O
"""
if role == Qt.DisplayRole:
if column == self.COL_NODE_NAME:
return treeItem.nodeName
elif column == self.COL_NODE_PATH:
return treeItem.nodePath
elif column == self.COL_SHAPE:
if treeItem.isSliceable:
return " x ".join(str(elem) for elem in treeItem.arrayShape)
else:
return ""
elif column == self.COL_IS_OPEN:
# Only show for RTIs that actually open resources.
# TODO: this must be clearer. Use CanFetchChildren? Set is Open to None by default?
if treeItem.hasChildren():
return str(treeItem.isOpen)
else:
return ""
elif column == self.COL_ELEM_TYPE:
return treeItem.elementTypeName
elif column == self.COL_FILE_NAME:
return treeItem.fileName if hasattr(treeItem, 'fileName') else ''
elif column == self.COL_UNIT:
return treeItem.unit
elif column == self.COL_MISSING_DATA:
return to_string(treeItem.missingDataValue, noneFormat='') # empty str for Nones
elif column == self.COL_RTI_TYPE:
return type_name(treeItem)
elif column == self.COL_EXCEPTION:
return str(treeItem.exception) if treeItem.exception else ''
else:
raise ValueError("Invalid column: {}".format(column))
elif role == Qt.ToolTipRole:
if treeItem.exception:
return str(treeItem.exception)
if column == self.COL_NODE_NAME:
return treeItem.nodePath # Also path when hovering over the name
elif column == self.COL_NODE_PATH:
return treeItem.nodePath
elif column == self.COL_SHAPE:
if treeItem.isSliceable:
return " x ".join(str(elem) for elem in treeItem.arrayShape)
else:
return ""
elif column == self.COL_UNIT:
return treeItem.unit
elif column == self.COL_MISSING_DATA:
return to_string(treeItem.missingDataValue, noneFormat='') # empty str for Nones
elif column == self.COL_RTI_TYPE:
return type_name(treeItem)
elif column == self.COL_ELEM_TYPE:
return treeItem.elementTypeName
elif column == self.COL_FILE_NAME:
return treeItem.fileName if hasattr(treeItem, 'fileName') else ''
else:
return None
else:
return super(RepoTreeModel, self).itemData(treeItem, column, role=role) | python | def itemData(self, treeItem, column, role=Qt.DisplayRole):
""" Returns the data stored under the given role for the item. O
"""
if role == Qt.DisplayRole:
if column == self.COL_NODE_NAME:
return treeItem.nodeName
elif column == self.COL_NODE_PATH:
return treeItem.nodePath
elif column == self.COL_SHAPE:
if treeItem.isSliceable:
return " x ".join(str(elem) for elem in treeItem.arrayShape)
else:
return ""
elif column == self.COL_IS_OPEN:
# Only show for RTIs that actually open resources.
# TODO: this must be clearer. Use CanFetchChildren? Set is Open to None by default?
if treeItem.hasChildren():
return str(treeItem.isOpen)
else:
return ""
elif column == self.COL_ELEM_TYPE:
return treeItem.elementTypeName
elif column == self.COL_FILE_NAME:
return treeItem.fileName if hasattr(treeItem, 'fileName') else ''
elif column == self.COL_UNIT:
return treeItem.unit
elif column == self.COL_MISSING_DATA:
return to_string(treeItem.missingDataValue, noneFormat='') # empty str for Nones
elif column == self.COL_RTI_TYPE:
return type_name(treeItem)
elif column == self.COL_EXCEPTION:
return str(treeItem.exception) if treeItem.exception else ''
else:
raise ValueError("Invalid column: {}".format(column))
elif role == Qt.ToolTipRole:
if treeItem.exception:
return str(treeItem.exception)
if column == self.COL_NODE_NAME:
return treeItem.nodePath # Also path when hovering over the name
elif column == self.COL_NODE_PATH:
return treeItem.nodePath
elif column == self.COL_SHAPE:
if treeItem.isSliceable:
return " x ".join(str(elem) for elem in treeItem.arrayShape)
else:
return ""
elif column == self.COL_UNIT:
return treeItem.unit
elif column == self.COL_MISSING_DATA:
return to_string(treeItem.missingDataValue, noneFormat='') # empty str for Nones
elif column == self.COL_RTI_TYPE:
return type_name(treeItem)
elif column == self.COL_ELEM_TYPE:
return treeItem.elementTypeName
elif column == self.COL_FILE_NAME:
return treeItem.fileName if hasattr(treeItem, 'fileName') else ''
else:
return None
else:
return super(RepoTreeModel, self).itemData(treeItem, column, role=role) | ['def', 'itemData', '(', 'self', ',', 'treeItem', ',', 'column', ',', 'role', '=', 'Qt', '.', 'DisplayRole', ')', ':', 'if', 'role', '==', 'Qt', '.', 'DisplayRole', ':', 'if', 'column', '==', 'self', '.', 'COL_NODE_NAME', ':', 'return', 'treeItem', '.', 'nodeName', 'elif', 'column', '==', 'self', '.', 'COL_NODE_PATH', ':', 'return', 'treeItem', '.', 'nodePath', 'elif', 'column', '==', 'self', '.', 'COL_SHAPE', ':', 'if', 'treeItem', '.', 'isSliceable', ':', 'return', '" x "', '.', 'join', '(', 'str', '(', 'elem', ')', 'for', 'elem', 'in', 'treeItem', '.', 'arrayShape', ')', 'else', ':', 'return', '""', 'elif', 'column', '==', 'self', '.', 'COL_IS_OPEN', ':', '# Only show for RTIs that actually open resources.', '# TODO: this must be clearer. Use CanFetchChildren? Set is Open to None by default?', 'if', 'treeItem', '.', 'hasChildren', '(', ')', ':', 'return', 'str', '(', 'treeItem', '.', 'isOpen', ')', 'else', ':', 'return', '""', 'elif', 'column', '==', 'self', '.', 'COL_ELEM_TYPE', ':', 'return', 'treeItem', '.', 'elementTypeName', 'elif', 'column', '==', 'self', '.', 'COL_FILE_NAME', ':', 'return', 'treeItem', '.', 'fileName', 'if', 'hasattr', '(', 'treeItem', ',', "'fileName'", ')', 'else', "''", 'elif', 'column', '==', 'self', '.', 'COL_UNIT', ':', 'return', 'treeItem', '.', 'unit', 'elif', 'column', '==', 'self', '.', 'COL_MISSING_DATA', ':', 'return', 'to_string', '(', 'treeItem', '.', 'missingDataValue', ',', 'noneFormat', '=', "''", ')', '# empty str for Nones', 'elif', 'column', '==', 'self', '.', 'COL_RTI_TYPE', ':', 'return', 'type_name', '(', 'treeItem', ')', 'elif', 'column', '==', 'self', '.', 'COL_EXCEPTION', ':', 'return', 'str', '(', 'treeItem', '.', 'exception', ')', 'if', 'treeItem', '.', 'exception', 'else', "''", 'else', ':', 'raise', 'ValueError', '(', '"Invalid column: {}"', '.', 'format', '(', 'column', ')', ')', 'elif', 'role', '==', 'Qt', '.', 'ToolTipRole', ':', 'if', 'treeItem', '.', 'exception', ':', 'return', 'str', '(', 'treeItem', '.', 'exception', ')', 'if', 'column', '==', 'self', '.', 'COL_NODE_NAME', ':', 'return', 'treeItem', '.', 'nodePath', '# Also path when hovering over the name', 'elif', 'column', '==', 'self', '.', 'COL_NODE_PATH', ':', 'return', 'treeItem', '.', 'nodePath', 'elif', 'column', '==', 'self', '.', 'COL_SHAPE', ':', 'if', 'treeItem', '.', 'isSliceable', ':', 'return', '" x "', '.', 'join', '(', 'str', '(', 'elem', ')', 'for', 'elem', 'in', 'treeItem', '.', 'arrayShape', ')', 'else', ':', 'return', '""', 'elif', 'column', '==', 'self', '.', 'COL_UNIT', ':', 'return', 'treeItem', '.', 'unit', 'elif', 'column', '==', 'self', '.', 'COL_MISSING_DATA', ':', 'return', 'to_string', '(', 'treeItem', '.', 'missingDataValue', ',', 'noneFormat', '=', "''", ')', '# empty str for Nones', 'elif', 'column', '==', 'self', '.', 'COL_RTI_TYPE', ':', 'return', 'type_name', '(', 'treeItem', ')', 'elif', 'column', '==', 'self', '.', 'COL_ELEM_TYPE', ':', 'return', 'treeItem', '.', 'elementTypeName', 'elif', 'column', '==', 'self', '.', 'COL_FILE_NAME', ':', 'return', 'treeItem', '.', 'fileName', 'if', 'hasattr', '(', 'treeItem', ',', "'fileName'", ')', 'else', "''", 'else', ':', 'return', 'None', 'else', ':', 'return', 'super', '(', 'RepoTreeModel', ',', 'self', ')', '.', 'itemData', '(', 'treeItem', ',', 'column', ',', 'role', '=', 'role', ')'] | Returns the data stored under the given role for the item. 
O | ['Returns', 'the', 'data', 'stored', 'under', 'the', 'given', 'role', 'for', 'the', 'item', '.', 'O'] | train | https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/repo/repotreemodel.py#L53-L113 |
6,656 | Microsoft/nni | src/sdk/pynni/nni/curvefitting_assessor/model_factory.py | CurveModel.predict | def predict(self, trial_history):
"""predict the value of target position
Parameters
----------
trial_history: list
The history performance matrix of each trial.
Returns
-------
float
expected final result performance of this hyperparameter config
"""
self.trial_history = trial_history
self.point_num = len(trial_history)
self.fit_theta()
self.filter_curve()
if self.effective_model_num < LEAST_FITTED_FUNCTION:
# different curve's predictions are too scattered, requires more information
return None
self.mcmc_sampling()
ret = 0
for i in range(NUM_OF_INSTANCE):
ret += self.f_comb(self.target_pos, self.weight_samples[i])
return ret / NUM_OF_INSTANCE | python | def predict(self, trial_history):
"""predict the value of target position
Parameters
----------
trial_history: list
The history performance matrix of each trial.
Returns
-------
float
expected final result performance of this hyperparameter config
"""
self.trial_history = trial_history
self.point_num = len(trial_history)
self.fit_theta()
self.filter_curve()
if self.effective_model_num < LEAST_FITTED_FUNCTION:
# different curve's predictions are too scattered, requires more information
return None
self.mcmc_sampling()
ret = 0
for i in range(NUM_OF_INSTANCE):
ret += self.f_comb(self.target_pos, self.weight_samples[i])
return ret / NUM_OF_INSTANCE | ['def', 'predict', '(', 'self', ',', 'trial_history', ')', ':', 'self', '.', 'trial_history', '=', 'trial_history', 'self', '.', 'point_num', '=', 'len', '(', 'trial_history', ')', 'self', '.', 'fit_theta', '(', ')', 'self', '.', 'filter_curve', '(', ')', 'if', 'self', '.', 'effective_model_num', '<', 'LEAST_FITTED_FUNCTION', ':', "# different curve's predictions are too scattered, requires more information", 'return', 'None', 'self', '.', 'mcmc_sampling', '(', ')', 'ret', '=', '0', 'for', 'i', 'in', 'range', '(', 'NUM_OF_INSTANCE', ')', ':', 'ret', '+=', 'self', '.', 'f_comb', '(', 'self', '.', 'target_pos', ',', 'self', '.', 'weight_samples', '[', 'i', ']', ')', 'return', 'ret', '/', 'NUM_OF_INSTANCE'] | predict the value of target position
Parameters
----------
trial_history: list
The history performance matrix of each trial.
Returns
-------
float
expected final result performance of this hyperparameter config | ['predict', 'the', 'value', 'of', 'target', 'position', 'Parameters', '----------', 'trial_history', ':', 'list', 'The', 'history', 'performance', 'matrix', 'of', 'each', 'trial', '.'] | train | https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/curvefitting_assessor/model_factory.py#L320-L344 |
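The predict() method above boils down to a Monte Carlo average: evaluate a weighted combination of fitted learning curves at the target step for each sampled weight vector, then take the mean. A hedged toy sketch of that final averaging step follows; the curve models, Dirichlet sampling and constants are illustrative stand-ins, not NNI's fitted models or its MCMC sampler.

import numpy as np

def f_comb(x, weights, curves):
    # weighted sum of individual learning-curve models evaluated at step x
    return sum(w * f(x) for w, f in zip(weights, curves))

curves = [lambda x: 1 - np.exp(-0.05 * x),   # toy saturating curves standing in for the
          lambda x: x / (x + 20.0)]          # parametric models fitted by fit_theta()
weight_samples = np.random.dirichlet([1.0, 1.0], size=1000)  # stand-in for MCMC samples

target_pos = 100
estimate = np.mean([f_comb(target_pos, w, curves) for w in weight_samples])
print(round(float(estimate), 3))   # Monte Carlo estimate of the final performance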
6,657 | grundic/yagocd | yagocd/resources/job.py | JobInstance.properties | def properties(self):
"""
Property for accessing property (doh!) manager of the current job.
:return: instance of :class:`yagocd.resources.property.PropertyManager`
:rtype: yagocd.resources.property.PropertyManager
"""
return PropertyManager(
session=self._session,
pipeline_name=self.pipeline_name,
pipeline_counter=self.pipeline_counter,
stage_name=self.stage_name,
stage_counter=self.stage_counter,
job_name=self.data.name
) | python | def properties(self):
"""
Property for accessing property (doh!) manager of the current job.
:return: instance of :class:`yagocd.resources.property.PropertyManager`
:rtype: yagocd.resources.property.PropertyManager
"""
return PropertyManager(
session=self._session,
pipeline_name=self.pipeline_name,
pipeline_counter=self.pipeline_counter,
stage_name=self.stage_name,
stage_counter=self.stage_counter,
job_name=self.data.name
) | ['def', 'properties', '(', 'self', ')', ':', 'return', 'PropertyManager', '(', 'session', '=', 'self', '.', '_session', ',', 'pipeline_name', '=', 'self', '.', 'pipeline_name', ',', 'pipeline_counter', '=', 'self', '.', 'pipeline_counter', ',', 'stage_name', '=', 'self', '.', 'stage_name', ',', 'stage_counter', '=', 'self', '.', 'stage_counter', ',', 'job_name', '=', 'self', '.', 'data', '.', 'name', ')'] | Property for accessing property (doh!) manager of the current job.
:return: instance of :class:`yagocd.resources.property.PropertyManager`
:rtype: yagocd.resources.property.PropertyManager | ['Property', 'for', 'accessing', 'property', '(', 'doh!', ')', 'manager', 'of', 'the', 'current', 'job', '.'] | train | https://github.com/grundic/yagocd/blob/4c75336ae6f107c8723d37b15e52169151822127/yagocd/resources/job.py#L235-L249 |
6,658 | googleapis/google-cloud-python | storage/google/cloud/storage/_helpers.py | _PropertyMixin._patch_property | def _patch_property(self, name, value):
"""Update field of this object's properties.
This method will only update the field provided and will not
touch the other fields.
It **will not** reload the properties from the server. The behavior is
local only and syncing occurs via :meth:`patch`.
:type name: str
:param name: The field name to update.
:type value: object
:param value: The value being updated.
"""
self._changes.add(name)
self._properties[name] = value | python | def _patch_property(self, name, value):
"""Update field of this object's properties.
This method will only update the field provided and will not
touch the other fields.
It **will not** reload the properties from the server. The behavior is
local only and syncing occurs via :meth:`patch`.
:type name: str
:param name: The field name to update.
:type value: object
:param value: The value being updated.
"""
self._changes.add(name)
self._properties[name] = value | ['def', '_patch_property', '(', 'self', ',', 'name', ',', 'value', ')', ':', 'self', '.', '_changes', '.', 'add', '(', 'name', ')', 'self', '.', '_properties', '[', 'name', ']', '=', 'value'] | Update field of this object's properties.
This method will only update the field provided and will not
touch the other fields.
It **will not** reload the properties from the server. The behavior is
local only and syncing occurs via :meth:`patch`.
:type name: str
:param name: The field name to update.
:type value: object
:param value: The value being updated. | ['Update', 'field', 'of', 'this', 'object', 's', 'properties', '.'] | train | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/_helpers.py#L134-L150 |
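The helper above exists so that a later patch() can send only the touched fields. A minimal sketch of that local change-tracking idea follows; the class and method names are illustrative, not the google-cloud-storage implementation.

class PropertyMixinSketch:
    def __init__(self):
        self._properties = {}
        self._changes = set()

    def _patch_property(self, name, value):
        self._changes.add(name)          # remember the field for the next PATCH request
        self._properties[name] = value   # update locally only; no reload from the server

    def pending_patch(self):
        # payload a patch() call would send: just the fields touched since the last sync
        return {name: self._properties[name] for name in self._changes}

obj = PropertyMixinSketch()
obj._patch_property("storageClass", "NEARLINE")
print(obj.pending_patch())   # {'storageClass': 'NEARLINE'}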
6,659 | marshallward/f90nml | f90nml/namelist.py | Namelist.end_comma | def end_comma(self, value):
"""Validate and set the comma termination flag."""
if not isinstance(value, bool):
raise TypeError('end_comma attribute must be a logical type.')
self._end_comma = value | python | def end_comma(self, value):
"""Validate and set the comma termination flag."""
if not isinstance(value, bool):
raise TypeError('end_comma attribute must be a logical type.')
self._end_comma = value | ['def', 'end_comma', '(', 'self', ',', 'value', ')', ':', 'if', 'not', 'isinstance', '(', 'value', ',', 'bool', ')', ':', 'raise', 'TypeError', '(', "'end_comma attribute must be a logical type.'", ')', 'self', '.', '_end_comma', '=', 'value'] | Validate and set the comma termination flag. | ['Validate', 'and', 'set', 'the', 'comma', 'termination', 'flag', '.'] | train | https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/namelist.py#L212-L216 |
6,660 | foremast/foremast | src/foremast/elb/format_listeners.py | format_listeners | def format_listeners(elb_settings=None, env='dev', region='us-east-1'):
"""Format ELB Listeners into standard list.
Args:
elb_settings (dict): ELB settings including ELB Listeners to add,
e.g.::
# old
{
"certificate": null,
"i_port": 8080,
"lb_port": 80,
"subnet_purpose": "internal",
"target": "HTTP:8080/health"
}
# new
{
"ports": [
{
"instance": "HTTP:8080",
"loadbalancer": "HTTP:80"
},
{
"certificate": "cert_name",
"instance": "HTTP:8443",
"loadbalancer": "HTTPS:443"
}
],
"subnet_purpose": "internal",
"target": "HTTP:8080/health"
}
env (str): Environment to find the Account Number for.
Returns:
list: ELB Listeners formatted into dicts for Spinnaker::
[
{
'externalPort': 80,
'externalProtocol': 'HTTP',
'internalPort': 8080,
'internalProtocol': 'HTTP',
'sslCertificateId': None,
'listenerPolicies': [],
'backendPolicies': []
},
...
]
"""
LOG.debug('ELB settings:\n%s', elb_settings)
credential = get_env_credential(env=env)
account = credential['accountId']
listeners = []
if 'ports' in elb_settings:
for listener in elb_settings['ports']:
cert_name = format_cert_name(
env=env, region=region, account=account, certificate=listener.get('certificate', None))
lb_proto, lb_port = listener['loadbalancer'].split(':')
i_proto, i_port = listener['instance'].split(':')
listener_policies = listener.get('policies', [])
listener_policies += listener.get('listener_policies', [])
backend_policies = listener.get('backend_policies', [])
elb_data = {
'externalPort': int(lb_port),
'externalProtocol': lb_proto.upper(),
'internalPort': int(i_port),
'internalProtocol': i_proto.upper(),
'sslCertificateId': cert_name,
'listenerPolicies': listener_policies,
'backendPolicies': backend_policies,
}
listeners.append(elb_data)
else:
listener_policies = elb_settings.get('policies', [])
listener_policies += elb_settings.get('listener_policies', [])
backend_policies = elb_settings.get('backend_policies', [])
listeners = [{
'externalPort': int(elb_settings['lb_port']),
'externalProtocol': elb_settings['lb_proto'],
'internalPort': int(elb_settings['i_port']),
'internalProtocol': elb_settings['i_proto'],
'sslCertificateId': elb_settings['certificate'],
'listenerPolicies': listener_policies,
'backendPolicies': backend_policies,
}]
for listener in listeners:
LOG.info('ELB Listener:\n'
'loadbalancer %(externalProtocol)s:%(externalPort)d\n'
'instance %(internalProtocol)s:%(internalPort)d\n'
'certificate: %(sslCertificateId)s\n'
'listener_policies: %(listenerPolicies)s\n'
'backend_policies: %(backendPolicies)s', listener)
return listeners | python | def format_listeners(elb_settings=None, env='dev', region='us-east-1'):
"""Format ELB Listeners into standard list.
Args:
elb_settings (dict): ELB settings including ELB Listeners to add,
e.g.::
# old
{
"certificate": null,
"i_port": 8080,
"lb_port": 80,
"subnet_purpose": "internal",
"target": "HTTP:8080/health"
}
# new
{
"ports": [
{
"instance": "HTTP:8080",
"loadbalancer": "HTTP:80"
},
{
"certificate": "cert_name",
"instance": "HTTP:8443",
"loadbalancer": "HTTPS:443"
}
],
"subnet_purpose": "internal",
"target": "HTTP:8080/health"
}
env (str): Environment to find the Account Number for.
Returns:
list: ELB Listeners formatted into dicts for Spinnaker::
[
{
'externalPort': 80,
'externalProtocol': 'HTTP',
'internalPort': 8080,
'internalProtocol': 'HTTP',
'sslCertificateId': None,
'listenerPolicies': [],
'backendPolicies': []
},
...
]
"""
LOG.debug('ELB settings:\n%s', elb_settings)
credential = get_env_credential(env=env)
account = credential['accountId']
listeners = []
if 'ports' in elb_settings:
for listener in elb_settings['ports']:
cert_name = format_cert_name(
env=env, region=region, account=account, certificate=listener.get('certificate', None))
lb_proto, lb_port = listener['loadbalancer'].split(':')
i_proto, i_port = listener['instance'].split(':')
listener_policies = listener.get('policies', [])
listener_policies += listener.get('listener_policies', [])
backend_policies = listener.get('backend_policies', [])
elb_data = {
'externalPort': int(lb_port),
'externalProtocol': lb_proto.upper(),
'internalPort': int(i_port),
'internalProtocol': i_proto.upper(),
'sslCertificateId': cert_name,
'listenerPolicies': listener_policies,
'backendPolicies': backend_policies,
}
listeners.append(elb_data)
else:
listener_policies = elb_settings.get('policies', [])
listener_policies += elb_settings.get('listener_policies', [])
backend_policies = elb_settings.get('backend_policies', [])
listeners = [{
'externalPort': int(elb_settings['lb_port']),
'externalProtocol': elb_settings['lb_proto'],
'internalPort': int(elb_settings['i_port']),
'internalProtocol': elb_settings['i_proto'],
'sslCertificateId': elb_settings['certificate'],
'listenerPolicies': listener_policies,
'backendPolicies': backend_policies,
}]
for listener in listeners:
LOG.info('ELB Listener:\n'
'loadbalancer %(externalProtocol)s:%(externalPort)d\n'
'instance %(internalProtocol)s:%(internalPort)d\n'
'certificate: %(sslCertificateId)s\n'
'listener_policies: %(listenerPolicies)s\n'
'backend_policies: %(backendPolicies)s', listener)
return listeners | ['def', 'format_listeners', '(', 'elb_settings', '=', 'None', ',', 'env', '=', "'dev'", ',', 'region', '=', "'us-east-1'", ')', ':', 'LOG', '.', 'debug', '(', "'ELB settings:\\n%s'", ',', 'elb_settings', ')', 'credential', '=', 'get_env_credential', '(', 'env', '=', 'env', ')', 'account', '=', 'credential', '[', "'accountId'", ']', 'listeners', '=', '[', ']', 'if', "'ports'", 'in', 'elb_settings', ':', 'for', 'listener', 'in', 'elb_settings', '[', "'ports'", ']', ':', 'cert_name', '=', 'format_cert_name', '(', 'env', '=', 'env', ',', 'region', '=', 'region', ',', 'account', '=', 'account', ',', 'certificate', '=', 'listener', '.', 'get', '(', "'certificate'", ',', 'None', ')', ')', 'lb_proto', ',', 'lb_port', '=', 'listener', '[', "'loadbalancer'", ']', '.', 'split', '(', "':'", ')', 'i_proto', ',', 'i_port', '=', 'listener', '[', "'instance'", ']', '.', 'split', '(', "':'", ')', 'listener_policies', '=', 'listener', '.', 'get', '(', "'policies'", ',', '[', ']', ')', 'listener_policies', '+=', 'listener', '.', 'get', '(', "'listener_policies'", ',', '[', ']', ')', 'backend_policies', '=', 'listener', '.', 'get', '(', "'backend_policies'", ',', '[', ']', ')', 'elb_data', '=', '{', "'externalPort'", ':', 'int', '(', 'lb_port', ')', ',', "'externalProtocol'", ':', 'lb_proto', '.', 'upper', '(', ')', ',', "'internalPort'", ':', 'int', '(', 'i_port', ')', ',', "'internalProtocol'", ':', 'i_proto', '.', 'upper', '(', ')', ',', "'sslCertificateId'", ':', 'cert_name', ',', "'listenerPolicies'", ':', 'listener_policies', ',', "'backendPolicies'", ':', 'backend_policies', ',', '}', 'listeners', '.', 'append', '(', 'elb_data', ')', 'else', ':', 'listener_policies', '=', 'elb_settings', '.', 'get', '(', "'policies'", ',', '[', ']', ')', 'listener_policies', '+=', 'elb_settings', '.', 'get', '(', "'listener_policies'", ',', '[', ']', ')', 'backend_policies', '=', 'elb_settings', '.', 'get', '(', "'backend_policies'", ',', '[', ']', ')', 'listeners', '=', '[', '{', "'externalPort'", ':', 'int', '(', 'elb_settings', '[', "'lb_port'", ']', ')', ',', "'externalProtocol'", ':', 'elb_settings', '[', "'lb_proto'", ']', ',', "'internalPort'", ':', 'int', '(', 'elb_settings', '[', "'i_port'", ']', ')', ',', "'internalProtocol'", ':', 'elb_settings', '[', "'i_proto'", ']', ',', "'sslCertificateId'", ':', 'elb_settings', '[', "'certificate'", ']', ',', "'listenerPolicies'", ':', 'listener_policies', ',', "'backendPolicies'", ':', 'backend_policies', ',', '}', ']', 'for', 'listener', 'in', 'listeners', ':', 'LOG', '.', 'info', '(', "'ELB Listener:\\n'", "'loadbalancer %(externalProtocol)s:%(externalPort)d\\n'", "'instance %(internalProtocol)s:%(internalPort)d\\n'", "'certificate: %(sslCertificateId)s\\n'", "'listener_policies: %(listenerPolicies)s\\n'", "'backend_policies: %(backendPolicies)s'", ',', 'listener', ')', 'return', 'listeners'] | Format ELB Listeners into standard list.
Args:
elb_settings (dict): ELB settings including ELB Listeners to add,
e.g.::
# old
{
"certificate": null,
"i_port": 8080,
"lb_port": 80,
"subnet_purpose": "internal",
"target": "HTTP:8080/health"
}
# new
{
"ports": [
{
"instance": "HTTP:8080",
"loadbalancer": "HTTP:80"
},
{
"certificate": "cert_name",
"instance": "HTTP:8443",
"loadbalancer": "HTTPS:443"
}
],
"subnet_purpose": "internal",
"target": "HTTP:8080/health"
}
env (str): Environment to find the Account Number for.
Returns:
list: ELB Listeners formatted into dicts for Spinnaker::
[
{
'externalPort': 80,
'externalProtocol': 'HTTP',
'internalPort': 8080,
'internalProtocol': 'HTTP',
'sslCertificateId': None,
'listenerPolicies': [],
'backendPolicies': []
},
...
] | ['Format', 'ELB', 'Listeners', 'into', 'standard', 'list', '.'] | train | https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/elb/format_listeners.py#L26-L128 |
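A simplified, runnable illustration of the listener translation described in that docstring, covering only the new "ports" format; the real function additionally resolves certificate names into ARNs via the account credential, which is omitted here.

def simple_listeners(elb_settings):
    listeners = []
    for port in elb_settings.get("ports", []):
        lb_proto, lb_port = port["loadbalancer"].split(":")
        i_proto, i_port = port["instance"].split(":")
        listeners.append({
            "externalPort": int(lb_port),
            "externalProtocol": lb_proto.upper(),
            "internalPort": int(i_port),
            "internalProtocol": i_proto.upper(),
            "sslCertificateId": port.get("certificate"),   # certificate lookup skipped
            "listenerPolicies": port.get("policies", []) + port.get("listener_policies", []),
            "backendPolicies": port.get("backend_policies", []),
        })
    return listeners

settings = {"ports": [{"instance": "HTTP:8080", "loadbalancer": "HTTP:80"}]}
print(simple_listeners(settings))
# [{'externalPort': 80, 'externalProtocol': 'HTTP', 'internalPort': 8080,
#   'internalProtocol': 'HTTP', 'sslCertificateId': None, 'listenerPolicies': [],
#   'backendPolicies': []}]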
6,661 | GPflow/GPflow | gpflow/likelihoods.py | Likelihood.predict_density | def predict_density(self, Fmu, Fvar, Y):
r"""
Given a Normal distribution for the latent function, and a datum Y,
compute the log predictive density of Y.
i.e. if
q(f) = N(Fmu, Fvar)
and this object represents
p(y|f)
then this method computes the predictive density
\log \int p(y=Y|f)q(f) df
Here, we implement a default Gauss-Hermite quadrature routine, but some
likelihoods (Gaussian, Poisson) will implement specific cases.
"""
return ndiagquad(self.logp,
self.num_gauss_hermite_points,
Fmu, Fvar, logspace=True, Y=Y) | python | def predict_density(self, Fmu, Fvar, Y):
r"""
Given a Normal distribution for the latent function, and a datum Y,
compute the log predictive density of Y.
i.e. if
q(f) = N(Fmu, Fvar)
and this object represents
p(y|f)
then this method computes the predictive density
\log \int p(y=Y|f)q(f) df
Here, we implement a default Gauss-Hermite quadrature routine, but some
likelihoods (Gaussian, Poisson) will implement specific cases.
"""
return ndiagquad(self.logp,
self.num_gauss_hermite_points,
Fmu, Fvar, logspace=True, Y=Y) | ['def', 'predict_density', '(', 'self', ',', 'Fmu', ',', 'Fvar', ',', 'Y', ')', ':', 'return', 'ndiagquad', '(', 'self', '.', 'logp', ',', 'self', '.', 'num_gauss_hermite_points', ',', 'Fmu', ',', 'Fvar', ',', 'logspace', '=', 'True', ',', 'Y', '=', 'Y', ')'] | r"""
Given a Normal distribution for the latent function, and a datum Y,
compute the log predictive density of Y.
i.e. if
q(f) = N(Fmu, Fvar)
and this object represents
p(y|f)
then this method computes the predictive density
\log \int p(y=Y|f)q(f) df
Here, we implement a default Gauss-Hermite quadrature routine, but some
likelihoods (Gaussian, Poisson) will implement specific cases. | ['r', 'Given', 'a', 'Normal', 'distribution', 'for', 'the', 'latent', 'function', 'and', 'a', 'datum', 'Y', 'compute', 'the', 'log', 'predictive', 'density', 'of', 'Y', '.'] | train | https://github.com/GPflow/GPflow/blob/549394f0b1b0696c7b521a065e49bdae6e7acf27/gpflow/likelihoods.py#L106-L127 |
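A numeric check of the quadrature idea in that docstring, independent of GPflow: approximate the integral of p(y|f) N(f; mu, var) over f with Gauss-Hermite nodes for a Gaussian likelihood, where the closed form is log N(y; mu, var + noise_var). This is a hand-rolled sketch, not GPflow's ndiagquad.

import numpy as np

mu, var, noise_var, y = 0.3, 0.5, 0.1, 1.0

nodes, weights = np.polynomial.hermite.hermgauss(20)
f = mu + np.sqrt(2.0 * var) * nodes                # change of variables for N(f; mu, var)
p_y_given_f = np.exp(-(y - f) ** 2 / (2 * noise_var)) / np.sqrt(2 * np.pi * noise_var)
quad = np.log(np.sum(weights * p_y_given_f) / np.sqrt(np.pi))

closed = -0.5 * np.log(2 * np.pi * (var + noise_var)) - (y - mu) ** 2 / (2 * (var + noise_var))
print(quad, closed)   # the two log-densities agree to several decimal places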
6,662 | santoshphilip/eppy | eppy/fanpower.py | watts2pascal | def watts2pascal(watts, cfm, fan_tot_eff):
"""convert and return inputs for E+ in pascal and m3/s"""
bhp = watts2bhp(watts)
return bhp2pascal(bhp, cfm, fan_tot_eff) | python | def watts2pascal(watts, cfm, fan_tot_eff):
"""convert and return inputs for E+ in pascal and m3/s"""
bhp = watts2bhp(watts)
return bhp2pascal(bhp, cfm, fan_tot_eff) | ['def', 'watts2pascal', '(', 'watts', ',', 'cfm', ',', 'fan_tot_eff', ')', ':', 'bhp', '=', 'watts2bhp', '(', 'watts', ')', 'return', 'bhp2pascal', '(', 'bhp', ',', 'cfm', ',', 'fan_tot_eff', ')'] | convert and return inputs for E+ in pascal and m3/s | ['convert', 'and', 'return', 'inputs', 'for', 'E', '+', 'in', 'pascal', 'and', 'm3', '/', 's'] | train | https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/fanpower.py#L67-L70 |
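A back-of-the-envelope sketch of the relation behind this conversion chain, not eppy's exact formulas: fan power P = Q * dP / eta, so from P in watts, flow in cfm and total efficiency one can recover the pressure rise in pascal and the flow in m3/s that EnergyPlus expects.

CFM_TO_M3S = 0.0004719474   # 1 cubic foot per minute expressed in m3/s

def watts_to_pascal_sketch(watts, cfm, fan_tot_eff):
    flow_m3s = cfm * CFM_TO_M3S
    pascal = watts * fan_tot_eff / flow_m3s   # rearranged from P = Q * dP / eta
    return pascal, flow_m3s

print(watts_to_pascal_sketch(1500.0, 2000.0, 0.6))   # roughly (953.5 Pa, 0.944 m3/s)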
6,663 | pyviz/holoviews | holoviews/plotting/bokeh/renderer.py | BokehRenderer.server_doc | def server_doc(self_or_cls, obj, doc=None):
"""
Get a bokeh Document with the plot attached. May supply
an existing doc, otherwise bokeh.io.curdoc() is used to
attach the plot to the global document instance.
"""
if not isinstance(obj, (Plot, BokehServerWidgets)):
if not isinstance(self_or_cls, BokehRenderer) or self_or_cls.mode != 'server':
renderer = self_or_cls.instance(mode='server')
else:
renderer = self_or_cls
plot, _ = renderer._validate(obj, 'auto')
else:
plot = obj
root = plot.state
if isinstance(plot, BokehServerWidgets):
plot = plot.plot
if doc is None:
doc = plot.document
else:
plot.document = doc
plot.traverse(lambda x: attach_periodic(x), [GenericElementPlot])
doc.add_root(root)
return doc | python | def server_doc(self_or_cls, obj, doc=None):
"""
Get a bokeh Document with the plot attached. May supply
an existing doc, otherwise bokeh.io.curdoc() is used to
attach the plot to the global document instance.
"""
if not isinstance(obj, (Plot, BokehServerWidgets)):
if not isinstance(self_or_cls, BokehRenderer) or self_or_cls.mode != 'server':
renderer = self_or_cls.instance(mode='server')
else:
renderer = self_or_cls
plot, _ = renderer._validate(obj, 'auto')
else:
plot = obj
root = plot.state
if isinstance(plot, BokehServerWidgets):
plot = plot.plot
if doc is None:
doc = plot.document
else:
plot.document = doc
plot.traverse(lambda x: attach_periodic(x), [GenericElementPlot])
doc.add_root(root)
return doc | ['def', 'server_doc', '(', 'self_or_cls', ',', 'obj', ',', 'doc', '=', 'None', ')', ':', 'if', 'not', 'isinstance', '(', 'obj', ',', '(', 'Plot', ',', 'BokehServerWidgets', ')', ')', ':', 'if', 'not', 'isinstance', '(', 'self_or_cls', ',', 'BokehRenderer', ')', 'or', 'self_or_cls', '.', 'mode', '!=', "'server'", ':', 'renderer', '=', 'self_or_cls', '.', 'instance', '(', 'mode', '=', "'server'", ')', 'else', ':', 'renderer', '=', 'self_or_cls', 'plot', ',', '_', '=', 'renderer', '.', '_validate', '(', 'obj', ',', "'auto'", ')', 'else', ':', 'plot', '=', 'obj', 'root', '=', 'plot', '.', 'state', 'if', 'isinstance', '(', 'plot', ',', 'BokehServerWidgets', ')', ':', 'plot', '=', 'plot', '.', 'plot', 'if', 'doc', 'is', 'None', ':', 'doc', '=', 'plot', '.', 'document', 'else', ':', 'plot', '.', 'document', '=', 'doc', 'plot', '.', 'traverse', '(', 'lambda', 'x', ':', 'attach_periodic', '(', 'x', ')', ',', '[', 'GenericElementPlot', ']', ')', 'doc', '.', 'add_root', '(', 'root', ')', 'return', 'doc'] | Get a bokeh Document with the plot attached. May supply
an existing doc, otherwise bokeh.io.curdoc() is used to
attach the plot to the global document instance. | ['Get', 'a', 'bokeh', 'Document', 'with', 'the', 'plot', 'attached', '.', 'May', 'supply', 'an', 'existing', 'doc', 'otherwise', 'bokeh', '.', 'io', '.', 'curdoc', '()', 'is', 'used', 'to', 'attach', 'the', 'plot', 'to', 'the', 'global', 'document', 'instance', '.'] | train | https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/bokeh/renderer.py#L218-L244 |
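A usage sketch in the style of a Bokeh server app script (e.g. run with "bokeh serve app.py"), based on the docstring above; omitting the doc argument attaches the plot to bokeh.io.curdoc(). The data is illustrative only.

import numpy as np
import holoviews as hv

renderer = hv.renderer('bokeh')
curve = hv.Curve((np.arange(100), np.random.randn(100).cumsum()))
doc = renderer.server_doc(curve)   # returns the bokeh Document with the plot as a root
doc.title = 'HoloViews curve'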
6,664 | akesterson/dpath-python | dpath/util.py | merge | def merge(dst, src, separator="/", afilter=None, flags=MERGE_ADDITIVE, _path=""):
"""Merge source into destination. Like dict.update() but performs
deep merging.
flags is an OR'ed combination of MERGE_ADDITIVE, MERGE_REPLACE, or
MERGE_TYPESAFE.
* MERGE_ADDITIVE : List objects are combined onto one long
list (NOT a set). This is the default flag.
* MERGE_REPLACE : Instead of combining list objects, when
2 list objects are at an equal depth of merge, replace
the destination with the source.
* MERGE_TYPESAFE : When 2 keys at equal levels are of different
types, raise a TypeError exception. By default, the source
replaces the destination in this situation.
"""
if afilter:
# Having merge do its own afiltering is dumb, let search do the
# heavy lifting for us.
src = search(src, '**', afilter=afilter)
return merge(dst, src)
def _check_typesafe(obj1, obj2, key, path):
if not key in obj1:
return
elif ( (flags & MERGE_TYPESAFE == MERGE_TYPESAFE) and (type(obj1[key]) != type(obj2[key]))):
raise TypeError("Cannot merge objects of type {0} and {1} at {2}"
"".format(type(obj1[key]), type(obj2[key]), path))
elif ( (flags & MERGE_TYPESAFE != MERGE_TYPESAFE) and (type(obj1[key]) != type(obj2[key]))):
obj1.pop(key)
if isinstance(src, MutableMapping):
for (i, v) in enumerate(src):
_check_typesafe(dst, src, v, separator.join([_path, str(v)]))
if not v in dst:
dst[v] = src[v]
else:
if not isinstance(src[v], (MutableMapping, MutableSequence)):
dst[v] = src[v]
else:
merge(dst[v], src[v], afilter=afilter, flags=flags,
_path=separator.join([_path, str(v)]), separator=separator)
elif isinstance(src, MutableSequence):
for (i, v) in enumerate(src):
_check_typesafe(dst, src, i, separator.join([_path, str(i)]))
dsti = i
if ( flags & MERGE_ADDITIVE):
dsti = len(dst)
if dsti >= len(dst):
dst += [None] * (dsti - (len(dst) - 1))
if dst[dsti] == None:
dst[dsti] = src[i]
else:
if not isinstance(src[i], (MutableMapping, MutableSequence)):
dst[dsti] = src[i]
else:
merge(dst[i], src[i], afilter=afilter, flags=flags,
_path=separator.join([_path, str(i)]), separator=separator) | python | def merge(dst, src, separator="/", afilter=None, flags=MERGE_ADDITIVE, _path=""):
"""Merge source into destination. Like dict.update() but performs
deep merging.
flags is an OR'ed combination of MERGE_ADDITIVE, MERGE_REPLACE, or
MERGE_TYPESAFE.
* MERGE_ADDITIVE : List objects are combined onto one long
list (NOT a set). This is the default flag.
* MERGE_REPLACE : Instead of combining list objects, when
2 list objects are at an equal depth of merge, replace
the destination with the source.
* MERGE_TYPESAFE : When 2 keys at equal levels are of different
types, raise a TypeError exception. By default, the source
replaces the destination in this situation.
"""
if afilter:
# Having merge do its own afiltering is dumb, let search do the
# heavy lifting for us.
src = search(src, '**', afilter=afilter)
return merge(dst, src)
def _check_typesafe(obj1, obj2, key, path):
if not key in obj1:
return
elif ( (flags & MERGE_TYPESAFE == MERGE_TYPESAFE) and (type(obj1[key]) != type(obj2[key]))):
raise TypeError("Cannot merge objects of type {0} and {1} at {2}"
"".format(type(obj1[key]), type(obj2[key]), path))
elif ( (flags & MERGE_TYPESAFE != MERGE_TYPESAFE) and (type(obj1[key]) != type(obj2[key]))):
obj1.pop(key)
if isinstance(src, MutableMapping):
for (i, v) in enumerate(src):
_check_typesafe(dst, src, v, separator.join([_path, str(v)]))
if not v in dst:
dst[v] = src[v]
else:
if not isinstance(src[v], (MutableMapping, MutableSequence)):
dst[v] = src[v]
else:
merge(dst[v], src[v], afilter=afilter, flags=flags,
_path=separator.join([_path, str(v)]), separator=separator)
elif isinstance(src, MutableSequence):
for (i, v) in enumerate(src):
_check_typesafe(dst, src, i, separator.join([_path, str(i)]))
dsti = i
if ( flags & MERGE_ADDITIVE):
dsti = len(dst)
if dsti >= len(dst):
dst += [None] * (dsti - (len(dst) - 1))
if dst[dsti] == None:
dst[dsti] = src[i]
else:
if not isinstance(src[i], (MutableMapping, MutableSequence)):
dst[dsti] = src[i]
else:
merge(dst[i], src[i], afilter=afilter, flags=flags,
_path=separator.join([_path, str(i)]), separator=separator) | ['def', 'merge', '(', 'dst', ',', 'src', ',', 'separator', '=', '"/"', ',', 'afilter', '=', 'None', ',', 'flags', '=', 'MERGE_ADDITIVE', ',', '_path', '=', '""', ')', ':', 'if', 'afilter', ':', '# Having merge do its own afiltering is dumb, let search do the', '# heavy lifting for us.', 'src', '=', 'search', '(', 'src', ',', "'**'", ',', 'afilter', '=', 'afilter', ')', 'return', 'merge', '(', 'dst', ',', 'src', ')', 'def', '_check_typesafe', '(', 'obj1', ',', 'obj2', ',', 'key', ',', 'path', ')', ':', 'if', 'not', 'key', 'in', 'obj1', ':', 'return', 'elif', '(', '(', 'flags', '&', 'MERGE_TYPESAFE', '==', 'MERGE_TYPESAFE', ')', 'and', '(', 'type', '(', 'obj1', '[', 'key', ']', ')', '!=', 'type', '(', 'obj2', '[', 'key', ']', ')', ')', ')', ':', 'raise', 'TypeError', '(', '"Cannot merge objects of type {0} and {1} at {2}"', '""', '.', 'format', '(', 'type', '(', 'obj1', '[', 'key', ']', ')', ',', 'type', '(', 'obj2', '[', 'key', ']', ')', ',', 'path', ')', ')', 'elif', '(', '(', 'flags', '&', 'MERGE_TYPESAFE', '!=', 'MERGE_TYPESAFE', ')', 'and', '(', 'type', '(', 'obj1', '[', 'key', ']', ')', '!=', 'type', '(', 'obj2', '[', 'key', ']', ')', ')', ')', ':', 'obj1', '.', 'pop', '(', 'key', ')', 'if', 'isinstance', '(', 'src', ',', 'MutableMapping', ')', ':', 'for', '(', 'i', ',', 'v', ')', 'in', 'enumerate', '(', 'src', ')', ':', '_check_typesafe', '(', 'dst', ',', 'src', ',', 'v', ',', 'separator', '.', 'join', '(', '[', '_path', ',', 'str', '(', 'v', ')', ']', ')', ')', 'if', 'not', 'v', 'in', 'dst', ':', 'dst', '[', 'v', ']', '=', 'src', '[', 'v', ']', 'else', ':', 'if', 'not', 'isinstance', '(', 'src', '[', 'v', ']', ',', '(', 'MutableMapping', ',', 'MutableSequence', ')', ')', ':', 'dst', '[', 'v', ']', '=', 'src', '[', 'v', ']', 'else', ':', 'merge', '(', 'dst', '[', 'v', ']', ',', 'src', '[', 'v', ']', ',', 'afilter', '=', 'afilter', ',', 'flags', '=', 'flags', ',', '_path', '=', 'separator', '.', 'join', '(', '[', '_path', ',', 'str', '(', 'v', ')', ']', ')', ',', 'separator', '=', 'separator', ')', 'elif', 'isinstance', '(', 'src', ',', 'MutableSequence', ')', ':', 'for', '(', 'i', ',', 'v', ')', 'in', 'enumerate', '(', 'src', ')', ':', '_check_typesafe', '(', 'dst', ',', 'src', ',', 'i', ',', 'separator', '.', 'join', '(', '[', '_path', ',', 'str', '(', 'i', ')', ']', ')', ')', 'dsti', '=', 'i', 'if', '(', 'flags', '&', 'MERGE_ADDITIVE', ')', ':', 'dsti', '=', 'len', '(', 'dst', ')', 'if', 'dsti', '>=', 'len', '(', 'dst', ')', ':', 'dst', '+=', '[', 'None', ']', '*', '(', 'dsti', '-', '(', 'len', '(', 'dst', ')', '-', '1', ')', ')', 'if', 'dst', '[', 'dsti', ']', '==', 'None', ':', 'dst', '[', 'dsti', ']', '=', 'src', '[', 'i', ']', 'else', ':', 'if', 'not', 'isinstance', '(', 'src', '[', 'i', ']', ',', '(', 'MutableMapping', ',', 'MutableSequence', ')', ')', ':', 'dst', '[', 'dsti', ']', '=', 'src', '[', 'i', ']', 'else', ':', 'merge', '(', 'dst', '[', 'i', ']', ',', 'src', '[', 'i', ']', ',', 'afilter', '=', 'afilter', ',', 'flags', '=', 'flags', ',', '_path', '=', 'separator', '.', 'join', '(', '[', '_path', ',', 'str', '(', 'i', ')', ']', ')', ',', 'separator', '=', 'separator', ')'] | Merge source into destination. Like dict.update() but performs
deep merging.
flags is an OR'ed combination of MERGE_ADDITIVE, MERGE_REPLACE, or
MERGE_TYPESAFE.
* MERGE_ADDITIVE : List objects are combined onto one long
list (NOT a set). This is the default flag.
* MERGE_REPLACE : Instead of combining list objects, when
2 list objects are at an equal depth of merge, replace
the destination with the source.
* MERGE_TYPESAFE : When 2 keys at equal levels are of different
types, raise a TypeError exception. By default, the source
replaces the destination in this situation. | ['Merge', 'source', 'into', 'destination', '.', 'Like', 'dict', '.', 'update', '()', 'but', 'performs', 'deep', 'merging', '.'] | train | https://github.com/akesterson/dpath-python/blob/2d9117c5fc6870d546aadefb5bf3ab194f4c7411/dpath/util.py#L163-L221 |
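A usage sketch of the merge semantics documented above, assuming the function and flag constants are importable as dpath.util.* as in this file; the example data is illustrative.

import dpath.util

dst = {"a": [1], "b": {"x": 1}}
src = {"a": [2], "b": {"y": 2}}
dpath.util.merge(dst, src)   # MERGE_ADDITIVE is the default
print(dst)                   # {'a': [1, 2], 'b': {'x': 1, 'y': 2}} -- lists concatenated

dst = {"a": [1]}
dpath.util.merge(dst, {"a": [2]}, flags=dpath.util.MERGE_REPLACE)
print(dst)                   # {'a': [2]} -- the source list replaces the destination list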
6,665 | useblocks/groundwork | groundwork/plugins/gw_documents_info.py | GwDocumentsInfo._store_documentation | def _store_documentation(self, path, html, overwrite, quiet):
"""
Stores all documents on the file system.
Target location is **path**. File name is the lowercase name of the document + .rst.
"""
echo("Storing groundwork application documents\n")
echo("Application: %s" % self.app.name)
echo("Number of documents: %s\n" % len(self.app.documents.get()))
if not os.path.isabs(path):
path = os.path.abspath(path)
if not os.path.isdir(path):
echo("Path %s is not a directory!" % path)
sys.exit(1)
if not os.path.exists(path):
echo("Path %s does not exist" % path)
sys.exit(1)
for dirpath, dirnames, files in os.walk(path):
if files:
echo("Path %s is not empty!\n" % path)
if not overwrite:
sys.exit(1)
documents = []
for key, document in self.app.documents.get().items():
file_extension = ".html" if html else ".rst"
# lowers the name, removes all whitespaces and adds the file extension
file_name_parts = key.lower().split()
file_name = "".join(file_name_parts)
file_name += file_extension
documents.append((file_name, document))
echo("Going to write to following files:")
for document in documents:
echo(" %s" % document[0])
echo("\nTarget directory: %s" % path)
answer = None
while answer not in ["N", "Y"] and not quiet:
answer = prompt("Shall we go on? [Y]es, [N]o: ").upper()
if answer == "N":
sys.exit(0)
for document in documents:
try:
with open(os.path.join(path, document[0]), "w") as doc_file:
doc_rendered = Environment().from_string(document[1].content).render(app=self.app,
plugin=document[1].plugin)
if html:
output = publish_parts(doc_rendered, writer_name="html")['whole']
else:
output = doc_rendered
doc_file.write(output)
except Exception as e:
echo("%s error occurred: %s" % (document[0], e))
else:
echo("%s stored." % document[0]) | python | def _store_documentation(self, path, html, overwrite, quiet):
"""
Stores all documents on the file system.
Target location is **path**. File name is the lowercase name of the document + .rst.
"""
echo("Storing groundwork application documents\n")
echo("Application: %s" % self.app.name)
echo("Number of documents: %s\n" % len(self.app.documents.get()))
if not os.path.isabs(path):
path = os.path.abspath(path)
if not os.path.isdir(path):
echo("Path %s is not a directory!" % path)
sys.exit(1)
if not os.path.exists(path):
echo("Path %s does not exist" % path)
sys.exit(1)
for dirpath, dirnames, files in os.walk(path):
if files:
echo("Path %s is not empty!\n" % path)
if not overwrite:
sys.exit(1)
documents = []
for key, document in self.app.documents.get().items():
file_extension = ".html" if html else ".rst"
# lowers the name, removes all whitespaces and adds the file extension
file_name_parts = key.lower().split()
file_name = "".join(file_name_parts)
file_name += file_extension
documents.append((file_name, document))
echo("Going to write to following files:")
for document in documents:
echo(" %s" % document[0])
echo("\nTarget directory: %s" % path)
answer = None
while answer not in ["N", "Y"] and not quiet:
answer = prompt("Shall we go on? [Y]es, [N]o: ").upper()
if answer == "N":
sys.exit(0)
for document in documents:
try:
with open(os.path.join(path, document[0]), "w") as doc_file:
doc_rendered = Environment().from_string(document[1].content).render(app=self.app,
plugin=document[1].plugin)
if html:
output = publish_parts(doc_rendered, writer_name="html")['whole']
else:
output = doc_rendered
doc_file.write(output)
except Exception as e:
echo("%s error occurred: %s" % (document[0], e))
else:
echo("%s stored." % document[0]) | ['def', '_store_documentation', '(', 'self', ',', 'path', ',', 'html', ',', 'overwrite', ',', 'quiet', ')', ':', 'echo', '(', '"Storing groundwork application documents\\n"', ')', 'echo', '(', '"Application: %s"', '%', 'self', '.', 'app', '.', 'name', ')', 'echo', '(', '"Number of documents: %s\\n"', '%', 'len', '(', 'self', '.', 'app', '.', 'documents', '.', 'get', '(', ')', ')', ')', 'if', 'not', 'os', '.', 'path', '.', 'isabs', '(', 'path', ')', ':', 'path', '=', 'os', '.', 'path', '.', 'abspath', '(', 'path', ')', 'if', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'path', ')', ':', 'echo', '(', '"Path %s is not a directory!"', '%', 'path', ')', 'sys', '.', 'exit', '(', '1', ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'path', ')', ':', 'echo', '(', '"Path %s does not exist"', '%', 'path', ')', 'sys', '.', 'exit', '(', '1', ')', 'for', 'dirpath', ',', 'dirnames', ',', 'files', 'in', 'os', '.', 'walk', '(', 'path', ')', ':', 'if', 'files', ':', 'echo', '(', '"Path %s is not empty!\\n"', '%', 'path', ')', 'if', 'not', 'overwrite', ':', 'sys', '.', 'exit', '(', '1', ')', 'documents', '=', '[', ']', 'for', 'key', ',', 'document', 'in', 'self', '.', 'app', '.', 'documents', '.', 'get', '(', ')', '.', 'items', '(', ')', ':', 'file_extension', '=', '".html"', 'if', 'html', 'else', '".rst"', '# lowers the name, removes all whitespaces and adds the file extension', 'file_name_parts', '=', 'key', '.', 'lower', '(', ')', '.', 'split', '(', ')', 'file_name', '=', '""', '.', 'join', '(', 'file_name_parts', ')', 'file_name', '+=', 'file_extension', 'documents', '.', 'append', '(', '(', 'file_name', ',', 'document', ')', ')', 'echo', '(', '"Going to write to following files:"', ')', 'for', 'document', 'in', 'documents', ':', 'echo', '(', '" %s"', '%', 'document', '[', '0', ']', ')', 'echo', '(', '"\\nTarget directory: %s"', '%', 'path', ')', 'answer', '=', 'None', 'while', 'answer', 'not', 'in', '[', '"N"', ',', '"Y"', ']', 'and', 'not', 'quiet', ':', 'answer', '=', 'prompt', '(', '"Shall we go on? [Y]es, [N]o: "', ')', '.', 'upper', '(', ')', 'if', 'answer', '==', '"N"', ':', 'sys', '.', 'exit', '(', '0', ')', 'for', 'document', 'in', 'documents', ':', 'try', ':', 'with', 'open', '(', 'os', '.', 'path', '.', 'join', '(', 'path', ',', 'document', '[', '0', ']', ')', ',', '"w"', ')', 'as', 'doc_file', ':', 'doc_rendered', '=', 'Environment', '(', ')', '.', 'from_string', '(', 'document', '[', '1', ']', '.', 'content', ')', '.', 'render', '(', 'app', '=', 'self', '.', 'app', ',', 'plugin', '=', 'document', '[', '1', ']', '.', 'plugin', ')', 'if', 'html', ':', 'output', '=', 'publish_parts', '(', 'doc_rendered', ',', 'writer_name', '=', '"html"', ')', '[', "'whole'", ']', 'else', ':', 'output', '=', 'doc_rendered', 'doc_file', '.', 'write', '(', 'output', ')', 'except', 'Exception', 'as', 'e', ':', 'echo', '(', '"%s error occurred: %s"', '%', '(', 'document', '[', '0', ']', ',', 'e', ')', ')', 'else', ':', 'echo', '(', '"%s stored."', '%', 'document', '[', '0', ']', ')'] | Stores all documents on the file system.
Target location is **path**. File name is the lowercase name of the document + .rst. | ['Stores', 'all', 'documents', 'on', 'the', 'file', 'system', '.'] | train | https://github.com/useblocks/groundwork/blob/d34fce43f54246ca4db0f7b89e450dcdc847c68c/groundwork/plugins/gw_documents_info.py#L86-L150 |
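A standalone sketch of the two transformations the command performs per document: deriving the output file name and converting reStructuredText to HTML with docutils. The helper name and sample document name are illustrative; publish_parts is the real docutils call used above.

from docutils.core import publish_parts

def document_filename(name, html=False):
    # lower-case the document name, drop all whitespace, then append the extension
    return "".join(name.lower().split()) + (".html" if html else ".rst")

print(document_filename("My Plugin Overview", html=True))   # mypluginoverview.html

rst_source = "Title\n=====\n\nSome *emphasised* text."
html_doc = publish_parts(rst_source, writer_name="html")["whole"]
print(html_doc[:40])   # start of the standalone HTML document produced by docutils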
6,666 | ilevkivskyi/typing_inspect | typing_inspect.py | _eval_args | def _eval_args(args):
"""Internal helper for get_args."""
res = []
for arg in args:
if not isinstance(arg, tuple):
res.append(arg)
elif is_callable_type(arg[0]):
callable_args = _eval_args(arg[1:])
if len(arg) == 2:
res.append(Callable[[], callable_args[0]])
elif arg[1] is Ellipsis:
res.append(Callable[..., callable_args[1]])
else:
res.append(Callable[list(callable_args[:-1]), callable_args[-1]])
else:
res.append(type(arg[0]).__getitem__(arg[0], _eval_args(arg[1:])))
return tuple(res) | python | def _eval_args(args):
"""Internal helper for get_args."""
res = []
for arg in args:
if not isinstance(arg, tuple):
res.append(arg)
elif is_callable_type(arg[0]):
callable_args = _eval_args(arg[1:])
if len(arg) == 2:
res.append(Callable[[], callable_args[0]])
elif arg[1] is Ellipsis:
res.append(Callable[..., callable_args[1]])
else:
res.append(Callable[list(callable_args[:-1]), callable_args[-1]])
else:
res.append(type(arg[0]).__getitem__(arg[0], _eval_args(arg[1:])))
return tuple(res) | ['def', '_eval_args', '(', 'args', ')', ':', 'res', '=', '[', ']', 'for', 'arg', 'in', 'args', ':', 'if', 'not', 'isinstance', '(', 'arg', ',', 'tuple', ')', ':', 'res', '.', 'append', '(', 'arg', ')', 'elif', 'is_callable_type', '(', 'arg', '[', '0', ']', ')', ':', 'callable_args', '=', '_eval_args', '(', 'arg', '[', '1', ':', ']', ')', 'if', 'len', '(', 'arg', ')', '==', '2', ':', 'res', '.', 'append', '(', 'Callable', '[', '[', ']', ',', 'callable_args', '[', '0', ']', ']', ')', 'elif', 'arg', '[', '1', ']', 'is', 'Ellipsis', ':', 'res', '.', 'append', '(', 'Callable', '[', '...', ',', 'callable_args', '[', '1', ']', ']', ')', 'else', ':', 'res', '.', 'append', '(', 'Callable', '[', 'list', '(', 'callable_args', '[', ':', '-', '1', ']', ')', ',', 'callable_args', '[', '-', '1', ']', ']', ')', 'else', ':', 'res', '.', 'append', '(', 'type', '(', 'arg', '[', '0', ']', ')', '.', '__getitem__', '(', 'arg', '[', '0', ']', ',', '_eval_args', '(', 'arg', '[', '1', ':', ']', ')', ')', ')', 'return', 'tuple', '(', 'res', ')'] | Internal helper for get_args. | ['Internal', 'helper', 'for', 'get_args', '.'] | train | https://github.com/ilevkivskyi/typing_inspect/blob/fd81278cc440b6003f8298bcb22d5bc0f82ee3cd/typing_inspect.py#L282-L298 |
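For context, a hedged illustration of what this helper supports: on older typing versions get_args() could see Callable arguments as nested tuples such as (Callable, (int,), str), and _eval_args rebuilds proper typing objects from that shape when get_args is called with evaluate=True. Exact output is Python-version dependent, so treat the expected value as indicative only.

from typing import Callable, Union
from typing_inspect import get_args

tp = Union[int, Callable[[int], str]]
print(get_args(tp, evaluate=True))   # expected shape: (int, typing.Callable[[int], str])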
6,667 | dagwieers/vmguestlib | vmguestlib.py | VMGuestLib.GetHostMemPhysFreeMB | def GetHostMemPhysFreeMB(self):
'''Undocumented.'''
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetHostMemPhysFreeMB(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value | python | def GetHostMemPhysFreeMB(self):
'''Undocumented.'''
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetHostMemPhysFreeMB(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value | ['def', 'GetHostMemPhysFreeMB', '(', 'self', ')', ':', 'counter', '=', 'c_uint', '(', ')', 'ret', '=', 'vmGuestLib', '.', 'VMGuestLib_GetHostMemPhysFreeMB', '(', 'self', '.', 'handle', '.', 'value', ',', 'byref', '(', 'counter', ')', ')', 'if', 'ret', '!=', 'VMGUESTLIB_ERROR_SUCCESS', ':', 'raise', 'VMGuestLibException', '(', 'ret', ')', 'return', 'counter', '.', 'value'] | Undocumented. | ['Undocumented', '.'] | train | https://github.com/dagwieers/vmguestlib/blob/2ba9333a745628cf9e6b4c767427a5bd997a71ad/vmguestlib.py#L245-L250 |
6,668 | PythonCharmers/python-future | src/future/backports/email/utils.py | unquote | def unquote(str):
"""Remove quotes from a string."""
if len(str) > 1:
if str.startswith('"') and str.endswith('"'):
return str[1:-1].replace('\\\\', '\\').replace('\\"', '"')
if str.startswith('<') and str.endswith('>'):
return str[1:-1]
return str | python | def unquote(str):
"""Remove quotes from a string."""
if len(str) > 1:
if str.startswith('"') and str.endswith('"'):
return str[1:-1].replace('\\\\', '\\').replace('\\"', '"')
if str.startswith('<') and str.endswith('>'):
return str[1:-1]
return str | ['def', 'unquote', '(', 'str', ')', ':', 'if', 'len', '(', 'str', ')', '>', '1', ':', 'if', 'str', '.', 'startswith', '(', '\'"\'', ')', 'and', 'str', '.', 'endswith', '(', '\'"\'', ')', ':', 'return', 'str', '[', '1', ':', '-', '1', ']', '.', 'replace', '(', "'\\\\\\\\'", ',', "'\\\\'", ')', '.', 'replace', '(', '\'\\\\"\'', ',', '\'"\'', ')', 'if', 'str', '.', 'startswith', '(', "'<'", ')', 'and', 'str', '.', 'endswith', '(', "'>'", ')', ':', 'return', 'str', '[', '1', ':', '-', '1', ']', 'return', 'str'] | Remove quotes from a string. | ['Remove', 'quotes', 'from', 'a', 'string', '.'] | train | https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/email/utils.py#L247-L254 |
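Behaviour sketch for the helper above (import path inferred from the file location in this row): one layer of double quotes is stripped with backslash escapes undone, one layer of angle brackets is stripped, and anything else passes through unchanged.

from future.backports.email.utils import unquote

print(unquote('"a \\"quoted\\" value"'))   # a "quoted" value
print(unquote('<user@example.org>'))       # user@example.org
print(unquote('plain'))                    # plain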
6,669 | caseyjlaw/rtpipe | rtpipe/RT.py | make_transient | def make_transient(std, DMmax, Amin=6., Amax=20., rmax=20., rmin=0., DMmin=0.):
""" Produce a mock transient pulse source for the purposes of characterizing the
detection success of the current pipeline.
Assumes
- Code to inject the transients does so by inserting at an array index
- Noise level at the center of the data array is characteristic of the
noise level throughout
Input
std - noise level in visibilities(?) at mid-point of segment
DMmax - maximum DM at which mock transient can be inserted [pc/cm^3]
Amin/Amax is amplitude in units of the std (calculated below)
rmax/rmin is radius range in arcmin
DMmin is min DM
Returns
loff - direction cosine offset of mock transient from phase center [radians]
moff - direction cosine offset of mock transient from phase center [radians]
A - amplitude of transient [std units]
DM - dispersion measure of mock transient [pc/cm^3]
"""
rad_arcmin = math.pi/(180*60)
phimin = 0.0
phimax = 2*math.pi
# Amplitude of transient, done in units of the std
# std is calculated assuming that noise level in the middle of the data,
# at index d['readints']/2, is characteristic of that throughout the data
A = random.uniform(Amin, Amax) * std
# Position of transient, in direction cosines
r = random.uniform(rmin, rmax)
phi = random.uniform(phimin, phimax)
loff = r*math.cos(phi) * rad_arcmin
moff = r*math.sin(phi) * rad_arcmin
# Dispersion measure
DM = random.uniform(DMmin, DMmax)
return loff, moff, A, DM | python | def make_transient(std, DMmax, Amin=6., Amax=20., rmax=20., rmin=0., DMmin=0.):
""" Produce a mock transient pulse source for the purposes of characterizing the
detection success of the current pipeline.
Assumes
- Code to inject the transients does so by inserting at an array index
- Noise level at the center of the data array is characteristic of the
noise level throughout
Input
std - noise level in visibilities(?) at mid-point of segment
DMmax - maximum DM at which mock transient can be inserted [pc/cm^3]
Amin/Amax is amplitude in units of the std (calculated below)
rmax/rmin is radius range in arcmin
DMmin is min DM
Returns
loff - direction cosine offset of mock transient from phase center [radians]
moff - direction cosine offset of mock transient from phase center [radians]
A - amplitude of transient [std units]
DM - dispersion measure of mock transient [pc/cm^3]
"""
rad_arcmin = math.pi/(180*60)
phimin = 0.0
phimax = 2*math.pi
# Amplitude of transient, done in units of the std
# std is calculated assuming that noise level in the middle of the data,
# at index d['readints']/2, is characteristic of that throughout the data
A = random.uniform(Amin, Amax) * std
# Position of transient, in direction cosines
r = random.uniform(rmin, rmax)
phi = random.uniform(phimin, phimax)
loff = r*math.cos(phi) * rad_arcmin
moff = r*math.sin(phi) * rad_arcmin
# Dispersion measure
DM = random.uniform(DMmin, DMmax)
return loff, moff, A, DM | ['def', 'make_transient', '(', 'std', ',', 'DMmax', ',', 'Amin', '=', '6.', ',', 'Amax', '=', '20.', ',', 'rmax', '=', '20.', ',', 'rmin', '=', '0.', ',', 'DMmin', '=', '0.', ')', ':', 'rad_arcmin', '=', 'math', '.', 'pi', '/', '(', '180', '*', '60', ')', 'phimin', '=', '0.0', 'phimax', '=', '2', '*', 'math', '.', 'pi', '# Amplitude of transient, done in units of the std', '# std is calculated assuming that noise level in the middle of the data, ', "# at index d['readints']/2, is characteristic of that throughout the data", 'A', '=', 'random', '.', 'uniform', '(', 'Amin', ',', 'Amax', ')', '*', 'std', '# Position of transient, in direction cosines', 'r', '=', 'random', '.', 'uniform', '(', 'rmin', ',', 'rmax', ')', 'phi', '=', 'random', '.', 'uniform', '(', 'phimin', ',', 'phimax', ')', 'loff', '=', 'r', '*', 'math', '.', 'cos', '(', 'phi', ')', '*', 'rad_arcmin', 'moff', '=', 'r', '*', 'math', '.', 'sin', '(', 'phi', ')', '*', 'rad_arcmin', '# Dispersion measure', 'DM', '=', 'random', '.', 'uniform', '(', 'DMmin', ',', 'DMmax', ')', 'return', 'loff', ',', 'moff', ',', 'A', ',', 'DM'] | Produce a mock transient pulse source for the purposes of characterizing the
detection success of the current pipeline.
Assumes
- Code to inject the transients does so by inserting at an array index
- Noise level at the center of the data array is characteristic of the
noise level throughout
Input
std - noise level in visibilities(?) at mid-point of segment
DMmax - maximum DM at which mock transient can be inserted [pc/cm^3]
Amin/Amax is amplitude in units of the std (calculated below)
rmax/rmin is radius range in arcmin
DMmin is min DM
Returns
loff - direction cosine offset of mock transient from phase center [radians]
moff - direction cosine offset of mock transient from phase center [radians]
A - amplitude of transient [std units]
DM - dispersion measure of mock transient [pc/cm^3] | ['Produce', 'a', 'mock', 'transient', 'pulse', 'source', 'for', 'the', 'purposes', 'of', 'characterizing', 'the', 'detection', 'success', 'of', 'the', 'current', 'pipeline', '.', 'Assumes', '-', 'Code', 'to', 'inject', 'the', 'transients', 'does', 'so', 'by', 'inserting', 'at', 'an', 'array', 'index', '-', 'Noise', 'level', 'at', 'the', 'center', 'of', 'the', 'data', 'array', 'is', 'characteristic', 'of', 'the', 'noise', 'level', 'throughout'] | train | https://github.com/caseyjlaw/rtpipe/blob/ac33e4332cf215091a63afbb3137850876d73ec0/rtpipe/RT.py#L541-L581 |
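A small worked example of the geometry and units used above, independent of rtpipe: an offset of r arcminutes at position angle phi becomes direction-cosine offsets in radians. The seed and ranges below are illustrative only.

import math
import random

RAD_PER_ARCMIN = math.pi / (180 * 60)

random.seed(0)                              # only so the sketch is repeatable
r_arcmin = random.uniform(0.0, 20.0)        # radius drawn as in make_transient
phi = random.uniform(0.0, 2 * math.pi)      # position angle
loff = r_arcmin * math.cos(phi) * RAD_PER_ARCMIN
moff = r_arcmin * math.sin(phi) * RAD_PER_ARCMIN
print(r_arcmin, loff, moff)                 # ~17 arcmin maps to offsets of order 1e-3 rad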
6,670 | wmayner/pyphi | pyphi/validate.py | network | def network(n):
"""Validate a |Network|.
Checks the TPM and connectivity matrix.
"""
tpm(n.tpm)
connectivity_matrix(n.cm)
if n.cm.shape[0] != n.size:
raise ValueError("Connectivity matrix must be NxN, where N is the "
"number of nodes in the network.")
return True | python | def network(n):
"""Validate a |Network|.
Checks the TPM and connectivity matrix.
"""
tpm(n.tpm)
connectivity_matrix(n.cm)
if n.cm.shape[0] != n.size:
raise ValueError("Connectivity matrix must be NxN, where N is the "
"number of nodes in the network.")
return True | ['def', 'network', '(', 'n', ')', ':', 'tpm', '(', 'n', '.', 'tpm', ')', 'connectivity_matrix', '(', 'n', '.', 'cm', ')', 'if', 'n', '.', 'cm', '.', 'shape', '[', '0', ']', '!=', 'n', '.', 'size', ':', 'raise', 'ValueError', '(', '"Connectivity matrix must be NxN, where N is the "', '"number of nodes in the network."', ')', 'return', 'True'] | Validate a |Network|.
Checks the TPM and connectivity matrix. | ['Validate', 'a', '|Network|', '.'] | train | https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/validate.py#L118-L128 |
6,671 | wummel/linkchecker | linkcheck/logger/html.py | HtmlLogger.write_base | def write_base (self, url_data):
"""Write url_data.base_ref."""
self.writeln(u"<tr><td>"+self.part("base")+u"</td><td>"+
cgi.escape(url_data.base_ref)+u"</td></tr>") | python | def write_base (self, url_data):
"""Write url_data.base_ref."""
self.writeln(u"<tr><td>"+self.part("base")+u"</td><td>"+
cgi.escape(url_data.base_ref)+u"</td></tr>") | ['def', 'write_base', '(', 'self', ',', 'url_data', ')', ':', 'self', '.', 'writeln', '(', 'u"<tr><td>"', '+', 'self', '.', 'part', '(', '"base"', ')', '+', 'u"</td><td>"', '+', 'cgi', '.', 'escape', '(', 'url_data', '.', 'base_ref', ')', '+', 'u"</td></tr>"', ')'] | Write url_data.base_ref. | ['Write', 'url_data', '.', 'base_ref', '.'] | train | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/logger/html.py#L205-L208 |
6,672 | widdowquinn/pyani | bin/average_nucleotide_identity.py | calculate_anim | def calculate_anim(infiles, org_lengths):
"""Returns ANIm result dataframes for files in input directory.
- infiles - paths to each input file
- org_lengths - dictionary of input sequence lengths, keyed by sequence
Finds ANI by the ANIm method, as described in Richter et al (2009)
Proc Natl Acad Sci USA 106: 19126-19131 doi:10.1073/pnas.0906412106.
All FASTA format files (selected by suffix) in the input directory
are compared against each other, pairwise, using NUCmer (which must
be in the path). NUCmer output is stored in the output directory.
The NUCmer .delta file output is parsed to obtain an alignment length
and similarity error count for every unique region alignment between
the two organisms, as represented by the sequences in the FASTA files.
These are processed to give matrices of aligned sequence lengths,
average nucleotide identity (ANI) percentages, coverage (aligned
percentage of whole genome), and similarity error cound for each pairwise
comparison.
"""
logger.info("Running ANIm")
logger.info("Generating NUCmer command-lines")
deltadir = os.path.join(args.outdirname, ALIGNDIR["ANIm"])
logger.info("Writing nucmer output to %s", deltadir)
# Schedule NUCmer runs
if not args.skip_nucmer:
joblist = anim.generate_nucmer_jobs(
infiles,
args.outdirname,
nucmer_exe=args.nucmer_exe,
filter_exe=args.filter_exe,
maxmatch=args.maxmatch,
jobprefix=args.jobprefix,
)
if args.scheduler == "multiprocessing":
logger.info("Running jobs with multiprocessing")
if args.workers is None:
logger.info("(using maximum number of available " + "worker threads)")
else:
logger.info("(using %d worker threads, if available)", args.workers)
cumval = run_mp.run_dependency_graph(
joblist, workers=args.workers, logger=logger
)
logger.info("Cumulative return value: %d", cumval)
if 0 < cumval:
logger.warning(
"At least one NUCmer comparison failed. " + "ANIm may fail."
)
else:
logger.info("All multiprocessing jobs complete.")
else:
logger.info("Running jobs with SGE")
logger.info("Jobarray group size set to %d", args.sgegroupsize)
run_sge.run_dependency_graph(
joblist,
logger=logger,
jgprefix=args.jobprefix,
sgegroupsize=args.sgegroupsize,
sgeargs=args.sgeargs,
)
else:
logger.warning("Skipping NUCmer run (as instructed)!")
# Process resulting .delta files
logger.info("Processing NUCmer .delta files.")
results = anim.process_deltadir(deltadir, org_lengths, logger=logger)
if results.zero_error: # zero percentage identity error
if not args.skip_nucmer and args.scheduler == "multiprocessing":
if 0 < cumval:
logger.error(
"This has possibly been a NUCmer run failure, "
+ "please investigate"
)
logger.error(last_exception())
sys.exit(1)
else:
logger.error(
"This is possibly due to a NUCmer comparison "
+ "being too distant for use. Please consider "
+ "using the --maxmatch option."
)
logger.error(
"This is alternatively due to NUCmer run "
+ "failure, analysis will continue, but please "
+ "investigate."
)
if not args.nocompress:
logger.info("Compressing/deleting %s", deltadir)
compress_delete_outdir(deltadir)
# Return processed data from .delta files
return results | python | def calculate_anim(infiles, org_lengths):
"""Returns ANIm result dataframes for files in input directory.
- infiles - paths to each input file
- org_lengths - dictionary of input sequence lengths, keyed by sequence
Finds ANI by the ANIm method, as described in Richter et al (2009)
Proc Natl Acad Sci USA 106: 19126-19131 doi:10.1073/pnas.0906412106.
All FASTA format files (selected by suffix) in the input directory
are compared against each other, pairwise, using NUCmer (which must
be in the path). NUCmer output is stored in the output directory.
The NUCmer .delta file output is parsed to obtain an alignment length
and similarity error count for every unique region alignment between
the two organisms, as represented by the sequences in the FASTA files.
These are processed to give matrices of aligned sequence lengths,
average nucleotide identity (ANI) percentages, coverage (aligned
percentage of whole genome), and similarity error count for each pairwise
comparison.
"""
logger.info("Running ANIm")
logger.info("Generating NUCmer command-lines")
deltadir = os.path.join(args.outdirname, ALIGNDIR["ANIm"])
logger.info("Writing nucmer output to %s", deltadir)
# Schedule NUCmer runs
if not args.skip_nucmer:
joblist = anim.generate_nucmer_jobs(
infiles,
args.outdirname,
nucmer_exe=args.nucmer_exe,
filter_exe=args.filter_exe,
maxmatch=args.maxmatch,
jobprefix=args.jobprefix,
)
if args.scheduler == "multiprocessing":
logger.info("Running jobs with multiprocessing")
if args.workers is None:
logger.info("(using maximum number of available " + "worker threads)")
else:
logger.info("(using %d worker threads, if available)", args.workers)
cumval = run_mp.run_dependency_graph(
joblist, workers=args.workers, logger=logger
)
logger.info("Cumulative return value: %d", cumval)
if 0 < cumval:
logger.warning(
"At least one NUCmer comparison failed. " + "ANIm may fail."
)
else:
logger.info("All multiprocessing jobs complete.")
else:
logger.info("Running jobs with SGE")
logger.info("Jobarray group size set to %d", args.sgegroupsize)
run_sge.run_dependency_graph(
joblist,
logger=logger,
jgprefix=args.jobprefix,
sgegroupsize=args.sgegroupsize,
sgeargs=args.sgeargs,
)
else:
logger.warning("Skipping NUCmer run (as instructed)!")
# Process resulting .delta files
logger.info("Processing NUCmer .delta files.")
results = anim.process_deltadir(deltadir, org_lengths, logger=logger)
if results.zero_error: # zero percentage identity error
if not args.skip_nucmer and args.scheduler == "multiprocessing":
if 0 < cumval:
logger.error(
"This has possibly been a NUCmer run failure, "
+ "please investigate"
)
logger.error(last_exception())
sys.exit(1)
else:
logger.error(
"This is possibly due to a NUCmer comparison "
+ "being too distant for use. Please consider "
+ "using the --maxmatch option."
)
logger.error(
"This is alternatively due to NUCmer run "
+ "failure, analysis will continue, but please "
+ "investigate."
)
if not args.nocompress:
logger.info("Compressing/deleting %s", deltadir)
compress_delete_outdir(deltadir)
# Return processed data from .delta files
return results | ['def', 'calculate_anim', '(', 'infiles', ',', 'org_lengths', ')', ':', 'logger', '.', 'info', '(', '"Running ANIm"', ')', 'logger', '.', 'info', '(', '"Generating NUCmer command-lines"', ')', 'deltadir', '=', 'os', '.', 'path', '.', 'join', '(', 'args', '.', 'outdirname', ',', 'ALIGNDIR', '[', '"ANIm"', ']', ')', 'logger', '.', 'info', '(', '"Writing nucmer output to %s"', ',', 'deltadir', ')', '# Schedule NUCmer runs', 'if', 'not', 'args', '.', 'skip_nucmer', ':', 'joblist', '=', 'anim', '.', 'generate_nucmer_jobs', '(', 'infiles', ',', 'args', '.', 'outdirname', ',', 'nucmer_exe', '=', 'args', '.', 'nucmer_exe', ',', 'filter_exe', '=', 'args', '.', 'filter_exe', ',', 'maxmatch', '=', 'args', '.', 'maxmatch', ',', 'jobprefix', '=', 'args', '.', 'jobprefix', ',', ')', 'if', 'args', '.', 'scheduler', '==', '"multiprocessing"', ':', 'logger', '.', 'info', '(', '"Running jobs with multiprocessing"', ')', 'if', 'args', '.', 'workers', 'is', 'None', ':', 'logger', '.', 'info', '(', '"(using maximum number of available "', '+', '"worker threads)"', ')', 'else', ':', 'logger', '.', 'info', '(', '"(using %d worker threads, if available)"', ',', 'args', '.', 'workers', ')', 'cumval', '=', 'run_mp', '.', 'run_dependency_graph', '(', 'joblist', ',', 'workers', '=', 'args', '.', 'workers', ',', 'logger', '=', 'logger', ')', 'logger', '.', 'info', '(', '"Cumulative return value: %d"', ',', 'cumval', ')', 'if', '0', '<', 'cumval', ':', 'logger', '.', 'warning', '(', '"At least one NUCmer comparison failed. "', '+', '"ANIm may fail."', ')', 'else', ':', 'logger', '.', 'info', '(', '"All multiprocessing jobs complete."', ')', 'else', ':', 'logger', '.', 'info', '(', '"Running jobs with SGE"', ')', 'logger', '.', 'info', '(', '"Jobarray group size set to %d"', ',', 'args', '.', 'sgegroupsize', ')', 'run_sge', '.', 'run_dependency_graph', '(', 'joblist', ',', 'logger', '=', 'logger', ',', 'jgprefix', '=', 'args', '.', 'jobprefix', ',', 'sgegroupsize', '=', 'args', '.', 'sgegroupsize', ',', 'sgeargs', '=', 'args', '.', 'sgeargs', ',', ')', 'else', ':', 'logger', '.', 'warning', '(', '"Skipping NUCmer run (as instructed)!"', ')', '# Process resulting .delta files', 'logger', '.', 'info', '(', '"Processing NUCmer .delta files."', ')', 'results', '=', 'anim', '.', 'process_deltadir', '(', 'deltadir', ',', 'org_lengths', ',', 'logger', '=', 'logger', ')', 'if', 'results', '.', 'zero_error', ':', '# zero percentage identity error', 'if', 'not', 'args', '.', 'skip_nucmer', 'and', 'args', '.', 'scheduler', '==', '"multiprocessing"', ':', 'if', '0', '<', 'cumval', ':', 'logger', '.', 'error', '(', '"This has possibly been a NUCmer run failure, "', '+', '"please investigate"', ')', 'logger', '.', 'error', '(', 'last_exception', '(', ')', ')', 'sys', '.', 'exit', '(', '1', ')', 'else', ':', 'logger', '.', 'error', '(', '"This is possibly due to a NUCmer comparison "', '+', '"being too distant for use. Please consider "', '+', '"using the --maxmatch option."', ')', 'logger', '.', 'error', '(', '"This is alternatively due to NUCmer run "', '+', '"failure, analysis will continue, but please "', '+', '"investigate."', ')', 'if', 'not', 'args', '.', 'nocompress', ':', 'logger', '.', 'info', '(', '"Compressing/deleting %s"', ',', 'deltadir', ')', 'compress_delete_outdir', '(', 'deltadir', ')', '# Return processed data from .delta files', 'return', 'results'] | Returns ANIm result dataframes for files in input directory.
- infiles - paths to each input file
- org_lengths - dictionary of input sequence lengths, keyed by sequence
Finds ANI by the ANIm method, as described in Richter et al (2009)
Proc Natl Acad Sci USA 106: 19126-19131 doi:10.1073/pnas.0906412106.
All FASTA format files (selected by suffix) in the input directory
are compared against each other, pairwise, using NUCmer (which must
be in the path). NUCmer output is stored in the output directory.
The NUCmer .delta file output is parsed to obtain an alignment length
and similarity error count for every unique region alignment between
the two organisms, as represented by the sequences in the FASTA files.
These are processed to give matrices of aligned sequence lengths,
average nucleotide identity (ANI) percentages, coverage (aligned
percentage of whole genome), and similarity error count for each pairwise
comparison. | ['Returns', 'ANIm', 'result', 'dataframes', 'for', 'files', 'in', 'input', 'directory', '.'] | train | https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/average_nucleotide_identity.py#L506-L599 |
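The calculate_anim record above chains together pyani's NUCmer job generation, a job scheduler, and .delta parsing. Below is a minimal, hedged sketch of that pipeline outside the script's global `args`/`logger` objects; the import paths, the `nucmer_output` directory name, and every file path are assumptions rather than values taken from the record.

```python
import logging
import os
from argparse import Namespace

from pyani import anim                              # helper module used by the record above
from pyani import run_multiprocessing as run_mp     # module name assumed from the record's run_mp alias

logger = logging.getLogger("ANIm-sketch")
args = Namespace(outdirname="ani_out", nucmer_exe="nucmer", filter_exe="delta-filter",
                 maxmatch=False, jobprefix="ANIm", workers=None)

infiles = ["genomes/a.fna", "genomes/b.fna"]        # FASTA inputs (placeholder paths)
org_lengths = {"a": 5_000_000, "b": 4_800_000}      # total sequence length per genome (made up)

# 1. One NUCmer comparison job per genome pair.
joblist = anim.generate_nucmer_jobs(infiles, args.outdirname,
                                    nucmer_exe=args.nucmer_exe,
                                    filter_exe=args.filter_exe,
                                    maxmatch=args.maxmatch,
                                    jobprefix=args.jobprefix)

# 2. Run the jobs locally (the SGE branch in the record is the cluster equivalent).
run_mp.run_dependency_graph(joblist, workers=args.workers, logger=logger)

# 3. Parse every .delta file into alignment-length / identity / coverage matrices.
deltadir = os.path.join(args.outdirname, "nucmer_output")   # assumed ALIGNDIR["ANIm"] value
results = anim.process_deltadir(deltadir, org_lengths, logger=logger)
```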
6,673 | mk-fg/txboxdotnet | txboxdotnet/api_v2.py | txBox.resolve_path | def resolve_path( self, path,
root_id='0', objects=False ):
'''Return id (or metadata) of an object, specified by chain
(iterable or fs-style path string) of "name" attributes of it's ancestors,
or raises DoesNotExists error.
Requires a lot of calls to resolve each name in path, so use with care.
root_id parameter allows to specify path
relative to some folder_id (default: 0).'''
if path:
if isinstance(path, types.StringTypes):
path = filter(None, path.split(os.sep))
if path:
try:
for i, name in enumerate(path):
root_id = dict(it.imap(
op.itemgetter('name', 'id'), (yield self.listdir(root_id)) ))[name]
except (KeyError, ProtocolError) as err:
if isinstance(err, ProtocolError) and err.code != 404: raise
raise DoesNotExists(root_id, path[i:])
defer.returnValue(root_id if not objects else (yield self.info(root_id))) | python | def resolve_path( self, path,
root_id='0', objects=False ):
'''Return id (or metadata) of an object, specified by chain
(iterable or fs-style path string) of "name" attributes of it's ancestors,
or raises DoesNotExists error.
Requires a lot of calls to resolve each name in path, so use with care.
root_id parameter allows to specify path
relative to some folder_id (default: 0).'''
if path:
if isinstance(path, types.StringTypes):
path = filter(None, path.split(os.sep))
if path:
try:
for i, name in enumerate(path):
root_id = dict(it.imap(
op.itemgetter('name', 'id'), (yield self.listdir(root_id)) ))[name]
except (KeyError, ProtocolError) as err:
if isinstance(err, ProtocolError) and err.code != 404: raise
raise DoesNotExists(root_id, path[i:])
defer.returnValue(root_id if not objects else (yield self.info(root_id))) | ['def', 'resolve_path', '(', 'self', ',', 'path', ',', 'root_id', '=', "'0'", ',', 'objects', '=', 'False', ')', ':', 'if', 'path', ':', 'if', 'isinstance', '(', 'path', ',', 'types', '.', 'StringTypes', ')', ':', 'path', '=', 'filter', '(', 'None', ',', 'path', '.', 'split', '(', 'os', '.', 'sep', ')', ')', 'if', 'path', ':', 'try', ':', 'for', 'i', ',', 'name', 'in', 'enumerate', '(', 'path', ')', ':', 'root_id', '=', 'dict', '(', 'it', '.', 'imap', '(', 'op', '.', 'itemgetter', '(', "'name'", ',', "'id'", ')', ',', '(', 'yield', 'self', '.', 'listdir', '(', 'root_id', ')', ')', ')', ')', '[', 'name', ']', 'except', '(', 'KeyError', ',', 'ProtocolError', ')', 'as', 'err', ':', 'if', 'isinstance', '(', 'err', ',', 'ProtocolError', ')', 'and', 'err', '.', 'code', '!=', '404', ':', 'raise', 'raise', 'DoesNotExists', '(', 'root_id', ',', 'path', '[', 'i', ':', ']', ')', 'defer', '.', 'returnValue', '(', 'root_id', 'if', 'not', 'objects', 'else', '(', 'yield', 'self', '.', 'info', '(', 'root_id', ')', ')', ')'] | Return id (or metadata) of an object, specified by chain
(iterable or fs-style path string) of "name" attributes of it's ancestors,
or raises DoesNotExists error.
Requires a lot of calls to resolve each name in path, so use with care.
root_id parameter allows to specify path
relative to some folder_id (default: 0). | ['Return', 'id', '(', 'or', 'metadata', ')', 'of', 'an', 'object', 'specified', 'by', 'chain', '(', 'iterable', 'or', 'fs', '-', 'style', 'path', 'string', ')', 'of', 'name', 'attributes', 'of', 'it', 's', 'ancestors', 'or', 'raises', 'DoesNotExists', 'error', '.', 'Requires', 'a', 'lot', 'of', 'calls', 'to', 'resolve', 'each', 'name', 'in', 'path', 'so', 'use', 'with', 'care', '.', 'root_id', 'parameter', 'allows', 'to', 'specify', 'path', 'relative', 'to', 'some', 'folder_id', '(', 'default', ':', '0', ')', '.'] | train | https://github.com/mk-fg/txboxdotnet/blob/4a3e48fbe1388c5e2a17e808aaaf6b2460e61f48/txboxdotnet/api_v2.py#L715-L734 |
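The resolve_path record above walks an fs-style path by repeatedly mapping child names to ids with listdir. The following standalone, synchronous sketch illustrates the same lookup loop without the Twisted deferreds or the Box API; the toy tree and helper names are purely illustrative.

```python
import os

class DoesNotExists(Exception):
    """Raised when a path component cannot be resolved (mirrors the error in the record)."""

def resolve_path(path, listdir, root_id='0'):
    # Split an fs-style path and resolve each component, by name, against the
    # parent folder's listing -- the same loop the record performs with deferreds.
    parts = [p for p in path.split(os.sep) if p] if isinstance(path, str) else list(path)
    for i, name in enumerate(parts):
        children = {entry['name']: entry['id'] for entry in listdir(root_id)}
        if name not in children:
            raise DoesNotExists((root_id, parts[i:]))
        root_id = children[name]
    return root_id

# Toy folder tree standing in for the remote listing (purely illustrative).
tree = {
    '0': [{'name': 'docs', 'id': '10'}],
    '10': [{'name': 'notes.txt', 'id': '42'}],
}
print(resolve_path('docs/notes.txt', lambda folder_id: tree[folder_id]))  # -> '42'
```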
6,674 | senaite/senaite.core | bika/lims/api/__init__.py | get_revision_history | def get_revision_history(brain_or_object):
"""Get the revision history for the given brain or context.
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: Workflow history
:rtype: obj
"""
obj = get_object(brain_or_object)
chv = ContentHistoryView(obj, safe_getattr(obj, "REQUEST", None))
return chv.fullHistory() | python | def get_revision_history(brain_or_object):
"""Get the revision history for the given brain or context.
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: Workflow history
:rtype: obj
"""
obj = get_object(brain_or_object)
chv = ContentHistoryView(obj, safe_getattr(obj, "REQUEST", None))
return chv.fullHistory() | ['def', 'get_revision_history', '(', 'brain_or_object', ')', ':', 'obj', '=', 'get_object', '(', 'brain_or_object', ')', 'chv', '=', 'ContentHistoryView', '(', 'obj', ',', 'safe_getattr', '(', 'obj', ',', '"REQUEST"', ',', 'None', ')', ')', 'return', 'chv', '.', 'fullHistory', '(', ')'] | Get the revision history for the given brain or context.
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: Workflow history
:rtype: obj | ['Get', 'the', 'revision', 'history', 'for', 'the', 'given', 'brain', 'or', 'context', '.'] | train | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/api/__init__.py#L726-L736 |
6,675 | pymc-devs/pymc | pymc/Matplot.py | plotwrapper | def plotwrapper(f):
"""
This decorator allows for PyMC arguments of various types to be passed to
the plotting functions. It identifies the type of object and locates its
trace(s), then passes the data to the wrapped plotting function.
"""
def wrapper(pymc_obj, *args, **kwargs):
start = 0
if 'start' in kwargs:
start = kwargs.pop('start')
# Figure out what type of object it is
try:
# First try Model type
for variable in pymc_obj._variables_to_tally:
# Plot object
if variable._plot is not False:
data = pymc_obj.trace(variable.__name__)[start:]
if size(data[-1]) >= 10 and variable._plot != True:
continue
elif variable.dtype is dtype('object'):
continue
name = variable.__name__
if args:
name = '%s_%s' % (args[0], variable.__name__)
f(data, name, *args, **kwargs)
return
except AttributeError:
pass
try:
# Then try Trace type
data = pymc_obj()[:]
name = pymc_obj.name
f(data, name, *args, **kwargs)
return
except (AttributeError, TypeError):
pass
try:
# Then try Node type
if pymc_obj._plot is not False:
data = pymc_obj.trace()[start:] # This is deprecated. DH
name = pymc_obj.__name__
f(data, name, *args, **kwargs)
return
except AttributeError:
pass
if isinstance(pymc_obj, dict):
# Then try dictionary
for i in pymc_obj:
data = pymc_obj[i][start:]
if args:
i = '%s_%s' % (args[0], i)
elif 'name' in kwargs:
i = '%s_%s' % (kwargs.pop('name'), i)
f(data, i, *args, **kwargs)
return
# If others fail, assume that raw data is passed
f(pymc_obj, *args, **kwargs)
wrapper.__doc__ = f.__doc__
wrapper.__name__ = f.__name__
return wrapper | python | def plotwrapper(f):
"""
This decorator allows for PyMC arguments of various types to be passed to
the plotting functions. It identifies the type of object and locates its
trace(s), then passes the data to the wrapped plotting function.
"""
def wrapper(pymc_obj, *args, **kwargs):
start = 0
if 'start' in kwargs:
start = kwargs.pop('start')
# Figure out what type of object it is
try:
# First try Model type
for variable in pymc_obj._variables_to_tally:
# Plot object
if variable._plot is not False:
data = pymc_obj.trace(variable.__name__)[start:]
if size(data[-1]) >= 10 and variable._plot != True:
continue
elif variable.dtype is dtype('object'):
continue
name = variable.__name__
if args:
name = '%s_%s' % (args[0], variable.__name__)
f(data, name, *args, **kwargs)
return
except AttributeError:
pass
try:
# Then try Trace type
data = pymc_obj()[:]
name = pymc_obj.name
f(data, name, *args, **kwargs)
return
except (AttributeError, TypeError):
pass
try:
# Then try Node type
if pymc_obj._plot is not False:
data = pymc_obj.trace()[start:] # This is deprecated. DH
name = pymc_obj.__name__
f(data, name, *args, **kwargs)
return
except AttributeError:
pass
if isinstance(pymc_obj, dict):
# Then try dictionary
for i in pymc_obj:
data = pymc_obj[i][start:]
if args:
i = '%s_%s' % (args[0], i)
elif 'name' in kwargs:
i = '%s_%s' % (kwargs.pop('name'), i)
f(data, i, *args, **kwargs)
return
# If others fail, assume that raw data is passed
f(pymc_obj, *args, **kwargs)
wrapper.__doc__ = f.__doc__
wrapper.__name__ = f.__name__
return wrapper | ['def', 'plotwrapper', '(', 'f', ')', ':', 'def', 'wrapper', '(', 'pymc_obj', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'start', '=', '0', 'if', "'start'", 'in', 'kwargs', ':', 'start', '=', 'kwargs', '.', 'pop', '(', "'start'", ')', '# Figure out what type of object it is', 'try', ':', '# First try Model type', 'for', 'variable', 'in', 'pymc_obj', '.', '_variables_to_tally', ':', '# Plot object', 'if', 'variable', '.', '_plot', 'is', 'not', 'False', ':', 'data', '=', 'pymc_obj', '.', 'trace', '(', 'variable', '.', '__name__', ')', '[', 'start', ':', ']', 'if', 'size', '(', 'data', '[', '-', '1', ']', ')', '>=', '10', 'and', 'variable', '.', '_plot', '!=', 'True', ':', 'continue', 'elif', 'variable', '.', 'dtype', 'is', 'dtype', '(', "'object'", ')', ':', 'continue', 'name', '=', 'variable', '.', '__name__', 'if', 'args', ':', 'name', '=', "'%s_%s'", '%', '(', 'args', '[', '0', ']', ',', 'variable', '.', '__name__', ')', 'f', '(', 'data', ',', 'name', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'except', 'AttributeError', ':', 'pass', 'try', ':', '# Then try Trace type', 'data', '=', 'pymc_obj', '(', ')', '[', ':', ']', 'name', '=', 'pymc_obj', '.', 'name', 'f', '(', 'data', ',', 'name', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'except', '(', 'AttributeError', ',', 'TypeError', ')', ':', 'pass', 'try', ':', '# Then try Node type', 'if', 'pymc_obj', '.', '_plot', 'is', 'not', 'False', ':', 'data', '=', 'pymc_obj', '.', 'trace', '(', ')', '[', 'start', ':', ']', '# This is deprecated. DH', 'name', '=', 'pymc_obj', '.', '__name__', 'f', '(', 'data', ',', 'name', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'except', 'AttributeError', ':', 'pass', 'if', 'isinstance', '(', 'pymc_obj', ',', 'dict', ')', ':', '# Then try dictionary', 'for', 'i', 'in', 'pymc_obj', ':', 'data', '=', 'pymc_obj', '[', 'i', ']', '[', 'start', ':', ']', 'if', 'args', ':', 'i', '=', "'%s_%s'", '%', '(', 'args', '[', '0', ']', ',', 'i', ')', 'elif', "'name'", 'in', 'kwargs', ':', 'i', '=', "'%s_%s'", '%', '(', 'kwargs', '.', 'pop', '(', "'name'", ')', ',', 'i', ')', 'f', '(', 'data', ',', 'i', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', '# If others fail, assume that raw data is passed', 'f', '(', 'pymc_obj', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'wrapper', '.', '__doc__', '=', 'f', '.', '__doc__', 'wrapper', '.', '__name__', '=', 'f', '.', '__name__', 'return', 'wrapper'] | This decorator allows for PyMC arguments of various types to be passed to
the plotting functions. It identifies the type of object and locates its
trace(s), then passes the data to the wrapped plotting function. | ['This', 'decorator', 'allows', 'for', 'PyMC', 'arguments', 'of', 'various', 'types', 'to', 'be', 'passed', 'to', 'the', 'plotting', 'functions', '.', 'It', 'identifies', 'the', 'type', 'of', 'object', 'and', 'locates', 'its', 'trace', '(', 's', ')', 'then', 'passes', 'the', 'data', 'to', 'the', 'wrapped', 'plotting', 'function', '.'] | train | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Matplot.py#L314-L381 |
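The plotwrapper record above is essentially a duck-typing dispatcher: it inspects the object handed to a plotting function, extracts trace data for each supported shape, and finally falls back to treating the argument as raw data. A much-reduced sketch of that pattern, independent of PyMC, might look like this:

```python
def plotwrapper(f):
    # Simplified sketch of the dispatch pattern above: try each known shape of
    # input in turn, extract the trace data, and fall back to raw data.
    def wrapper(obj, *args, **kwargs):
        start = kwargs.pop('start', 0)
        # Dict of named traces -> one plotting call per entry.
        if isinstance(obj, dict):
            for name, data in obj.items():
                f(data[start:], name, *args, **kwargs)
            return
        # Objects exposing a trace() method (Node-like).
        trace = getattr(obj, 'trace', None)
        if callable(trace):
            f(trace()[start:], getattr(obj, '__name__', 'trace'), *args, **kwargs)
            return
        # Anything else is treated as raw data.
        f(obj, *args, **kwargs)
    wrapper.__name__ = f.__name__
    wrapper.__doc__ = f.__doc__
    return wrapper

@plotwrapper
def summarize(data, name='data'):
    print(name, len(data))

summarize({'alpha': list(range(100)), 'beta': list(range(50))}, start=10)  # alpha 90 / beta 40
```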
6,676 | spotify/snakebite | snakebite/channel.py | SocketRpcChannel.CallMethod | def CallMethod(self, method, controller, request, response_class, done):
'''Call the RPC method. The naming doesn't confirm PEP8, since it's
a method called by protobuf
'''
try:
self.validate_request(request)
if not self.sock:
self.get_connection(self.host, self.port)
self.send_rpc_message(method, request)
byte_stream = self.recv_rpc_message()
return self.parse_response(byte_stream, response_class)
except RequestError: # Raise a request error, but don't close the socket
raise
except Exception: # All other errors close the socket
self.close_socket()
raise | python | def CallMethod(self, method, controller, request, response_class, done):
'''Call the RPC method. The naming doesn't confirm PEP8, since it's
a method called by protobuf
'''
try:
self.validate_request(request)
if not self.sock:
self.get_connection(self.host, self.port)
self.send_rpc_message(method, request)
byte_stream = self.recv_rpc_message()
return self.parse_response(byte_stream, response_class)
except RequestError: # Raise a request error, but don't close the socket
raise
except Exception: # All other errors close the socket
self.close_socket()
raise | ['def', 'CallMethod', '(', 'self', ',', 'method', ',', 'controller', ',', 'request', ',', 'response_class', ',', 'done', ')', ':', 'try', ':', 'self', '.', 'validate_request', '(', 'request', ')', 'if', 'not', 'self', '.', 'sock', ':', 'self', '.', 'get_connection', '(', 'self', '.', 'host', ',', 'self', '.', 'port', ')', 'self', '.', 'send_rpc_message', '(', 'method', ',', 'request', ')', 'byte_stream', '=', 'self', '.', 'recv_rpc_message', '(', ')', 'return', 'self', '.', 'parse_response', '(', 'byte_stream', ',', 'response_class', ')', 'except', 'RequestError', ':', "# Raise a request error, but don't close the socket", 'raise', 'except', 'Exception', ':', '# All other errors close the socket', 'self', '.', 'close_socket', '(', ')', 'raise'] | Call the RPC method. The naming doesn't confirm PEP8, since it's
a method called by protobuf | ['Call', 'the', 'RPC', 'method', '.', 'The', 'naming', 'doesn', 't', 'confirm', 'PEP8', 'since', 'it', 's', 'a', 'method', 'called', 'by', 'protobuf'] | train | https://github.com/spotify/snakebite/blob/6a456e6100b0c1be66cc1f7f9d7f50494f369da3/snakebite/channel.py#L434-L452 |
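The CallMethod record above follows a common RPC client pattern: connect lazily, keep the connection open after request-validation errors, and tear it down on any transport-level failure. A generic sketch of that error-handling shape (not the snakebite API itself):

```python
class RequestError(Exception):
    """Client-side validation failure; the connection stays usable."""

class Channel:
    # Minimal sketch of the call pattern above: lazily connect, send, receive,
    # and drop the connection only on transport-level failures.
    def __init__(self):
        self.sock = None

    def call(self, validate, send, recv):
        try:
            validate()
            if self.sock is None:
                self.sock = object()   # stand-in for a real connect()
            send()
            return recv()
        except RequestError:
            raise                      # bad request: keep the socket open
        except Exception:
            self.sock = None           # any other error: close the connection
            raise

ch = Channel()
print(ch.call(lambda: None, lambda: None, lambda: "response"))
```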
6,677 | brunobord/meuhdb | meuhdb/core.py | MeuhDb._clean_index | def _clean_index(self):
"Clean index values after loading."
for idx_name, idx_def in self.index_defs.items():
if idx_def['type'] == 'lazy':
self.build_index(idx_name)
for index_name, values in self.indexes.items():
for value in values:
if not isinstance(values[value], set):
values[value] = set(values[value]) | python | def _clean_index(self):
"Clean index values after loading."
for idx_name, idx_def in self.index_defs.items():
if idx_def['type'] == 'lazy':
self.build_index(idx_name)
for index_name, values in self.indexes.items():
for value in values:
if not isinstance(values[value], set):
values[value] = set(values[value]) | ['def', '_clean_index', '(', 'self', ')', ':', 'for', 'idx_name', ',', 'idx_def', 'in', 'self', '.', 'index_defs', '.', 'items', '(', ')', ':', 'if', 'idx_def', '[', "'type'", ']', '==', "'lazy'", ':', 'self', '.', 'build_index', '(', 'idx_name', ')', 'for', 'index_name', ',', 'values', 'in', 'self', '.', 'indexes', '.', 'items', '(', ')', ':', 'for', 'value', 'in', 'values', ':', 'if', 'not', 'isinstance', '(', 'values', '[', 'value', ']', ',', 'set', ')', ':', 'values', '[', 'value', ']', '=', 'set', '(', 'values', '[', 'value', ']', ')'] | Clean index values after loading. | ['Clean', 'index', 'values', 'after', 'loading', '.'] | train | https://github.com/brunobord/meuhdb/blob/2ef2ea0b1065768d88f52bacf1b94b3d3ce3d9eb/meuhdb/core.py#L330-L338 |
6,678 | mrcagney/gtfstk | gtfstk/shapes.py | get_shapes_intersecting_geometry | def get_shapes_intersecting_geometry(
feed: "Feed", geometry, geo_shapes=None, *, geometrized: bool = False
) -> DataFrame:
"""
Return the slice of ``feed.shapes`` that contains all shapes that
intersect the given Shapely geometry, e.g. a Polygon or LineString.
Parameters
----------
feed : Feed
geometry : Shapely geometry, e.g. a Polygon
Specified in WGS84 coordinates
geo_shapes : GeoPandas GeoDataFrame
The output of :func:`geometrize_shapes`
geometrize : boolean
If ``True``, then return the shapes DataFrame as a GeoDataFrame
of the form output by :func:`geometrize_shapes`
Returns
-------
DataFrame or GeoDataFrame
Notes
-----
- Requires GeoPandas
- Specifying ``geo_shapes`` will skip the first step of the
algorithm, namely, geometrizing ``feed.shapes``
- Assume the following feed attributes are not ``None``:
* ``feed.shapes``, if ``geo_shapes`` is not given
"""
if geo_shapes is not None:
f = geo_shapes.copy()
else:
f = geometrize_shapes(feed.shapes)
cols = f.columns
f["hit"] = f["geometry"].intersects(geometry)
f = f[f["hit"]][cols]
if geometrized:
return f
else:
return ungeometrize_shapes(f) | python | def get_shapes_intersecting_geometry(
feed: "Feed", geometry, geo_shapes=None, *, geometrized: bool = False
) -> DataFrame:
"""
Return the slice of ``feed.shapes`` that contains all shapes that
intersect the given Shapely geometry, e.g. a Polygon or LineString.
Parameters
----------
feed : Feed
geometry : Shapely geometry, e.g. a Polygon
Specified in WGS84 coordinates
geo_shapes : GeoPandas GeoDataFrame
The output of :func:`geometrize_shapes`
geometrize : boolean
If ``True``, then return the shapes DataFrame as a GeoDataFrame
of the form output by :func:`geometrize_shapes`
Returns
-------
DataFrame or GeoDataFrame
Notes
-----
- Requires GeoPandas
- Specifying ``geo_shapes`` will skip the first step of the
algorithm, namely, geometrizing ``feed.shapes``
- Assume the following feed attributes are not ``None``:
* ``feed.shapes``, if ``geo_shapes`` is not given
"""
if geo_shapes is not None:
f = geo_shapes.copy()
else:
f = geometrize_shapes(feed.shapes)
cols = f.columns
f["hit"] = f["geometry"].intersects(geometry)
f = f[f["hit"]][cols]
if geometrized:
return f
else:
return ungeometrize_shapes(f) | ['def', 'get_shapes_intersecting_geometry', '(', 'feed', ':', '"Feed"', ',', 'geometry', ',', 'geo_shapes', '=', 'None', ',', '*', ',', 'geometrized', ':', 'bool', '=', 'False', ')', '->', 'DataFrame', ':', 'if', 'geo_shapes', 'is', 'not', 'None', ':', 'f', '=', 'geo_shapes', '.', 'copy', '(', ')', 'else', ':', 'f', '=', 'geometrize_shapes', '(', 'feed', '.', 'shapes', ')', 'cols', '=', 'f', '.', 'columns', 'f', '[', '"hit"', ']', '=', 'f', '[', '"geometry"', ']', '.', 'intersects', '(', 'geometry', ')', 'f', '=', 'f', '[', 'f', '[', '"hit"', ']', ']', '[', 'cols', ']', 'if', 'geometrized', ':', 'return', 'f', 'else', ':', 'return', 'ungeometrize_shapes', '(', 'f', ')'] | Return the slice of ``feed.shapes`` that contains all shapes that
intersect the given Shapely geometry, e.g. a Polygon or LineString.
Parameters
----------
feed : Feed
geometry : Shapely geometry, e.g. a Polygon
Specified in WGS84 coordinates
geo_shapes : GeoPandas GeoDataFrame
The output of :func:`geometrize_shapes`
geometrize : boolean
If ``True``, then return the shapes DataFrame as a GeoDataFrame
of the form output by :func:`geometrize_shapes`
Returns
-------
DataFrame or GeoDataFrame
Notes
-----
- Requires GeoPandas
- Specifying ``geo_shapes`` will skip the first step of the
algorithm, namely, geometrizing ``feed.shapes``
- Assume the following feed attributes are not ``None``:
* ``feed.shapes``, if ``geo_shapes`` is not given | ['Return', 'the', 'slice', 'of', 'feed', '.', 'shapes', 'that', 'contains', 'all', 'shapes', 'that', 'intersect', 'the', 'given', 'Shapely', 'geometry', 'e', '.', 'g', '.', 'a', 'Polygon', 'or', 'LineString', '.'] | train | https://github.com/mrcagney/gtfstk/blob/c91494e6fefc02523889655a0dc92d1c0eee8d03/gtfstk/shapes.py#L111-L155 |
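A hedged usage sketch for the function documented above; the GTFS path and polygon coordinates are placeholders, and it assumes the shapes helpers are re-exported at the gtfstk package level (otherwise import them from gtfstk.shapes):

```python
# Hedged usage sketch; the feed path and polygon coordinates are assumptions.
import gtfstk
from shapely.geometry import Polygon

feed = gtfstk.read_gtfs("gtfs.zip", dist_units="km")   # any GTFS feed that includes shapes.txt

# A rough WGS84 bounding box (lon, lat) around an area of interest.
area = Polygon([(-122.45, 37.70), (-122.35, 37.70),
                (-122.35, 37.80), (-122.45, 37.80)])

# Plain DataFrame slice of feed.shapes whose shapes intersect the polygon.
hits = gtfstk.get_shapes_intersecting_geometry(feed, area)

# Same query, returned as a GeoDataFrame of LineStrings instead.
hits_geo = gtfstk.get_shapes_intersecting_geometry(feed, area, geometrized=True)
print(hits["shape_id"].unique())
```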
6,679 | brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_lag.py | brocade_lag.get_port_channel_detail_output_lacp_partner_system_id | def get_port_channel_detail_output_lacp_partner_system_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_channel_detail = ET.Element("get_port_channel_detail")
config = get_port_channel_detail
output = ET.SubElement(get_port_channel_detail, "output")
lacp = ET.SubElement(output, "lacp")
partner_system_id = ET.SubElement(lacp, "partner-system-id")
partner_system_id.text = kwargs.pop('partner_system_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | python | def get_port_channel_detail_output_lacp_partner_system_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_channel_detail = ET.Element("get_port_channel_detail")
config = get_port_channel_detail
output = ET.SubElement(get_port_channel_detail, "output")
lacp = ET.SubElement(output, "lacp")
partner_system_id = ET.SubElement(lacp, "partner-system-id")
partner_system_id.text = kwargs.pop('partner_system_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | ['def', 'get_port_channel_detail_output_lacp_partner_system_id', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'get_port_channel_detail', '=', 'ET', '.', 'Element', '(', '"get_port_channel_detail"', ')', 'config', '=', 'get_port_channel_detail', 'output', '=', 'ET', '.', 'SubElement', '(', 'get_port_channel_detail', ',', '"output"', ')', 'lacp', '=', 'ET', '.', 'SubElement', '(', 'output', ',', '"lacp"', ')', 'partner_system_id', '=', 'ET', '.', 'SubElement', '(', 'lacp', ',', '"partner-system-id"', ')', 'partner_system_id', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'partner_system_id'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')'] | Auto Generated Code | ['Auto', 'Generated', 'Code'] | train | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_lag.py#L140-L152 |
6,680 | brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_ip_policy.py | brocade_ip_policy.hide_routemap_holder_route_map_content_set_automatic_tag_tag_empty | def hide_routemap_holder_route_map_content_set_automatic_tag_tag_empty(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
route_map = ET.SubElement(hide_routemap_holder, "route-map")
name_key = ET.SubElement(route_map, "name")
name_key.text = kwargs.pop('name')
action_rm_key = ET.SubElement(route_map, "action-rm")
action_rm_key.text = kwargs.pop('action_rm')
instance_key = ET.SubElement(route_map, "instance")
instance_key.text = kwargs.pop('instance')
content = ET.SubElement(route_map, "content")
set = ET.SubElement(content, "set")
automatic_tag = ET.SubElement(set, "automatic-tag")
tag_empty = ET.SubElement(automatic_tag, "tag-empty")
callback = kwargs.pop('callback', self._callback)
return callback(config) | python | def hide_routemap_holder_route_map_content_set_automatic_tag_tag_empty(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
route_map = ET.SubElement(hide_routemap_holder, "route-map")
name_key = ET.SubElement(route_map, "name")
name_key.text = kwargs.pop('name')
action_rm_key = ET.SubElement(route_map, "action-rm")
action_rm_key.text = kwargs.pop('action_rm')
instance_key = ET.SubElement(route_map, "instance")
instance_key.text = kwargs.pop('instance')
content = ET.SubElement(route_map, "content")
set = ET.SubElement(content, "set")
automatic_tag = ET.SubElement(set, "automatic-tag")
tag_empty = ET.SubElement(automatic_tag, "tag-empty")
callback = kwargs.pop('callback', self._callback)
return callback(config) | ['def', 'hide_routemap_holder_route_map_content_set_automatic_tag_tag_empty', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'hide_routemap_holder', '=', 'ET', '.', 'SubElement', '(', 'config', ',', '"hide-routemap-holder"', ',', 'xmlns', '=', '"urn:brocade.com:mgmt:brocade-ip-policy"', ')', 'route_map', '=', 'ET', '.', 'SubElement', '(', 'hide_routemap_holder', ',', '"route-map"', ')', 'name_key', '=', 'ET', '.', 'SubElement', '(', 'route_map', ',', '"name"', ')', 'name_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'name'", ')', 'action_rm_key', '=', 'ET', '.', 'SubElement', '(', 'route_map', ',', '"action-rm"', ')', 'action_rm_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'action_rm'", ')', 'instance_key', '=', 'ET', '.', 'SubElement', '(', 'route_map', ',', '"instance"', ')', 'instance_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'instance'", ')', 'content', '=', 'ET', '.', 'SubElement', '(', 'route_map', ',', '"content"', ')', 'set', '=', 'ET', '.', 'SubElement', '(', 'content', ',', '"set"', ')', 'automatic_tag', '=', 'ET', '.', 'SubElement', '(', 'set', ',', '"automatic-tag"', ')', 'tag_empty', '=', 'ET', '.', 'SubElement', '(', 'automatic_tag', ',', '"tag-empty"', ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')'] | Auto Generated Code | ['Auto', 'Generated', 'Code'] | train | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_ip_policy.py#L955-L973 |
6,681 | summanlp/textrank | summa/preprocessing/snowball.py | FrenchStemmer.__rv_french | def __rv_french(self, word, vowels):
"""
Return the region RV that is used by the French stemmer.
If the word begins with two vowels, RV is the region after
the third letter. Otherwise, it is the region after the first
vowel not at the beginning of the word, or the end of the word
if these positions cannot be found. (Exceptionally, u'par',
u'col' or u'tap' at the beginning of a word is also taken to
define RV as the region to their right.)
:param word: The French word whose region RV is determined.
:type word: str or unicode
:param vowels: The French vowels that are used to determine
the region RV.
:type vowels: unicode
:return: the region RV for the respective French word.
:rtype: unicode
:note: This helper method is invoked by the stem method of
the subclass FrenchStemmer. It is not to be invoked directly!
"""
rv = ""
if len(word) >= 2:
if (word.startswith(("par", "col", "tap")) or
(word[0] in vowels and word[1] in vowels)):
rv = word[3:]
else:
for i in range(1, len(word)):
if word[i] in vowels:
rv = word[i+1:]
break
return rv | python | def __rv_french(self, word, vowels):
"""
Return the region RV that is used by the French stemmer.
If the word begins with two vowels, RV is the region after
the third letter. Otherwise, it is the region after the first
vowel not at the beginning of the word, or the end of the word
if these positions cannot be found. (Exceptionally, u'par',
u'col' or u'tap' at the beginning of a word is also taken to
define RV as the region to their right.)
:param word: The French word whose region RV is determined.
:type word: str or unicode
:param vowels: The French vowels that are used to determine
the region RV.
:type vowels: unicode
:return: the region RV for the respective French word.
:rtype: unicode
:note: This helper method is invoked by the stem method of
the subclass FrenchStemmer. It is not to be invoked directly!
"""
rv = ""
if len(word) >= 2:
if (word.startswith(("par", "col", "tap")) or
(word[0] in vowels and word[1] in vowels)):
rv = word[3:]
else:
for i in range(1, len(word)):
if word[i] in vowels:
rv = word[i+1:]
break
return rv | ['def', '__rv_french', '(', 'self', ',', 'word', ',', 'vowels', ')', ':', 'rv', '=', '""', 'if', 'len', '(', 'word', ')', '>=', '2', ':', 'if', '(', 'word', '.', 'startswith', '(', '(', '"par"', ',', '"col"', ',', '"tap"', ')', ')', 'or', '(', 'word', '[', '0', ']', 'in', 'vowels', 'and', 'word', '[', '1', ']', 'in', 'vowels', ')', ')', ':', 'rv', '=', 'word', '[', '3', ':', ']', 'else', ':', 'for', 'i', 'in', 'range', '(', '1', ',', 'len', '(', 'word', ')', ')', ':', 'if', 'word', '[', 'i', ']', 'in', 'vowels', ':', 'rv', '=', 'word', '[', 'i', '+', '1', ':', ']', 'break', 'return', 'rv'] | Return the region RV that is used by the French stemmer.
If the word begins with two vowels, RV is the region after
the third letter. Otherwise, it is the region after the first
vowel not at the beginning of the word, or the end of the word
if these positions cannot be found. (Exceptionally, u'par',
u'col' or u'tap' at the beginning of a word is also taken to
define RV as the region to their right.)
:param word: The French word whose region RV is determined.
:type word: str or unicode
:param vowels: The French vowels that are used to determine
the region RV.
:type vowels: unicode
:return: the region RV for the respective French word.
:rtype: unicode
:note: This helper method is invoked by the stem method of
the subclass FrenchStemmer. It is not to be invoked directly! | ['Return', 'the', 'region', 'RV', 'that', 'is', 'used', 'by', 'the', 'French', 'stemmer', '.'] | train | https://github.com/summanlp/textrank/blob/6844bbe8c4b2b468020ae0dfd6574a743f9ad442/summa/preprocessing/snowball.py#L1614-L1647 |
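The RV rule spelled out in the record above can be restated as a small standalone function; the vowel set below is an assumption matching the stemmer's usual French vowels:

```python
FRENCH_VOWELS = "aeiouyâàëéêèïîôûù"   # assumed vowel set for the French stemmer

def rv_french(word, vowels=FRENCH_VOWELS):
    # Standalone restatement of the RV rule described above:
    #  * 'par'/'col'/'tap' prefix, or two leading vowels -> RV starts after the third letter
    #  * otherwise RV starts right after the first vowel that is not word-initial
    #  * if no such position exists, RV is empty
    if len(word) < 2:
        return ""
    if word.startswith(("par", "col", "tap")) or (word[0] in vowels and word[1] in vowels):
        return word[3:]
    for i in range(1, len(word)):
        if word[i] in vowels:
            return word[i + 1:]
    return ""

print(rv_french("aimer"))    # -> 'er'   (word begins with two vowels)
print(rv_french("parole"))   # -> 'ole'  (the 'par' exception)
print(rv_french("table"))    # -> 'ble'  (first non-initial vowel is 'a')
```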
6,682 | konstantinstadler/pymrio | pymrio/core/fileio.py | load | def load(path, include_core=True, path_in_arc=''):
""" Loads a IOSystem or Extension previously saved with pymrio
This function can be used to load a IOSystem or Extension specified in a
metadata file (as defined in DEFAULT_FILE_NAMES['filepara']: metadata.json)
DataFrames (tables) are loaded from text or binary pickle files.
For the latter, the extension .pkl or .pickle is assumed, in all other case
the tables are assumed to be in .txt format.
Parameters
----------
path : pathlib.Path or string
Path or path with para file name for the data to load. This must
either point to the directory containing the uncompressed data or
the location of a compressed zip file with the data. In the
later case the parameter 'path_in_arc' need to be specific to
further indicate the location of the data in the compressed file.
include_core : boolean, optional
If False the load method does not include A, L and Z matrix. This
significantly reduces the required memory if the purpose is only
to analyse the results calculated beforehand.
path_in_arc: string, optional
Path to the data in the zip file (where the fileparameters file is
located). path_in_arc must be given without leading dot and slash;
thus to point to the data in the root of the compressed file pass '',
for data in e.g. the folder 'emissions' pass 'emissions/'. Only used
if parameter 'path' points to an compressed zip file.
Returns
-------
IOSystem or Extension class depending on systemtype in the json file
None in case of errors
"""
path = Path(path)
if not path.exists():
raise ReadError('Given path does not exist')
file_para = get_file_para(path=path, path_in_arc=path_in_arc)
if file_para.content['systemtype'] == GENERIC_NAMES['iosys']:
if zipfile.is_zipfile(str(path)):
ret_system = IOSystem(meta=MRIOMetaData(
location=path,
path_in_arc=os.path.join(file_para.folder,
DEFAULT_FILE_NAMES['metadata'])))
ret_system.meta._add_fileio(
"Loaded IO system from {} - {}".format(path, path_in_arc))
else:
ret_system = IOSystem(meta=MRIOMetaData(
location=path / DEFAULT_FILE_NAMES['metadata']))
ret_system.meta._add_fileio(
"Loaded IO system from {}".format(path))
elif file_para.content['systemtype'] == GENERIC_NAMES['ext']:
ret_system = Extension(file_para.content['name'])
else:
raise ReadError('Type of system no defined in the file parameters')
return None
for key in file_para.content['files']:
if not include_core and key not in ['A', 'L', 'Z']:
continue
file_name = file_para.content['files'][key]['name']
nr_index_col = file_para.content['files'][key]['nr_index_col']
nr_header = file_para.content['files'][key]['nr_header']
_index_col = list(range(int(nr_index_col)))
_header = list(range(int(nr_header)))
_index_col = 0 if _index_col == [0] else _index_col
_header = 0 if _header == [0] else _header
if zipfile.is_zipfile(str(path)):
full_file_name = os.path.join(file_para.folder, file_name)
logging.info('Load data from {}'.format(full_file_name))
with zipfile.ZipFile(file=str(path)) as zf:
if (os.path.splitext(str(full_file_name))[1] == '.pkl' or
os.path.splitext(str(full_file_name))[1] == '.pickle'):
setattr(ret_system, key,
pd.read_pickle(zf.open(full_file_name)))
else:
setattr(ret_system, key,
pd.read_table(zf.open(full_file_name),
index_col=_index_col,
header=_header))
else:
full_file_name = path / file_name
logging.info('Load data from {}'.format(full_file_name))
if (os.path.splitext(str(full_file_name))[1] == '.pkl' or
os.path.splitext(str(full_file_name))[1] == '.pickle'):
setattr(ret_system, key,
pd.read_pickle(full_file_name))
else:
setattr(ret_system, key,
pd.read_table(full_file_name,
index_col=_index_col,
header=_header))
return ret_system | python | def load(path, include_core=True, path_in_arc=''):
""" Loads a IOSystem or Extension previously saved with pymrio
This function can be used to load a IOSystem or Extension specified in a
metadata file (as defined in DEFAULT_FILE_NAMES['filepara']: metadata.json)
DataFrames (tables) are loaded from text or binary pickle files.
For the latter, the extension .pkl or .pickle is assumed, in all other case
the tables are assumed to be in .txt format.
Parameters
----------
path : pathlib.Path or string
Path or path with para file name for the data to load. This must
either point to the directory containing the uncompressed data or
the location of a compressed zip file with the data. In the
later case the parameter 'path_in_arc' need to be specific to
further indicate the location of the data in the compressed file.
include_core : boolean, optional
If False the load method does not include A, L and Z matrix. This
significantly reduces the required memory if the purpose is only
to analyse the results calculated beforehand.
path_in_arc: string, optional
Path to the data in the zip file (where the fileparameters file is
located). path_in_arc must be given without leading dot and slash;
thus to point to the data in the root of the compressed file pass '',
for data in e.g. the folder 'emissions' pass 'emissions/'. Only used
if parameter 'path' points to an compressed zip file.
Returns
-------
IOSystem or Extension class depending on systemtype in the json file
None in case of errors
"""
path = Path(path)
if not path.exists():
raise ReadError('Given path does not exist')
file_para = get_file_para(path=path, path_in_arc=path_in_arc)
if file_para.content['systemtype'] == GENERIC_NAMES['iosys']:
if zipfile.is_zipfile(str(path)):
ret_system = IOSystem(meta=MRIOMetaData(
location=path,
path_in_arc=os.path.join(file_para.folder,
DEFAULT_FILE_NAMES['metadata'])))
ret_system.meta._add_fileio(
"Loaded IO system from {} - {}".format(path, path_in_arc))
else:
ret_system = IOSystem(meta=MRIOMetaData(
location=path / DEFAULT_FILE_NAMES['metadata']))
ret_system.meta._add_fileio(
"Loaded IO system from {}".format(path))
elif file_para.content['systemtype'] == GENERIC_NAMES['ext']:
ret_system = Extension(file_para.content['name'])
else:
raise ReadError('Type of system no defined in the file parameters')
return None
for key in file_para.content['files']:
if not include_core and key not in ['A', 'L', 'Z']:
continue
file_name = file_para.content['files'][key]['name']
nr_index_col = file_para.content['files'][key]['nr_index_col']
nr_header = file_para.content['files'][key]['nr_header']
_index_col = list(range(int(nr_index_col)))
_header = list(range(int(nr_header)))
_index_col = 0 if _index_col == [0] else _index_col
_header = 0 if _header == [0] else _header
if zipfile.is_zipfile(str(path)):
full_file_name = os.path.join(file_para.folder, file_name)
logging.info('Load data from {}'.format(full_file_name))
with zipfile.ZipFile(file=str(path)) as zf:
if (os.path.splitext(str(full_file_name))[1] == '.pkl' or
os.path.splitext(str(full_file_name))[1] == '.pickle'):
setattr(ret_system, key,
pd.read_pickle(zf.open(full_file_name)))
else:
setattr(ret_system, key,
pd.read_table(zf.open(full_file_name),
index_col=_index_col,
header=_header))
else:
full_file_name = path / file_name
logging.info('Load data from {}'.format(full_file_name))
if (os.path.splitext(str(full_file_name))[1] == '.pkl' or
os.path.splitext(str(full_file_name))[1] == '.pickle'):
setattr(ret_system, key,
pd.read_pickle(full_file_name))
else:
setattr(ret_system, key,
pd.read_table(full_file_name,
index_col=_index_col,
header=_header))
return ret_system | ['def', 'load', '(', 'path', ',', 'include_core', '=', 'True', ',', 'path_in_arc', '=', "''", ')', ':', 'path', '=', 'Path', '(', 'path', ')', 'if', 'not', 'path', '.', 'exists', '(', ')', ':', 'raise', 'ReadError', '(', "'Given path does not exist'", ')', 'file_para', '=', 'get_file_para', '(', 'path', '=', 'path', ',', 'path_in_arc', '=', 'path_in_arc', ')', 'if', 'file_para', '.', 'content', '[', "'systemtype'", ']', '==', 'GENERIC_NAMES', '[', "'iosys'", ']', ':', 'if', 'zipfile', '.', 'is_zipfile', '(', 'str', '(', 'path', ')', ')', ':', 'ret_system', '=', 'IOSystem', '(', 'meta', '=', 'MRIOMetaData', '(', 'location', '=', 'path', ',', 'path_in_arc', '=', 'os', '.', 'path', '.', 'join', '(', 'file_para', '.', 'folder', ',', 'DEFAULT_FILE_NAMES', '[', "'metadata'", ']', ')', ')', ')', 'ret_system', '.', 'meta', '.', '_add_fileio', '(', '"Loaded IO system from {} - {}"', '.', 'format', '(', 'path', ',', 'path_in_arc', ')', ')', 'else', ':', 'ret_system', '=', 'IOSystem', '(', 'meta', '=', 'MRIOMetaData', '(', 'location', '=', 'path', '/', 'DEFAULT_FILE_NAMES', '[', "'metadata'", ']', ')', ')', 'ret_system', '.', 'meta', '.', '_add_fileio', '(', '"Loaded IO system from {}"', '.', 'format', '(', 'path', ')', ')', 'elif', 'file_para', '.', 'content', '[', "'systemtype'", ']', '==', 'GENERIC_NAMES', '[', "'ext'", ']', ':', 'ret_system', '=', 'Extension', '(', 'file_para', '.', 'content', '[', "'name'", ']', ')', 'else', ':', 'raise', 'ReadError', '(', "'Type of system no defined in the file parameters'", ')', 'return', 'None', 'for', 'key', 'in', 'file_para', '.', 'content', '[', "'files'", ']', ':', 'if', 'not', 'include_core', 'and', 'key', 'not', 'in', '[', "'A'", ',', "'L'", ',', "'Z'", ']', ':', 'continue', 'file_name', '=', 'file_para', '.', 'content', '[', "'files'", ']', '[', 'key', ']', '[', "'name'", ']', 'nr_index_col', '=', 'file_para', '.', 'content', '[', "'files'", ']', '[', 'key', ']', '[', "'nr_index_col'", ']', 'nr_header', '=', 'file_para', '.', 'content', '[', "'files'", ']', '[', 'key', ']', '[', "'nr_header'", ']', '_index_col', '=', 'list', '(', 'range', '(', 'int', '(', 'nr_index_col', ')', ')', ')', '_header', '=', 'list', '(', 'range', '(', 'int', '(', 'nr_header', ')', ')', ')', '_index_col', '=', '0', 'if', '_index_col', '==', '[', '0', ']', 'else', '_index_col', '_header', '=', '0', 'if', '_header', '==', '[', '0', ']', 'else', '_header', 'if', 'zipfile', '.', 'is_zipfile', '(', 'str', '(', 'path', ')', ')', ':', 'full_file_name', '=', 'os', '.', 'path', '.', 'join', '(', 'file_para', '.', 'folder', ',', 'file_name', ')', 'logging', '.', 'info', '(', "'Load data from {}'", '.', 'format', '(', 'full_file_name', ')', ')', 'with', 'zipfile', '.', 'ZipFile', '(', 'file', '=', 'str', '(', 'path', ')', ')', 'as', 'zf', ':', 'if', '(', 'os', '.', 'path', '.', 'splitext', '(', 'str', '(', 'full_file_name', ')', ')', '[', '1', ']', '==', "'.pkl'", 'or', 'os', '.', 'path', '.', 'splitext', '(', 'str', '(', 'full_file_name', ')', ')', '[', '1', ']', '==', "'.pickle'", ')', ':', 'setattr', '(', 'ret_system', ',', 'key', ',', 'pd', '.', 'read_pickle', '(', 'zf', '.', 'open', '(', 'full_file_name', ')', ')', ')', 'else', ':', 'setattr', '(', 'ret_system', ',', 'key', ',', 'pd', '.', 'read_table', '(', 'zf', '.', 'open', '(', 'full_file_name', ')', ',', 'index_col', '=', '_index_col', ',', 'header', '=', '_header', ')', ')', 'else', ':', 'full_file_name', '=', 'path', '/', 'file_name', 'logging', '.', 'info', '(', "'Load data from {}'", '.', 'format', '(', 
'full_file_name', ')', ')', 'if', '(', 'os', '.', 'path', '.', 'splitext', '(', 'str', '(', 'full_file_name', ')', ')', '[', '1', ']', '==', "'.pkl'", 'or', 'os', '.', 'path', '.', 'splitext', '(', 'str', '(', 'full_file_name', ')', ')', '[', '1', ']', '==', "'.pickle'", ')', ':', 'setattr', '(', 'ret_system', ',', 'key', ',', 'pd', '.', 'read_pickle', '(', 'full_file_name', ')', ')', 'else', ':', 'setattr', '(', 'ret_system', ',', 'key', ',', 'pd', '.', 'read_table', '(', 'full_file_name', ',', 'index_col', '=', '_index_col', ',', 'header', '=', '_header', ')', ')', 'return', 'ret_system'] | Loads a IOSystem or Extension previously saved with pymrio
This function can be used to load a IOSystem or Extension specified in a
metadata file (as defined in DEFAULT_FILE_NAMES['filepara']: metadata.json)
DataFrames (tables) are loaded from text or binary pickle files.
For the latter, the extension .pkl or .pickle is assumed, in all other case
the tables are assumed to be in .txt format.
Parameters
----------
path : pathlib.Path or string
Path or path with para file name for the data to load. This must
either point to the directory containing the uncompressed data or
the location of a compressed zip file with the data. In the
later case the parameter 'path_in_arc' need to be specific to
further indicate the location of the data in the compressed file.
include_core : boolean, optional
If False the load method does not include A, L and Z matrix. This
significantly reduces the required memory if the purpose is only
to analyse the results calculated beforehand.
path_in_arc: string, optional
Path to the data in the zip file (where the fileparameters file is
located). path_in_arc must be given without leading dot and slash;
thus to point to the data in the root of the compressed file pass '',
for data in e.g. the folder 'emissions' pass 'emissions/'. Only used
if parameter 'path' points to an compressed zip file.
Returns
-------
IOSystem or Extension class depending on systemtype in the json file
None in case of errors | ['Loads', 'a', 'IOSystem', 'or', 'Extension', 'previously', 'saved', 'with', 'pymrio'] | train | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/fileio.py#L184-L289 |
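A short, hedged usage sketch for the load function documented above; the paths are placeholders, not real files:

```python
# Hedged usage sketch; the paths below are placeholders.
from pymrio import load

# Load a full IOSystem previously saved to an uncompressed folder.
io = load("saved_mrio/")

# Load only the results (skip the A, L and Z matrices) to keep memory low.
io_small = load("saved_mrio/", include_core=False)

# Load from a zip archive where the data sits in the folder 'emissions/' inside the file.
ext = load("saved_mrio.zip", path_in_arc="emissions/")
print(type(io).__name__, type(ext).__name__)
```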
6,683 | MartinThoma/hwrt | hwrt/view.py | _list_ids | def _list_ids(path_to_data):
"""List raw data IDs grouped by symbol ID from a pickle file
``path_to_data``."""
loaded = pickle.load(open(path_to_data, "rb"))
raw_datasets = loaded['handwriting_datasets']
raw_ids = {}
for raw_dataset in raw_datasets:
raw_data_id = raw_dataset['handwriting'].raw_data_id
if raw_dataset['formula_id'] not in raw_ids:
raw_ids[raw_dataset['formula_id']] = [raw_data_id]
else:
raw_ids[raw_dataset['formula_id']].append(raw_data_id)
for symbol_id in sorted(raw_ids):
print("%i: %s" % (symbol_id, sorted(raw_ids[symbol_id]))) | python | def _list_ids(path_to_data):
"""List raw data IDs grouped by symbol ID from a pickle file
``path_to_data``."""
loaded = pickle.load(open(path_to_data, "rb"))
raw_datasets = loaded['handwriting_datasets']
raw_ids = {}
for raw_dataset in raw_datasets:
raw_data_id = raw_dataset['handwriting'].raw_data_id
if raw_dataset['formula_id'] not in raw_ids:
raw_ids[raw_dataset['formula_id']] = [raw_data_id]
else:
raw_ids[raw_dataset['formula_id']].append(raw_data_id)
for symbol_id in sorted(raw_ids):
print("%i: %s" % (symbol_id, sorted(raw_ids[symbol_id]))) | ['def', '_list_ids', '(', 'path_to_data', ')', ':', 'loaded', '=', 'pickle', '.', 'load', '(', 'open', '(', 'path_to_data', ',', '"rb"', ')', ')', 'raw_datasets', '=', 'loaded', '[', "'handwriting_datasets'", ']', 'raw_ids', '=', '{', '}', 'for', 'raw_dataset', 'in', 'raw_datasets', ':', 'raw_data_id', '=', 'raw_dataset', '[', "'handwriting'", ']', '.', 'raw_data_id', 'if', 'raw_dataset', '[', "'formula_id'", ']', 'not', 'in', 'raw_ids', ':', 'raw_ids', '[', 'raw_dataset', '[', "'formula_id'", ']', ']', '=', '[', 'raw_data_id', ']', 'else', ':', 'raw_ids', '[', 'raw_dataset', '[', "'formula_id'", ']', ']', '.', 'append', '(', 'raw_data_id', ')', 'for', 'symbol_id', 'in', 'sorted', '(', 'raw_ids', ')', ':', 'print', '(', '"%i: %s"', '%', '(', 'symbol_id', ',', 'sorted', '(', 'raw_ids', '[', 'symbol_id', ']', ')', ')', ')'] | List raw data IDs grouped by symbol ID from a pickle file
``path_to_data``. | ['List', 'raw', 'data', 'IDs', 'grouped', 'by', 'symbol', 'ID', 'from', 'a', 'pickle', 'file', 'path_to_data', '.'] | train | https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/view.py#L68-L81 |
6,684 | rdussurget/py-altimetry | altimetry/tools/nctools.py | attrStr.add | def add(self,attrlist,attrvalues):
'''
add an attribute
:parameter dimlist: list of dimensions
:parameter dimvalues: list of values for dimlist
'''
for i,d in enumerate(attrlist):
self[d] = attrvalues[i] | python | def add(self,attrlist,attrvalues):
'''
add an attribute
:parameter dimlist: list of dimensions
:parameter dimvalues: list of values for dimlist
'''
for i,d in enumerate(attrlist):
self[d] = attrvalues[i] | ['def', 'add', '(', 'self', ',', 'attrlist', ',', 'attrvalues', ')', ':', 'for', 'i', ',', 'd', 'in', 'enumerate', '(', 'attrlist', ')', ':', 'self', '[', 'd', ']', '=', 'attrvalues', '[', 'i', ']'] | add an attribute
:parameter dimlist: list of dimensions
:parameter dimvalues: list of values for dimlist | ['add', 'an', 'attribute', ':', 'parameter', 'dimlist', ':', 'list', 'of', 'dimensions', ':', 'parameter', 'dimvalues', ':', 'list', 'of', 'values', 'for', 'dimlist'] | train | https://github.com/rdussurget/py-altimetry/blob/57ce7f2d63c6bbc4993821af0bbe46929e3a2d98/altimetry/tools/nctools.py#L140-L148 |
6,685 | pmneila/morphsnakes | examples.py | visual_callback_2d | def visual_callback_2d(background, fig=None):
"""
Returns a callback than can be passed as the argument `iter_callback`
of `morphological_geodesic_active_contour` and
`morphological_chan_vese` for visualizing the evolution
of the levelsets. Only works for 2D images.
Parameters
----------
background : (M, N) array
Image to be plotted as the background of the visual evolution.
fig : matplotlib.figure.Figure
Figure where results will be drawn. If not given, a new figure
will be created.
Returns
-------
callback : Python function
A function that receives a levelset and updates the current plot
accordingly. This can be passed as the `iter_callback` argument of
`morphological_geodesic_active_contour` and
`morphological_chan_vese`.
"""
# Prepare the visual environment.
if fig is None:
fig = plt.figure()
fig.clf()
ax1 = fig.add_subplot(1, 2, 1)
ax1.imshow(background, cmap=plt.cm.gray)
ax2 = fig.add_subplot(1, 2, 2)
ax_u = ax2.imshow(np.zeros_like(background), vmin=0, vmax=1)
plt.pause(0.001)
def callback(levelset):
if ax1.collections:
del ax1.collections[0]
ax1.contour(levelset, [0.5], colors='r')
ax_u.set_data(levelset)
fig.canvas.draw()
plt.pause(0.001)
return callback | python | def visual_callback_2d(background, fig=None):
"""
Returns a callback than can be passed as the argument `iter_callback`
of `morphological_geodesic_active_contour` and
`morphological_chan_vese` for visualizing the evolution
of the levelsets. Only works for 2D images.
Parameters
----------
background : (M, N) array
Image to be plotted as the background of the visual evolution.
fig : matplotlib.figure.Figure
Figure where results will be drawn. If not given, a new figure
will be created.
Returns
-------
callback : Python function
A function that receives a levelset and updates the current plot
accordingly. This can be passed as the `iter_callback` argument of
`morphological_geodesic_active_contour` and
`morphological_chan_vese`.
"""
# Prepare the visual environment.
if fig is None:
fig = plt.figure()
fig.clf()
ax1 = fig.add_subplot(1, 2, 1)
ax1.imshow(background, cmap=plt.cm.gray)
ax2 = fig.add_subplot(1, 2, 2)
ax_u = ax2.imshow(np.zeros_like(background), vmin=0, vmax=1)
plt.pause(0.001)
def callback(levelset):
if ax1.collections:
del ax1.collections[0]
ax1.contour(levelset, [0.5], colors='r')
ax_u.set_data(levelset)
fig.canvas.draw()
plt.pause(0.001)
return callback | ['def', 'visual_callback_2d', '(', 'background', ',', 'fig', '=', 'None', ')', ':', '# Prepare the visual environment.', 'if', 'fig', 'is', 'None', ':', 'fig', '=', 'plt', '.', 'figure', '(', ')', 'fig', '.', 'clf', '(', ')', 'ax1', '=', 'fig', '.', 'add_subplot', '(', '1', ',', '2', ',', '1', ')', 'ax1', '.', 'imshow', '(', 'background', ',', 'cmap', '=', 'plt', '.', 'cm', '.', 'gray', ')', 'ax2', '=', 'fig', '.', 'add_subplot', '(', '1', ',', '2', ',', '2', ')', 'ax_u', '=', 'ax2', '.', 'imshow', '(', 'np', '.', 'zeros_like', '(', 'background', ')', ',', 'vmin', '=', '0', ',', 'vmax', '=', '1', ')', 'plt', '.', 'pause', '(', '0.001', ')', 'def', 'callback', '(', 'levelset', ')', ':', 'if', 'ax1', '.', 'collections', ':', 'del', 'ax1', '.', 'collections', '[', '0', ']', 'ax1', '.', 'contour', '(', 'levelset', ',', '[', '0.5', ']', ',', 'colors', '=', "'r'", ')', 'ax_u', '.', 'set_data', '(', 'levelset', ')', 'fig', '.', 'canvas', '.', 'draw', '(', ')', 'plt', '.', 'pause', '(', '0.001', ')', 'return', 'callback'] | Returns a callback than can be passed as the argument `iter_callback`
of `morphological_geodesic_active_contour` and
`morphological_chan_vese` for visualizing the evolution
of the levelsets. Only works for 2D images.
Parameters
----------
background : (M, N) array
Image to be plotted as the background of the visual evolution.
fig : matplotlib.figure.Figure
Figure where results will be drawn. If not given, a new figure
will be created.
Returns
-------
callback : Python function
A function that receives a levelset and updates the current plot
accordingly. This can be passed as the `iter_callback` argument of
`morphological_geodesic_active_contour` and
`morphological_chan_vese`. | ['Returns', 'a', 'callback', 'than', 'can', 'be', 'passed', 'as', 'the', 'argument', 'iter_callback', 'of', 'morphological_geodesic_active_contour', 'and', 'morphological_chan_vese', 'for', 'visualizing', 'the', 'evolution', 'of', 'the', 'levelsets', '.', 'Only', 'works', 'for', '2D', 'images', '.', 'Parameters', '----------', 'background', ':', '(', 'M', 'N', ')', 'array', 'Image', 'to', 'be', 'plotted', 'as', 'the', 'background', 'of', 'the', 'visual', 'evolution', '.', 'fig', ':', 'matplotlib', '.', 'figure', '.', 'Figure', 'Figure', 'where', 'results', 'will', 'be', 'drawn', '.', 'If', 'not', 'given', 'a', 'new', 'figure', 'will', 'be', 'created', '.', 'Returns', '-------', 'callback', ':', 'Python', 'function', 'A', 'function', 'that', 'receives', 'a', 'levelset', 'and', 'updates', 'the', 'current', 'plot', 'accordingly', '.', 'This', 'can', 'be', 'passed', 'as', 'the', 'iter_callback', 'argument', 'of', 'morphological_geodesic_active_contour', 'and', 'morphological_chan_vese', '.'] | train | https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/examples.py#L25-L70 |
6,686 | miso-belica/sumy | sumy/utils.py | cached_property | def cached_property(getter):
"""
Decorator that converts a method into memoized property.
The decorator works as expected only for classes with
attribute '__dict__' and immutable properties.
"""
@wraps(getter)
def decorator(self):
key = "_cached_property_" + getter.__name__
if not hasattr(self, key):
setattr(self, key, getter(self))
return getattr(self, key)
return property(decorator) | python | def cached_property(getter):
"""
Decorator that converts a method into memoized property.
The decorator works as expected only for classes with
attribute '__dict__' and immutable properties.
"""
@wraps(getter)
def decorator(self):
key = "_cached_property_" + getter.__name__
if not hasattr(self, key):
setattr(self, key, getter(self))
return getattr(self, key)
return property(decorator) | ['def', 'cached_property', '(', 'getter', ')', ':', '@', 'wraps', '(', 'getter', ')', 'def', 'decorator', '(', 'self', ')', ':', 'key', '=', '"_cached_property_"', '+', 'getter', '.', '__name__', 'if', 'not', 'hasattr', '(', 'self', ',', 'key', ')', ':', 'setattr', '(', 'self', ',', 'key', ',', 'getter', '(', 'self', ')', ')', 'return', 'getattr', '(', 'self', ',', 'key', ')', 'return', 'property', '(', 'decorator', ')'] | Decorator that converts a method into memoized property.
The decorator works as expected only for classes with
attribute '__dict__' and immutable properties. | ['Decorator', 'that', 'converts', 'a', 'method', 'into', 'memoized', 'property', '.', 'The', 'decorator', 'works', 'as', 'expected', 'only', 'for', 'classes', 'with', 'attribute', '__dict__', 'and', 'immutable', 'properties', '.'] | train | https://github.com/miso-belica/sumy/blob/099ab4938e2c1b6a011297375586bac2953641b9/sumy/utils.py#L42-L57 |
6,687 | TC01/calcpkg | calcrepo/repo.py | CalcRepository.getDownloadUrls | def getDownloadUrls(self):
"""Return a list of the urls to download from"""
data = self.searchIndex(False)
fileUrls = []
for datum in data:
fileUrl = self.formatDownloadUrl(datum[0])
fileUrls.append(fileUrl)
return fileUrls | python | def getDownloadUrls(self):
"""Return a list of the urls to download from"""
data = self.searchIndex(False)
fileUrls = []
for datum in data:
fileUrl = self.formatDownloadUrl(datum[0])
fileUrls.append(fileUrl)
return fileUrls | ['def', 'getDownloadUrls', '(', 'self', ')', ':', 'data', '=', 'self', '.', 'searchIndex', '(', 'False', ')', 'fileUrls', '=', '[', ']', 'for', 'datum', 'in', 'data', ':', 'fileUrl', '=', 'self', '.', 'formatDownloadUrl', '(', 'datum', '[', '0', ']', ')', 'fileUrls', '.', 'append', '(', 'fileUrl', ')', 'return', 'fileUrls'] | Return a list of the urls to download from | ['Return', 'a', 'list', 'of', 'the', 'urls', 'to', 'download', 'from'] | train | https://github.com/TC01/calcpkg/blob/5168f606264620a090b42a64354331d208b00d5f/calcrepo/repo.py#L65-L72 |
6,688 | cloudendpoints/endpoints-python | endpoints/_endpointscfg_impl.py | _EndpointsParser.error | def error(self, message):
"""Override superclass to support customized error message.
Error message needs to be rewritten in order to display visible commands
only, when invalid command is called by user. Otherwise, hidden commands
will be displayed in stderr, which is not expected.
Refer the following argparse python documentation for detailed method
information:
http://docs.python.org/2/library/argparse.html#exiting-methods
Args:
message: original error message that will be printed to stderr
"""
# subcommands_quoted is the same as subcommands, except each value is
# surrounded with double quotes. This is done to match the standard
# output of the ArgumentParser, while hiding commands we don't want users
# to use, as they are no longer documented and only here for legacy use.
subcommands_quoted = ', '.join(
[repr(command) for command in _VISIBLE_COMMANDS])
subcommands = ', '.join(_VISIBLE_COMMANDS)
message = re.sub(
r'(argument {%s}: invalid choice: .*) \(choose from (.*)\)$'
% subcommands, r'\1 (choose from %s)' % subcommands_quoted, message)
super(_EndpointsParser, self).error(message) | python | def error(self, message):
"""Override superclass to support customized error message.
Error message needs to be rewritten in order to display visible commands
only, when invalid command is called by user. Otherwise, hidden commands
will be displayed in stderr, which is not expected.
Refer the following argparse python documentation for detailed method
information:
http://docs.python.org/2/library/argparse.html#exiting-methods
Args:
message: original error message that will be printed to stderr
"""
# subcommands_quoted is the same as subcommands, except each value is
# surrounded with double quotes. This is done to match the standard
# output of the ArgumentParser, while hiding commands we don't want users
# to use, as they are no longer documented and only here for legacy use.
subcommands_quoted = ', '.join(
[repr(command) for command in _VISIBLE_COMMANDS])
subcommands = ', '.join(_VISIBLE_COMMANDS)
message = re.sub(
r'(argument {%s}: invalid choice: .*) \(choose from (.*)\)$'
% subcommands, r'\1 (choose from %s)' % subcommands_quoted, message)
super(_EndpointsParser, self).error(message) | ['def', 'error', '(', 'self', ',', 'message', ')', ':', '# subcommands_quoted is the same as subcommands, except each value is', '# surrounded with double quotes. This is done to match the standard', "# output of the ArgumentParser, while hiding commands we don't want users", '# to use, as they are no longer documented and only here for legacy use.', 'subcommands_quoted', '=', "', '", '.', 'join', '(', '[', 'repr', '(', 'command', ')', 'for', 'command', 'in', '_VISIBLE_COMMANDS', ']', ')', 'subcommands', '=', "', '", '.', 'join', '(', '_VISIBLE_COMMANDS', ')', 'message', '=', 're', '.', 'sub', '(', "r'(argument {%s}: invalid choice: .*) \\(choose from (.*)\\)$'", '%', 'subcommands', ',', "r'\\1 (choose from %s)'", '%', 'subcommands_quoted', ',', 'message', ')', 'super', '(', '_EndpointsParser', ',', 'self', ')', '.', 'error', '(', 'message', ')'] | Override superclass to support customized error message.
Error message needs to be rewritten in order to display visible commands
only, when invalid command is called by user. Otherwise, hidden commands
will be displayed in stderr, which is not expected.
Refer the following argparse python documentation for detailed method
information:
http://docs.python.org/2/library/argparse.html#exiting-methods
Args:
message: original error message that will be printed to stderr | ['Override', 'superclass', 'to', 'support', 'customized', 'error', 'message', '.'] | train | https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/_endpointscfg_impl.py#L111-L135 |
6,689 | MSchnei/pyprf_feature | pyprf_feature/analysis/old/pRF_mdlCrt.py | cnvlGauss2D | def cnvlGauss2D(idxPrc, aryBoxCar, aryMdlParamsChnk, tplPngSize, varNumVol,
queOut):
"""Spatially convolve boxcar functions with 2D Gaussian.
Parameters
----------
idxPrc : 2d numpy array, shape [n_samples, n_measurements]
Description of input 1.
aryBoxCar : float, positive
Description of input 2.
aryMdlParamsChnk : 2d numpy array, shape [n_samples, n_measurements]
Description of input 1.
tplPngSize : float, positive
Description of input 2.
varNumVol : 2d numpy array, shape [n_samples, n_measurements]
Description of input 1.
queOut : float, positive
Description of input 2.
Returns
-------
data : 2d numpy array, shape [n_samples, n_measurements]
Closed data.
Reference
---------
[1]
"""
# Number of combinations of model parameters in the current chunk:
varChnkSze = np.size(aryMdlParamsChnk, axis=0)
# Determine number of motion directions
varNumMtnDrtn = aryBoxCar.shape[2]
# Output array with pRF model time courses:
aryOut = np.zeros([varChnkSze, varNumMtnDrtn, varNumVol])
# Loop through different motion directions:
for idxMtn in range(0, varNumMtnDrtn):
# Loop through combinations of model parameters:
for idxMdl in range(0, varChnkSze):
# Spatial parameters of current model:
varTmpX = aryMdlParamsChnk[idxMdl, 1]
varTmpY = aryMdlParamsChnk[idxMdl, 2]
varTmpSd = aryMdlParamsChnk[idxMdl, 3]
# Create pRF model (2D):
aryGauss = crtGauss2D(tplPngSize[0],
tplPngSize[1],
varTmpX,
varTmpY,
varTmpSd)
# Multiply pixel-time courses with Gaussian pRF models:
aryPrfTcTmp = np.multiply(aryBoxCar[:, :, idxMtn, :],
aryGauss[:, :, None])
# Calculate sum across x- and y-dimensions - the 'area under the
# Gaussian surface'. This is essentially an unscaled version of the
# pRF time course model (i.e. not yet scaled for size of the pRF).
aryPrfTcTmp = np.sum(aryPrfTcTmp, axis=(0, 1))
# Put model time courses into function's output with 2d Gaussian
# arrray:
aryOut[idxMdl, idxMtn, :] = aryPrfTcTmp
# Put column with the indicies of model-parameter-combinations into the
# output array (in order to be able to put the pRF model time courses into
# the correct order after the parallelised function):
lstOut = [idxPrc,
aryOut]
# Put output to queue:
queOut.put(lstOut) | python | def cnvlGauss2D(idxPrc, aryBoxCar, aryMdlParamsChnk, tplPngSize, varNumVol,
queOut):
"""Spatially convolve boxcar functions with 2D Gaussian.
Parameters
----------
idxPrc : 2d numpy array, shape [n_samples, n_measurements]
Description of input 1.
aryBoxCar : float, positive
Description of input 2.
aryMdlParamsChnk : 2d numpy array, shape [n_samples, n_measurements]
Description of input 1.
tplPngSize : float, positive
Description of input 2.
varNumVol : 2d numpy array, shape [n_samples, n_measurements]
Description of input 1.
queOut : float, positive
Description of input 2.
Returns
-------
data : 2d numpy array, shape [n_samples, n_measurements]
Closed data.
Reference
---------
[1]
"""
# Number of combinations of model parameters in the current chunk:
varChnkSze = np.size(aryMdlParamsChnk, axis=0)
# Determine number of motion directions
varNumMtnDrtn = aryBoxCar.shape[2]
# Output array with pRF model time courses:
aryOut = np.zeros([varChnkSze, varNumMtnDrtn, varNumVol])
# Loop through different motion directions:
for idxMtn in range(0, varNumMtnDrtn):
# Loop through combinations of model parameters:
for idxMdl in range(0, varChnkSze):
# Spatial parameters of current model:
varTmpX = aryMdlParamsChnk[idxMdl, 1]
varTmpY = aryMdlParamsChnk[idxMdl, 2]
varTmpSd = aryMdlParamsChnk[idxMdl, 3]
# Create pRF model (2D):
aryGauss = crtGauss2D(tplPngSize[0],
tplPngSize[1],
varTmpX,
varTmpY,
varTmpSd)
# Multiply pixel-time courses with Gaussian pRF models:
aryPrfTcTmp = np.multiply(aryBoxCar[:, :, idxMtn, :],
aryGauss[:, :, None])
# Calculate sum across x- and y-dimensions - the 'area under the
# Gaussian surface'. This is essentially an unscaled version of the
# pRF time course model (i.e. not yet scaled for size of the pRF).
aryPrfTcTmp = np.sum(aryPrfTcTmp, axis=(0, 1))
# Put model time courses into function's output with 2d Gaussian
# arrray:
aryOut[idxMdl, idxMtn, :] = aryPrfTcTmp
# Put column with the indicies of model-parameter-combinations into the
# output array (in order to be able to put the pRF model time courses into
# the correct order after the parallelised function):
lstOut = [idxPrc,
aryOut]
# Put output to queue:
queOut.put(lstOut) | ['def', 'cnvlGauss2D', '(', 'idxPrc', ',', 'aryBoxCar', ',', 'aryMdlParamsChnk', ',', 'tplPngSize', ',', 'varNumVol', ',', 'queOut', ')', ':', '# Number of combinations of model parameters in the current chunk:', 'varChnkSze', '=', 'np', '.', 'size', '(', 'aryMdlParamsChnk', ',', 'axis', '=', '0', ')', '# Determine number of motion directions', 'varNumMtnDrtn', '=', 'aryBoxCar', '.', 'shape', '[', '2', ']', '# Output array with pRF model time courses:', 'aryOut', '=', 'np', '.', 'zeros', '(', '[', 'varChnkSze', ',', 'varNumMtnDrtn', ',', 'varNumVol', ']', ')', '# Loop through different motion directions:', 'for', 'idxMtn', 'in', 'range', '(', '0', ',', 'varNumMtnDrtn', ')', ':', '# Loop through combinations of model parameters:', 'for', 'idxMdl', 'in', 'range', '(', '0', ',', 'varChnkSze', ')', ':', '# Spatial parameters of current model:', 'varTmpX', '=', 'aryMdlParamsChnk', '[', 'idxMdl', ',', '1', ']', 'varTmpY', '=', 'aryMdlParamsChnk', '[', 'idxMdl', ',', '2', ']', 'varTmpSd', '=', 'aryMdlParamsChnk', '[', 'idxMdl', ',', '3', ']', '# Create pRF model (2D):', 'aryGauss', '=', 'crtGauss2D', '(', 'tplPngSize', '[', '0', ']', ',', 'tplPngSize', '[', '1', ']', ',', 'varTmpX', ',', 'varTmpY', ',', 'varTmpSd', ')', '# Multiply pixel-time courses with Gaussian pRF models:', 'aryPrfTcTmp', '=', 'np', '.', 'multiply', '(', 'aryBoxCar', '[', ':', ',', ':', ',', 'idxMtn', ',', ':', ']', ',', 'aryGauss', '[', ':', ',', ':', ',', 'None', ']', ')', "# Calculate sum across x- and y-dimensions - the 'area under the", "# Gaussian surface'. This is essentially an unscaled version of the", '# pRF time course model (i.e. not yet scaled for size of the pRF).', 'aryPrfTcTmp', '=', 'np', '.', 'sum', '(', 'aryPrfTcTmp', ',', 'axis', '=', '(', '0', ',', '1', ')', ')', "# Put model time courses into function's output with 2d Gaussian", '# arrray:', 'aryOut', '[', 'idxMdl', ',', 'idxMtn', ',', ':', ']', '=', 'aryPrfTcTmp', '# Put column with the indicies of model-parameter-combinations into the', '# output array (in order to be able to put the pRF model time courses into', '# the correct order after the parallelised function):', 'lstOut', '=', '[', 'idxPrc', ',', 'aryOut', ']', '# Put output to queue:', 'queOut', '.', 'put', '(', 'lstOut', ')'] | Spatially convolve boxcar functions with 2D Gaussian.
Parameters
----------
idxPrc : 2d numpy array, shape [n_samples, n_measurements]
Description of input 1.
aryBoxCar : float, positive
Description of input 2.
aryMdlParamsChnk : 2d numpy array, shape [n_samples, n_measurements]
Description of input 1.
tplPngSize : float, positive
Description of input 2.
varNumVol : 2d numpy array, shape [n_samples, n_measurements]
Description of input 1.
queOut : float, positive
Description of input 2.
Returns
-------
data : 2d numpy array, shape [n_samples, n_measurements]
Closed data.
Reference
---------
[1] | ['Spatially', 'convolve', 'boxcar', 'functions', 'with', '2D', 'Gaussian', '.'] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/old/pRF_mdlCrt.py#L178-L250 |
6,690 | nok/sklearn-porter | sklearn_porter/estimator/classifier/KNeighborsClassifier/__init__.py | KNeighborsClassifier.export | def export(self, class_name, method_name, export_data=False,
export_dir='.', export_filename='data.json',
export_append_checksum=False, **kwargs):
"""
Port a trained estimator to the syntax of a chosen programming language.
Parameters
----------
:param class_name : string
The name of the class in the returned result.
:param method_name : string
The name of the method in the returned result.
:param export_data : bool, default: False
Whether the model data should be saved or not.
:param export_dir : string, default: '.' (current directory)
The directory where the model data should be saved.
:param export_filename : string, default: 'data.json'
The filename of the exported model data.
:param export_append_checksum : bool, default: False
Whether to append the checksum to the filename or not.
Returns
-------
:return : string
The transpiled algorithm with the defined placeholders.
"""
# Arguments:
self.class_name = class_name
self.method_name = method_name
# Estimator:
est = self.estimator
# Basic parameters:
self.metric = est.metric
self.n_classes = len(est.classes_)
self.n_templates = len(est._fit_X) # pylint: disable=W0212
self.n_features = len(est._fit_X[0]) # pylint: disable=W0212
self.n_neighbors = est.n_neighbors
self.algorithm = est.algorithm
self.power_param = est.p
if self.algorithm != 'brute':
from sklearn.neighbors.kd_tree import KDTree # pylint: disable-msg=E0611
from sklearn.neighbors.ball_tree import BallTree # pylint: disable-msg=E0611
tree = est._tree # pylint: disable=W0212
if isinstance(tree, (KDTree, BallTree)):
self.tree = tree
if self.target_method == 'predict':
# Exported:
if export_data and os.path.isdir(export_dir):
self.export_data(export_dir, export_filename,
export_append_checksum)
return self.predict('exported')
# Separated:
return self.predict('separated') | python | def export(self, class_name, method_name, export_data=False,
export_dir='.', export_filename='data.json',
export_append_checksum=False, **kwargs):
"""
Port a trained estimator to the syntax of a chosen programming language.
Parameters
----------
:param class_name : string
The name of the class in the returned result.
:param method_name : string
The name of the method in the returned result.
:param export_data : bool, default: False
Whether the model data should be saved or not.
:param export_dir : string, default: '.' (current directory)
The directory where the model data should be saved.
:param export_filename : string, default: 'data.json'
The filename of the exported model data.
:param export_append_checksum : bool, default: False
Whether to append the checksum to the filename or not.
Returns
-------
:return : string
The transpiled algorithm with the defined placeholders.
"""
# Arguments:
self.class_name = class_name
self.method_name = method_name
# Estimator:
est = self.estimator
# Basic parameters:
self.metric = est.metric
self.n_classes = len(est.classes_)
self.n_templates = len(est._fit_X) # pylint: disable=W0212
self.n_features = len(est._fit_X[0]) # pylint: disable=W0212
self.n_neighbors = est.n_neighbors
self.algorithm = est.algorithm
self.power_param = est.p
if self.algorithm != 'brute':
from sklearn.neighbors.kd_tree import KDTree # pylint: disable-msg=E0611
from sklearn.neighbors.ball_tree import BallTree # pylint: disable-msg=E0611
tree = est._tree # pylint: disable=W0212
if isinstance(tree, (KDTree, BallTree)):
self.tree = tree
if self.target_method == 'predict':
# Exported:
if export_data and os.path.isdir(export_dir):
self.export_data(export_dir, export_filename,
export_append_checksum)
return self.predict('exported')
# Separated:
return self.predict('separated') | ['def', 'export', '(', 'self', ',', 'class_name', ',', 'method_name', ',', 'export_data', '=', 'False', ',', 'export_dir', '=', "'.'", ',', 'export_filename', '=', "'data.json'", ',', 'export_append_checksum', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', '# Arguments:', 'self', '.', 'class_name', '=', 'class_name', 'self', '.', 'method_name', '=', 'method_name', '# Estimator:', 'est', '=', 'self', '.', 'estimator', '# Basic parameters:', 'self', '.', 'metric', '=', 'est', '.', 'metric', 'self', '.', 'n_classes', '=', 'len', '(', 'est', '.', 'classes_', ')', 'self', '.', 'n_templates', '=', 'len', '(', 'est', '.', '_fit_X', ')', '# pylint: disable=W0212', 'self', '.', 'n_features', '=', 'len', '(', 'est', '.', '_fit_X', '[', '0', ']', ')', '# pylint: disable=W0212', 'self', '.', 'n_neighbors', '=', 'est', '.', 'n_neighbors', 'self', '.', 'algorithm', '=', 'est', '.', 'algorithm', 'self', '.', 'power_param', '=', 'est', '.', 'p', 'if', 'self', '.', 'algorithm', '!=', "'brute'", ':', 'from', 'sklearn', '.', 'neighbors', '.', 'kd_tree', 'import', 'KDTree', '# pylint: disable-msg=E0611', 'from', 'sklearn', '.', 'neighbors', '.', 'ball_tree', 'import', 'BallTree', '# pylint: disable-msg=E0611', 'tree', '=', 'est', '.', '_tree', '# pylint: disable=W0212', 'if', 'isinstance', '(', 'tree', ',', '(', 'KDTree', ',', 'BallTree', ')', ')', ':', 'self', '.', 'tree', '=', 'tree', 'if', 'self', '.', 'target_method', '==', "'predict'", ':', '# Exported:', 'if', 'export_data', 'and', 'os', '.', 'path', '.', 'isdir', '(', 'export_dir', ')', ':', 'self', '.', 'export_data', '(', 'export_dir', ',', 'export_filename', ',', 'export_append_checksum', ')', 'return', 'self', '.', 'predict', '(', "'exported'", ')', '# Separated:', 'return', 'self', '.', 'predict', '(', "'separated'", ')'] | Port a trained estimator to the syntax of a chosen programming language.
Parameters
----------
:param class_name : string
The name of the class in the returned result.
:param method_name : string
The name of the method in the returned result.
:param export_data : bool, default: False
Whether the model data should be saved or not.
:param export_dir : string, default: '.' (current directory)
The directory where the model data should be saved.
:param export_filename : string, default: 'data.json'
The filename of the exported model data.
:param export_append_checksum : bool, default: False
Whether to append the checksum to the filename or not.
Returns
-------
:return : string
The transpiled algorithm with the defined placeholders. | ['Port', 'a', 'trained', 'estimator', 'to', 'the', 'syntax', 'of', 'a', 'chosen', 'programming', 'language', '.'] | train | https://github.com/nok/sklearn-porter/blob/04673f768310bde31f9747a68a5e070592441ef2/sklearn_porter/estimator/classifier/KNeighborsClassifier/__init__.py#L67-L123 |
6,691 | openfisca/openfisca-core | openfisca_core/populations.py | GroupPopulation.max | def max(self, array, role = None):
"""
Return the maximum value of ``array`` for the entity members.
``array`` must have the dimension of the number of persons in the simulation
If ``role`` is provided, only the entity member with the given role are taken into account.
Example:
>>> salaries = household.members('salary', '2018-01') # e.g. [2000, 1500, 0, 0, 0]
>>> household.max(salaries)
>>> array([2000])
"""
return self.reduce(array, reducer = np.maximum, neutral_element = - np.infty, role = role) | python | def max(self, array, role = None):
"""
Return the maximum value of ``array`` for the entity members.
``array`` must have the dimension of the number of persons in the simulation
If ``role`` is provided, only the entity member with the given role are taken into account.
Example:
>>> salaries = household.members('salary', '2018-01') # e.g. [2000, 1500, 0, 0, 0]
>>> household.max(salaries)
>>> array([2000])
"""
return self.reduce(array, reducer = np.maximum, neutral_element = - np.infty, role = role) | ['def', 'max', '(', 'self', ',', 'array', ',', 'role', '=', 'None', ')', ':', 'return', 'self', '.', 'reduce', '(', 'array', ',', 'reducer', '=', 'np', '.', 'maximum', ',', 'neutral_element', '=', '-', 'np', '.', 'infty', ',', 'role', '=', 'role', ')'] | Return the maximum value of ``array`` for the entity members.
``array`` must have the dimension of the number of persons in the simulation
If ``role`` is provided, only the entity member with the given role are taken into account.
Example:
>>> salaries = household.members('salary', '2018-01') # e.g. [2000, 1500, 0, 0, 0]
>>> household.max(salaries)
>>> array([2000]) | ['Return', 'the', 'maximum', 'value', 'of', 'array', 'for', 'the', 'entity', 'members', '.'] | train | https://github.com/openfisca/openfisca-core/blob/92ce9396e29ae5d9bac5ea604cfce88517c6b35c/openfisca_core/populations.py#L365-L379 |
6,692 | jilljenn/tryalgo | tryalgo/bipartite_vertex_cover.py | bipartite_vertex_cover | def bipartite_vertex_cover(bigraph):
"""Bipartite minimum vertex cover by Koenig's theorem
:param bigraph: adjacency list, index = vertex in U,
value = neighbor list in V
:assumption: U = V = {0, 1, 2, ..., n - 1} for n = len(bigraph)
:returns: boolean table for U, boolean table for V
:comment: selected vertices form a minimum vertex cover,
i.e. every edge is adjacent to at least one selected vertex
and number of selected vertices is minimum
:complexity: `O(|V|*|E|)`
"""
V = range(len(bigraph))
matchV = max_bipartite_matching(bigraph)
matchU = [None for u in V]
for v in V: # -- build the mapping from U to V
if matchV[v] is not None:
matchU[matchV[v]] = v
visitU = [False for u in V] # -- build max alternating forest
visitV = [False for v in V]
for u in V:
if matchU[u] is None: # -- starting with free vertices in U
_alternate(u, bigraph, visitU, visitV, matchV)
inverse = [not b for b in visitU]
return (inverse, visitV) | python | def bipartite_vertex_cover(bigraph):
"""Bipartite minimum vertex cover by Koenig's theorem
:param bigraph: adjacency list, index = vertex in U,
value = neighbor list in V
:assumption: U = V = {0, 1, 2, ..., n - 1} for n = len(bigraph)
:returns: boolean table for U, boolean table for V
:comment: selected vertices form a minimum vertex cover,
i.e. every edge is adjacent to at least one selected vertex
and number of selected vertices is minimum
:complexity: `O(|V|*|E|)`
"""
V = range(len(bigraph))
matchV = max_bipartite_matching(bigraph)
matchU = [None for u in V]
for v in V: # -- build the mapping from U to V
if matchV[v] is not None:
matchU[matchV[v]] = v
visitU = [False for u in V] # -- build max alternating forest
visitV = [False for v in V]
for u in V:
if matchU[u] is None: # -- starting with free vertices in U
_alternate(u, bigraph, visitU, visitV, matchV)
inverse = [not b for b in visitU]
return (inverse, visitV) | ['def', 'bipartite_vertex_cover', '(', 'bigraph', ')', ':', 'V', '=', 'range', '(', 'len', '(', 'bigraph', ')', ')', 'matchV', '=', 'max_bipartite_matching', '(', 'bigraph', ')', 'matchU', '=', '[', 'None', 'for', 'u', 'in', 'V', ']', 'for', 'v', 'in', 'V', ':', '# -- build the mapping from U to V', 'if', 'matchV', '[', 'v', ']', 'is', 'not', 'None', ':', 'matchU', '[', 'matchV', '[', 'v', ']', ']', '=', 'v', 'visitU', '=', '[', 'False', 'for', 'u', 'in', 'V', ']', '# -- build max alternating forest', 'visitV', '=', '[', 'False', 'for', 'v', 'in', 'V', ']', 'for', 'u', 'in', 'V', ':', 'if', 'matchU', '[', 'u', ']', 'is', 'None', ':', '# -- starting with free vertices in U', '_alternate', '(', 'u', ',', 'bigraph', ',', 'visitU', ',', 'visitV', ',', 'matchV', ')', 'inverse', '=', '[', 'not', 'b', 'for', 'b', 'in', 'visitU', ']', 'return', '(', 'inverse', ',', 'visitV', ')'] | Bipartite minimum vertex cover by Koenig's theorem
:param bigraph: adjacency list, index = vertex in U,
value = neighbor list in V
:assumption: U = V = {0, 1, 2, ..., n - 1} for n = len(bigraph)
:returns: boolean table for U, boolean table for V
:comment: selected vertices form a minimum vertex cover,
i.e. every edge is adjacent to at least one selected vertex
and number of selected vertices is minimum
:complexity: `O(|V|*|E|)` | ['Bipartite', 'minimum', 'vertex', 'cover', 'by', 'Koenig', 's', 'theorem'] | train | https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/bipartite_vertex_cover.py#L22-L46 |
6,693 | jjjake/iamine | iamine/api.py | mine_urls | def mine_urls(urls, params=None, callback=None, **kwargs):
"""Concurrently retrieve URLs.
:param urls: A set of URLs to concurrently retrieve.
:type urls: iterable
:param params: (optional) The URL parameters to send with each
request.
:type params: dict
:param callback: (optional) A callback function to be called on each
:py:class:`aiohttp.client.ClientResponse`.
:param \*\*kwargs: (optional) Arguments that ``get_miner`` takes.
"""
miner = Miner(**kwargs)
try:
miner.loop.add_signal_handler(signal.SIGINT, miner.close)
miner.loop.run_until_complete(miner.mine_urls(urls, params, callback))
except RuntimeError:
pass | python | def mine_urls(urls, params=None, callback=None, **kwargs):
"""Concurrently retrieve URLs.
:param urls: A set of URLs to concurrently retrieve.
:type urls: iterable
:param params: (optional) The URL parameters to send with each
request.
:type params: dict
:param callback: (optional) A callback function to be called on each
:py:class:`aiohttp.client.ClientResponse`.
:param \*\*kwargs: (optional) Arguments that ``get_miner`` takes.
"""
miner = Miner(**kwargs)
try:
miner.loop.add_signal_handler(signal.SIGINT, miner.close)
miner.loop.run_until_complete(miner.mine_urls(urls, params, callback))
except RuntimeError:
pass | ['def', 'mine_urls', '(', 'urls', ',', 'params', '=', 'None', ',', 'callback', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'miner', '=', 'Miner', '(', '*', '*', 'kwargs', ')', 'try', ':', 'miner', '.', 'loop', '.', 'add_signal_handler', '(', 'signal', '.', 'SIGINT', ',', 'miner', '.', 'close', ')', 'miner', '.', 'loop', '.', 'run_until_complete', '(', 'miner', '.', 'mine_urls', '(', 'urls', ',', 'params', ',', 'callback', ')', ')', 'except', 'RuntimeError', ':', 'pass'] | Concurrently retrieve URLs.
:param urls: A set of URLs to concurrently retrieve.
:type urls: iterable
:param params: (optional) The URL parameters to send with each
request.
:type params: dict
:param callback: (optional) A callback function to be called on each
:py:class:`aiohttp.client.ClientResponse`.
:param \*\*kwargs: (optional) Arguments that ``get_miner`` takes. | ['Concurrently', 'retrieve', 'URLs', '.'] | train | https://github.com/jjjake/iamine/blob/f1fc123a5b40b2247c537382368d38bd744eebe0/iamine/api.py#L59-L79 |
6,694 | SheffieldML/GPyOpt | GPyOpt/util/general.py | merge_values | def merge_values(values1,values2):
'''
Merges two numpy arrays by calculating all possible combinations of rows
'''
array1 = values_to_array(values1)
array2 = values_to_array(values2)
if array1.size == 0:
return array2
if array2.size == 0:
return array1
merged_array = []
for row_array1 in array1:
for row_array2 in array2:
merged_row = np.hstack((row_array1,row_array2))
merged_array.append(merged_row)
return np.atleast_2d(merged_array) | python | def merge_values(values1,values2):
'''
Merges two numpy arrays by calculating all possible combinations of rows
'''
array1 = values_to_array(values1)
array2 = values_to_array(values2)
if array1.size == 0:
return array2
if array2.size == 0:
return array1
merged_array = []
for row_array1 in array1:
for row_array2 in array2:
merged_row = np.hstack((row_array1,row_array2))
merged_array.append(merged_row)
return np.atleast_2d(merged_array) | ['def', 'merge_values', '(', 'values1', ',', 'values2', ')', ':', 'array1', '=', 'values_to_array', '(', 'values1', ')', 'array2', '=', 'values_to_array', '(', 'values2', ')', 'if', 'array1', '.', 'size', '==', '0', ':', 'return', 'array2', 'if', 'array2', '.', 'size', '==', '0', ':', 'return', 'array1', 'merged_array', '=', '[', ']', 'for', 'row_array1', 'in', 'array1', ':', 'for', 'row_array2', 'in', 'array2', ':', 'merged_row', '=', 'np', '.', 'hstack', '(', '(', 'row_array1', ',', 'row_array2', ')', ')', 'merged_array', '.', 'append', '(', 'merged_row', ')', 'return', 'np', '.', 'atleast_2d', '(', 'merged_array', ')'] | Merges two numpy arrays by calculating all possible combinations of rows | ['Merges', 'two', 'numpy', 'arrays', 'by', 'calculating', 'all', 'possible', 'combinations', 'of', 'rows'] | train | https://github.com/SheffieldML/GPyOpt/blob/255539dc5927819ca701e44fe3d76cd4864222fa/GPyOpt/util/general.py#L183-L200 |
6,695 | PlaidWeb/Publ | publ/index.py | ConcurrentSet.remove | def remove(self, item):
""" Remove an item from the set, returning if it was present """
with self.lock:
if item in self.set:
self.set.remove(item)
return True
return False | python | def remove(self, item):
""" Remove an item from the set, returning if it was present """
with self.lock:
if item in self.set:
self.set.remove(item)
return True
return False | ['def', 'remove', '(', 'self', ',', 'item', ')', ':', 'with', 'self', '.', 'lock', ':', 'if', 'item', 'in', 'self', '.', 'set', ':', 'self', '.', 'set', '.', 'remove', '(', 'item', ')', 'return', 'True', 'return', 'False'] | Remove an item from the set, returning if it was present | ['Remove', 'an', 'item', 'from', 'the', 'set', 'returning', 'if', 'it', 'was', 'present'] | train | https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/index.py#L46-L52 |
6,696 | materialsproject/pymatgen | pymatgen/io/vasp/inputs.py | Incar.get_string | def get_string(self, sort_keys=False, pretty=False):
"""
Returns a string representation of the INCAR. The reason why this
method is different from the __str__ method is to provide options for
pretty printing.
Args:
sort_keys (bool): Set to True to sort the INCAR parameters
alphabetically. Defaults to False.
pretty (bool): Set to True for pretty aligned output. Defaults
to False.
"""
keys = self.keys()
if sort_keys:
keys = sorted(keys)
lines = []
for k in keys:
if k == "MAGMOM" and isinstance(self[k], list):
value = []
if (isinstance(self[k][0], list) or isinstance(self[k][0], Magmom)) and \
(self.get("LSORBIT") or self.get("LNONCOLLINEAR")):
value.append(" ".join(str(i) for j in self[k] for i in j))
elif self.get("LSORBIT") or self.get("LNONCOLLINEAR"):
for m, g in itertools.groupby(self[k]):
value.append("3*{}*{}".format(len(tuple(g)), m))
else:
# float() to ensure backwards compatibility between
# float magmoms and Magmom objects
for m, g in itertools.groupby(self[k], lambda x: float(x)):
value.append("{}*{}".format(len(tuple(g)), m))
lines.append([k, " ".join(value)])
elif isinstance(self[k], list):
lines.append([k, " ".join([str(i) for i in self[k]])])
else:
lines.append([k, self[k]])
if pretty:
return str(tabulate([[l[0], "=", l[1]] for l in lines],
tablefmt="plain"))
else:
return str_delimited(lines, None, " = ") + "\n" | python | def get_string(self, sort_keys=False, pretty=False):
"""
Returns a string representation of the INCAR. The reason why this
method is different from the __str__ method is to provide options for
pretty printing.
Args:
sort_keys (bool): Set to True to sort the INCAR parameters
alphabetically. Defaults to False.
pretty (bool): Set to True for pretty aligned output. Defaults
to False.
"""
keys = self.keys()
if sort_keys:
keys = sorted(keys)
lines = []
for k in keys:
if k == "MAGMOM" and isinstance(self[k], list):
value = []
if (isinstance(self[k][0], list) or isinstance(self[k][0], Magmom)) and \
(self.get("LSORBIT") or self.get("LNONCOLLINEAR")):
value.append(" ".join(str(i) for j in self[k] for i in j))
elif self.get("LSORBIT") or self.get("LNONCOLLINEAR"):
for m, g in itertools.groupby(self[k]):
value.append("3*{}*{}".format(len(tuple(g)), m))
else:
# float() to ensure backwards compatibility between
# float magmoms and Magmom objects
for m, g in itertools.groupby(self[k], lambda x: float(x)):
value.append("{}*{}".format(len(tuple(g)), m))
lines.append([k, " ".join(value)])
elif isinstance(self[k], list):
lines.append([k, " ".join([str(i) for i in self[k]])])
else:
lines.append([k, self[k]])
if pretty:
return str(tabulate([[l[0], "=", l[1]] for l in lines],
tablefmt="plain"))
else:
return str_delimited(lines, None, " = ") + "\n" | ['def', 'get_string', '(', 'self', ',', 'sort_keys', '=', 'False', ',', 'pretty', '=', 'False', ')', ':', 'keys', '=', 'self', '.', 'keys', '(', ')', 'if', 'sort_keys', ':', 'keys', '=', 'sorted', '(', 'keys', ')', 'lines', '=', '[', ']', 'for', 'k', 'in', 'keys', ':', 'if', 'k', '==', '"MAGMOM"', 'and', 'isinstance', '(', 'self', '[', 'k', ']', ',', 'list', ')', ':', 'value', '=', '[', ']', 'if', '(', 'isinstance', '(', 'self', '[', 'k', ']', '[', '0', ']', ',', 'list', ')', 'or', 'isinstance', '(', 'self', '[', 'k', ']', '[', '0', ']', ',', 'Magmom', ')', ')', 'and', '(', 'self', '.', 'get', '(', '"LSORBIT"', ')', 'or', 'self', '.', 'get', '(', '"LNONCOLLINEAR"', ')', ')', ':', 'value', '.', 'append', '(', '" "', '.', 'join', '(', 'str', '(', 'i', ')', 'for', 'j', 'in', 'self', '[', 'k', ']', 'for', 'i', 'in', 'j', ')', ')', 'elif', 'self', '.', 'get', '(', '"LSORBIT"', ')', 'or', 'self', '.', 'get', '(', '"LNONCOLLINEAR"', ')', ':', 'for', 'm', ',', 'g', 'in', 'itertools', '.', 'groupby', '(', 'self', '[', 'k', ']', ')', ':', 'value', '.', 'append', '(', '"3*{}*{}"', '.', 'format', '(', 'len', '(', 'tuple', '(', 'g', ')', ')', ',', 'm', ')', ')', 'else', ':', '# float() to ensure backwards compatibility between', '# float magmoms and Magmom objects', 'for', 'm', ',', 'g', 'in', 'itertools', '.', 'groupby', '(', 'self', '[', 'k', ']', ',', 'lambda', 'x', ':', 'float', '(', 'x', ')', ')', ':', 'value', '.', 'append', '(', '"{}*{}"', '.', 'format', '(', 'len', '(', 'tuple', '(', 'g', ')', ')', ',', 'm', ')', ')', 'lines', '.', 'append', '(', '[', 'k', ',', '" "', '.', 'join', '(', 'value', ')', ']', ')', 'elif', 'isinstance', '(', 'self', '[', 'k', ']', ',', 'list', ')', ':', 'lines', '.', 'append', '(', '[', 'k', ',', '" "', '.', 'join', '(', '[', 'str', '(', 'i', ')', 'for', 'i', 'in', 'self', '[', 'k', ']', ']', ')', ']', ')', 'else', ':', 'lines', '.', 'append', '(', '[', 'k', ',', 'self', '[', 'k', ']', ']', ')', 'if', 'pretty', ':', 'return', 'str', '(', 'tabulate', '(', '[', '[', 'l', '[', '0', ']', ',', '"="', ',', 'l', '[', '1', ']', ']', 'for', 'l', 'in', 'lines', ']', ',', 'tablefmt', '=', '"plain"', ')', ')', 'else', ':', 'return', 'str_delimited', '(', 'lines', ',', 'None', ',', '" = "', ')', '+', '"\\n"'] | Returns a string representation of the INCAR. The reason why this
method is different from the __str__ method is to provide options for
pretty printing.
Args:
sort_keys (bool): Set to True to sort the INCAR parameters
alphabetically. Defaults to False.
pretty (bool): Set to True for pretty aligned output. Defaults
to False. | ['Returns', 'a', 'string', 'representation', 'of', 'the', 'INCAR', '.', 'The', 'reason', 'why', 'this', 'method', 'is', 'different', 'from', 'the', '__str__', 'method', 'is', 'to', 'provide', 'options', 'for', 'pretty', 'printing', '.'] | train | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/vasp/inputs.py#L658-L700 |
6,697 | RudolfCardinal/pythonlib | cardinal_pythonlib/sqlalchemy/orm_inspect.py | gen_relationships | def gen_relationships(obj) -> Generator[Tuple[str, RelationshipProperty, Type],
None, None]:
"""
Yields tuples of ``(attrname, RelationshipProperty, related_class)``
for all relationships of an ORM object.
The object 'obj' can be EITHER an instance OR a class.
"""
insp = inspect(obj) # type: InstanceState
# insp.mapper.relationships is of type
# sqlalchemy.utils._collections.ImmutableProperties, which is basically
# a sort of AttrDict.
for attrname, rel_prop in insp.mapper.relationships.items(): # type: Tuple[str, RelationshipProperty] # noqa
# noinspection PyUnresolvedReferences
related_class = rel_prop.mapper.class_
# log.critical("gen_relationships: attrname={!r}, "
# "rel_prop={!r}, related_class={!r}, rel_prop.info={!r}",
# attrname, rel_prop, related_class, rel_prop.info)
yield attrname, rel_prop, related_class | python | def gen_relationships(obj) -> Generator[Tuple[str, RelationshipProperty, Type],
None, None]:
"""
Yields tuples of ``(attrname, RelationshipProperty, related_class)``
for all relationships of an ORM object.
The object 'obj' can be EITHER an instance OR a class.
"""
insp = inspect(obj) # type: InstanceState
# insp.mapper.relationships is of type
# sqlalchemy.utils._collections.ImmutableProperties, which is basically
# a sort of AttrDict.
for attrname, rel_prop in insp.mapper.relationships.items(): # type: Tuple[str, RelationshipProperty] # noqa
# noinspection PyUnresolvedReferences
related_class = rel_prop.mapper.class_
# log.critical("gen_relationships: attrname={!r}, "
# "rel_prop={!r}, related_class={!r}, rel_prop.info={!r}",
# attrname, rel_prop, related_class, rel_prop.info)
yield attrname, rel_prop, related_class | ['def', 'gen_relationships', '(', 'obj', ')', '->', 'Generator', '[', 'Tuple', '[', 'str', ',', 'RelationshipProperty', ',', 'Type', ']', ',', 'None', ',', 'None', ']', ':', 'insp', '=', 'inspect', '(', 'obj', ')', '# type: InstanceState', '# insp.mapper.relationships is of type', '# sqlalchemy.utils._collections.ImmutableProperties, which is basically', '# a sort of AttrDict.', 'for', 'attrname', ',', 'rel_prop', 'in', 'insp', '.', 'mapper', '.', 'relationships', '.', 'items', '(', ')', ':', '# type: Tuple[str, RelationshipProperty] # noqa', '# noinspection PyUnresolvedReferences', 'related_class', '=', 'rel_prop', '.', 'mapper', '.', 'class_', '# log.critical("gen_relationships: attrname={!r}, "', '# "rel_prop={!r}, related_class={!r}, rel_prop.info={!r}",', '# attrname, rel_prop, related_class, rel_prop.info)', 'yield', 'attrname', ',', 'rel_prop', ',', 'related_class'] | Yields tuples of ``(attrname, RelationshipProperty, related_class)``
for all relationships of an ORM object.
The object 'obj' can be EITHER an instance OR a class. | ['Yields', 'tuples', 'of', '(', 'attrname', 'RelationshipProperty', 'related_class', ')', 'for', 'all', 'relationships', 'of', 'an', 'ORM', 'object', '.', 'The', 'object', 'obj', 'can', 'be', 'EITHER', 'an', 'instance', 'OR', 'a', 'class', '.'] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/orm_inspect.py#L628-L645 |
6,698 | ColtonProvias/sqlalchemy-jsonapi | sqlalchemy_jsonapi/serializer.py | JSONAPI.get_resource | def get_resource(self, session, query, api_type, obj_id):
"""
Fetch a resource.
:param session: SQLAlchemy session
:param query: Dict of query args
:param api_type: Type of the resource
:param obj_id: ID of the resource
"""
resource = self._fetch_resource(session, api_type, obj_id,
Permissions.VIEW)
include = self._parse_include(query.get('include', '').split(','))
fields = self._parse_fields(query)
response = JSONAPIResponse()
built = self._render_full_resource(resource, include, fields)
response.data['included'] = list(built.pop('included').values())
response.data['data'] = built
return response | python | def get_resource(self, session, query, api_type, obj_id):
"""
Fetch a resource.
:param session: SQLAlchemy session
:param query: Dict of query args
:param api_type: Type of the resource
:param obj_id: ID of the resource
"""
resource = self._fetch_resource(session, api_type, obj_id,
Permissions.VIEW)
include = self._parse_include(query.get('include', '').split(','))
fields = self._parse_fields(query)
response = JSONAPIResponse()
built = self._render_full_resource(resource, include, fields)
response.data['included'] = list(built.pop('included').values())
response.data['data'] = built
return response | ['def', 'get_resource', '(', 'self', ',', 'session', ',', 'query', ',', 'api_type', ',', 'obj_id', ')', ':', 'resource', '=', 'self', '.', '_fetch_resource', '(', 'session', ',', 'api_type', ',', 'obj_id', ',', 'Permissions', '.', 'VIEW', ')', 'include', '=', 'self', '.', '_parse_include', '(', 'query', '.', 'get', '(', "'include'", ',', "''", ')', '.', 'split', '(', "','", ')', ')', 'fields', '=', 'self', '.', '_parse_fields', '(', 'query', ')', 'response', '=', 'JSONAPIResponse', '(', ')', 'built', '=', 'self', '.', '_render_full_resource', '(', 'resource', ',', 'include', ',', 'fields', ')', 'response', '.', 'data', '[', "'included'", ']', '=', 'list', '(', 'built', '.', 'pop', '(', "'included'", ')', '.', 'values', '(', ')', ')', 'response', '.', 'data', '[', "'data'", ']', '=', 'built', 'return', 'response'] | Fetch a resource.
:param session: SQLAlchemy session
:param query: Dict of query args
:param api_type: Type of the resource
:param obj_id: ID of the resource | ['Fetch', 'a', 'resource', '.'] | train | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/serializer.py#L692-L713 |
6,699 | PyCQA/astroid | astroid/scoped_nodes.py | ClassDef.getattr | def getattr(self, name, context=None, class_context=True):
"""Get an attribute from this class, using Python's attribute semantic.
This method doesn't look in the :attr:`instance_attrs` dictionary
since it is done by an :class:`Instance` proxy at inference time.
It may return an :class:`Uninferable` object if
the attribute has not been
found, but a ``__getattr__`` or ``__getattribute__`` method is defined.
If ``class_context`` is given, then it is considered that the
attribute is accessed from a class context,
e.g. ClassDef.attribute, otherwise it might have been accessed
from an instance as well. If ``class_context`` is used in that
case, then a lookup in the implicit metaclass and the explicit
metaclass will be done.
:param name: The attribute to look for.
:type name: str
:param class_context: Whether the attribute can be accessed statically.
:type class_context: bool
:returns: The attribute.
:rtype: list(NodeNG)
:raises AttributeInferenceError: If the attribute cannot be inferred.
"""
values = self.locals.get(name, [])
if name in self.special_attributes and class_context and not values:
result = [self.special_attributes.lookup(name)]
if name == "__bases__":
# Need special treatment, since they are mutable
# and we need to return all the values.
result += values
return result
# don't modify the list in self.locals!
values = list(values)
for classnode in self.ancestors(recurs=True, context=context):
values += classnode.locals.get(name, [])
if class_context:
values += self._metaclass_lookup_attribute(name, context)
if not values:
raise exceptions.AttributeInferenceError(
target=self, attribute=name, context=context
)
# Look for AnnAssigns, which are not attributes in the purest sense.
for value in values:
if isinstance(value, node_classes.AssignName):
stmt = value.statement()
if isinstance(stmt, node_classes.AnnAssign) and stmt.value is None:
raise exceptions.AttributeInferenceError(
target=self, attribute=name, context=context
)
return values | python | def getattr(self, name, context=None, class_context=True):
"""Get an attribute from this class, using Python's attribute semantic.
This method doesn't look in the :attr:`instance_attrs` dictionary
since it is done by an :class:`Instance` proxy at inference time.
It may return an :class:`Uninferable` object if
the attribute has not been
found, but a ``__getattr__`` or ``__getattribute__`` method is defined.
If ``class_context`` is given, then it is considered that the
attribute is accessed from a class context,
e.g. ClassDef.attribute, otherwise it might have been accessed
from an instance as well. If ``class_context`` is used in that
case, then a lookup in the implicit metaclass and the explicit
metaclass will be done.
:param name: The attribute to look for.
:type name: str
:param class_context: Whether the attribute can be accessed statically.
:type class_context: bool
:returns: The attribute.
:rtype: list(NodeNG)
:raises AttributeInferenceError: If the attribute cannot be inferred.
"""
values = self.locals.get(name, [])
if name in self.special_attributes and class_context and not values:
result = [self.special_attributes.lookup(name)]
if name == "__bases__":
# Need special treatment, since they are mutable
# and we need to return all the values.
result += values
return result
# don't modify the list in self.locals!
values = list(values)
for classnode in self.ancestors(recurs=True, context=context):
values += classnode.locals.get(name, [])
if class_context:
values += self._metaclass_lookup_attribute(name, context)
if not values:
raise exceptions.AttributeInferenceError(
target=self, attribute=name, context=context
)
# Look for AnnAssigns, which are not attributes in the purest sense.
for value in values:
if isinstance(value, node_classes.AssignName):
stmt = value.statement()
if isinstance(stmt, node_classes.AnnAssign) and stmt.value is None:
raise exceptions.AttributeInferenceError(
target=self, attribute=name, context=context
)
return values | ['def', 'getattr', '(', 'self', ',', 'name', ',', 'context', '=', 'None', ',', 'class_context', '=', 'True', ')', ':', 'values', '=', 'self', '.', 'locals', '.', 'get', '(', 'name', ',', '[', ']', ')', 'if', 'name', 'in', 'self', '.', 'special_attributes', 'and', 'class_context', 'and', 'not', 'values', ':', 'result', '=', '[', 'self', '.', 'special_attributes', '.', 'lookup', '(', 'name', ')', ']', 'if', 'name', '==', '"__bases__"', ':', '# Need special treatment, since they are mutable', '# and we need to return all the values.', 'result', '+=', 'values', 'return', 'result', "# don't modify the list in self.locals!", 'values', '=', 'list', '(', 'values', ')', 'for', 'classnode', 'in', 'self', '.', 'ancestors', '(', 'recurs', '=', 'True', ',', 'context', '=', 'context', ')', ':', 'values', '+=', 'classnode', '.', 'locals', '.', 'get', '(', 'name', ',', '[', ']', ')', 'if', 'class_context', ':', 'values', '+=', 'self', '.', '_metaclass_lookup_attribute', '(', 'name', ',', 'context', ')', 'if', 'not', 'values', ':', 'raise', 'exceptions', '.', 'AttributeInferenceError', '(', 'target', '=', 'self', ',', 'attribute', '=', 'name', ',', 'context', '=', 'context', ')', '# Look for AnnAssigns, which are not attributes in the purest sense.', 'for', 'value', 'in', 'values', ':', 'if', 'isinstance', '(', 'value', ',', 'node_classes', '.', 'AssignName', ')', ':', 'stmt', '=', 'value', '.', 'statement', '(', ')', 'if', 'isinstance', '(', 'stmt', ',', 'node_classes', '.', 'AnnAssign', ')', 'and', 'stmt', '.', 'value', 'is', 'None', ':', 'raise', 'exceptions', '.', 'AttributeInferenceError', '(', 'target', '=', 'self', ',', 'attribute', '=', 'name', ',', 'context', '=', 'context', ')', 'return', 'values'] | Get an attribute from this class, using Python's attribute semantic.
This method doesn't look in the :attr:`instance_attrs` dictionary
since it is done by an :class:`Instance` proxy at inference time.
It may return an :class:`Uninferable` object if
the attribute has not been
found, but a ``__getattr__`` or ``__getattribute__`` method is defined.
If ``class_context`` is given, then it is considered that the
attribute is accessed from a class context,
e.g. ClassDef.attribute, otherwise it might have been accessed
from an instance as well. If ``class_context`` is used in that
case, then a lookup in the implicit metaclass and the explicit
metaclass will be done.
:param name: The attribute to look for.
:type name: str
:param class_context: Whether the attribute can be accessed statically.
:type class_context: bool
:returns: The attribute.
:rtype: list(NodeNG)
:raises AttributeInferenceError: If the attribute cannot be inferred. | ['Get', 'an', 'attribute', 'from', 'this', 'class', 'using', 'Python', 's', 'attribute', 'semantic', '.'] | train | https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/scoped_nodes.py#L2323-L2379 |