index
int64 0
731k
| package
stringlengths 2
98
⌀ | name
stringlengths 1
76
| docstring
stringlengths 0
281k
⌀ | code
stringlengths 4
1.07M
⌀ | signature
stringlengths 2
42.8k
⌀ |
---|---|---|---|---|---|
50,728 |
iptools
|
_address2long
|
Convert an address string to a long.
|
def _address2long(address):
    """
    Convert an address string to a long.

    Tries an IPv4 parse first and falls back to IPv6 when that fails.
    """
    result = ipv4.ip2long(address)
    if result is not None:
        return result
    return ipv6.ip2long(address)
|
(address)
|
50,732 |
molzip.classifier
|
ZipClassifier
| null |
class ZipClassifier(object):
    """Classifier that labels samples by compression-distance kNN voting.

    The class holds no state; all work happens in :meth:`fit_predict`.
    """

    def __init__(self) -> None:
        # Fixed: __init__ must be annotated -> None (it never returns an
        # instance); the original said -> "ZipClassifier".
        pass

    def fit_predict(
        self,
        X_train: Iterable[str],
        y_train: Iterable,
        X: Iterable[str],
        k: int = 5,
        class_weights: Optional[Iterable] = None,
    ) -> np.ndarray:
        """Predict a label for every sample in X from its k nearest training samples.

        Args:
            X_train: Training samples (strings).
            y_train: Training labels; a 1-D array is promoted to 2-D.
            X: Samples to classify.
            k: Number of neighbours to vote. Defaults to 5.
            class_weights: Optional per-class weights forwarded to ``classify``.

        Returns:
            np.ndarray with one prediction row per sample in X.
        """
        y_train = np.array(y_train)
        if len(y_train.shape) == 1:
            y_train = np.expand_dims(y_train, axis=1)
        # Per-sample classification is embarrassingly parallel; fan it out
        # over all cores. (Removed the dead `preds = []` initialiser — the
        # pool map always rebinds it.)
        with multiprocessing.Pool(multiprocessing.cpu_count()) as p:
            preds = p.map(
                partial(
                    classify,
                    X_train=X_train,
                    y_train=y_train,
                    k=k,
                    class_weights=class_weights,
                ),
                X,
            )
        return np.array(preds)
|
() -> 'ZipClassifier'
|
50,733 |
molzip.classifier
|
__init__
| null |
def __init__(self) -> None:
    """Initialise the (stateless) classifier.

    Fixed: __init__ must be annotated ``-> None``; the original said
    ``-> "ZipClassifier"``, which is incorrect for an initialiser.
    """
    pass
|
(self) -> molzip.classifier.ZipClassifier
|
50,734 |
molzip.classifier
|
fit_predict
| null |
def fit_predict(
    self,
    X_train: Iterable[str],
    y_train: Iterable,
    X: Iterable[str],
    k: int = 5,
    class_weights: Optional[Iterable] = None,
) -> np.ndarray:
    """Predict a label for every sample in X from its k nearest training samples.

    Args:
        X_train: Training samples (strings).
        y_train: Training labels; a 1-D array is promoted to 2-D.
        X: Samples to classify.
        k: Number of neighbours to vote. Defaults to 5.
        class_weights: Optional per-class weights forwarded to ``classify``.

    Returns:
        np.ndarray with one prediction row per sample in X.
    """
    y_train = np.array(y_train)
    if len(y_train.shape) == 1:
        y_train = np.expand_dims(y_train, axis=1)
    # Removed the dead `preds = []` initialiser — the pool map always
    # rebinds it before use.
    with multiprocessing.Pool(multiprocessing.cpu_count()) as p:
        preds = p.map(
            partial(
                classify,
                X_train=X_train,
                y_train=y_train,
                k=k,
                class_weights=class_weights,
            ),
            X,
        )
    return np.array(preds)
|
(self, X_train: Iterable[str], y_train: Iterable, X: Iterable[str], k: int = 5, class_weights: Optional[Iterable] = None) -> numpy.ndarray
|
50,735 |
molzip.knn_graph
|
ZipKNNGraph
| null |
class ZipKNNGraph(object):
    """Builds a k-nearest-neighbour edge list via compression distance."""

    def __init__(self) -> None:
        # Fixed: __init__ must be annotated -> None; the original said
        # -> "ZipKNNGraph".
        pass

    def fit_predict(self, X, k):
        """Return a flat edge list of the k nearest neighbours of each item.

        Each worker receives an ``(item, index)`` pair so the resulting
        edges can reference positions in X.
        """
        # Removed the dead `edge_list = []` initialiser — the pool map
        # always rebinds it.
        with multiprocessing.Pool(multiprocessing.cpu_count()) as p:
            edge_list = p.map(
                partial(
                    get_knn,
                    X=X,
                    k=k,
                ),
                list(zip(X, range(len(X)))),
            )
        # Return the flattened list (comment typo fixed: was "Returned").
        return [item for sublist in edge_list for item in sublist]
|
() -> 'ZipKNNGraph'
|
50,736 |
molzip.knn_graph
|
__init__
| null |
def __init__(self) -> None:
    """Initialise the (stateless) graph builder.

    Fixed: __init__ must be annotated ``-> None``; the original said
    ``-> "ZipKNNGraph"``.
    """
    pass
|
(self) -> molzip.knn_graph.ZipKNNGraph
|
50,737 |
molzip.knn_graph
|
fit_predict
| null |
def fit_predict(self, X, k):
    """Return a flat edge list of the k nearest neighbours of each item in X.

    Each worker receives an ``(item, index)`` pair so the resulting edges
    can reference positions in X.
    """
    # Removed the dead `edge_list = []` initialiser — the pool map always
    # rebinds it before use.
    with multiprocessing.Pool(multiprocessing.cpu_count()) as p:
        edge_list = p.map(
            partial(
                get_knn,
                X=X,
                k=k,
            ),
            list(zip(X, range(len(X)))),
        )
    # Return the flattened list (comment typo fixed: was "Returned").
    return [item for sublist in edge_list for item in sublist]
|
(self, X, k)
|
50,738 |
molzip.regressor
|
ZipRegressor
| null |
class ZipRegressor(object):
    """Regressor that predicts values by compression-distance kNN averaging."""

    def __init__(self) -> None:
        # Fixed: __init__ must be annotated -> None; the original said
        # -> "ZipRegressor".
        pass

    def fit_predict(
        self, X_train: Iterable[str], y_train: Iterable, X: Iterable[str], k: int = 25
    ) -> np.ndarray:
        """Predict a value for every sample in X from its k nearest training samples.

        Args:
            X_train: Training samples (strings).
            y_train: Training targets; a 1-D array is promoted to 2-D.
            X: Samples to predict.
            k: Number of neighbours to aggregate. Defaults to 25.

        Returns:
            np.ndarray with one prediction row per sample in X.
        """
        y_train = np.array(y_train)
        if len(y_train.shape) == 1:
            y_train = np.expand_dims(y_train, axis=1)
        # Removed the dead `preds = []` initialiser — the pool map always
        # rebinds it.
        with multiprocessing.Pool(multiprocessing.cpu_count()) as p:
            preds = p.map(
                partial(
                    regress,
                    X_train=X_train,
                    y_train=y_train,
                    k=k,
                ),
                X,
            )
        return np.array(preds)
|
() -> 'ZipRegressor'
|
50,739 |
molzip.regressor
|
__init__
| null |
def __init__(self) -> None:
    """Initialise the (stateless) regressor.

    Fixed: __init__ must be annotated ``-> None``; the original said
    ``-> "ZipRegressor"``.
    """
    pass
|
(self) -> molzip.regressor.ZipRegressor
|
50,740 |
molzip.regressor
|
fit_predict
| null |
def fit_predict(
    self, X_train: Iterable[str], y_train: Iterable, X: Iterable[str], k: int = 25
) -> np.ndarray:
    """Predict a value for every sample in X from its k nearest training samples.

    Args:
        X_train: Training samples (strings).
        y_train: Training targets; a 1-D array is promoted to 2-D.
        X: Samples to predict.
        k: Number of neighbours to aggregate. Defaults to 25.

    Returns:
        np.ndarray with one prediction row per sample in X.
    """
    y_train = np.array(y_train)
    if len(y_train.shape) == 1:
        y_train = np.expand_dims(y_train, axis=1)
    # Removed the dead `preds = []` initialiser — the pool map always
    # rebinds it before use.
    with multiprocessing.Pool(multiprocessing.cpu_count()) as p:
        preds = p.map(
            partial(
                regress,
                X_train=X_train,
                y_train=y_train,
                k=k,
            ),
            X,
        )
    return np.array(preds)
|
(self, X_train: Iterable[str], y_train: Iterable, X: Iterable[str], k: int = 25) -> numpy.ndarray
|
50,744 |
iterminal.commands
|
Commands
| null |
class Commands(Component):
    """Shell component exposing the built-in 'reload' and
    'list_components' commands as shell locals."""
    def __init__(self):
        """Register under the component name 'shell_commands'."""
        super().__init__('shell_commands')
    def reload(self, shell):
        """Deinit the shell, stop stray worker threads, then re-init it.

        Only threads that expose a callable 'stop' attribute are asked to
        stop; anything else is left running.
        """
        import threading
        shell.deinit()
        # Stop any running threads that are
        # not the main thread
        for thread in threading.enumerate():
            if thread is not threading.main_thread() and thread.is_alive():
                stop = getattr(thread, 'stop', None)
                if stop:
                    stop()
        shell.init()
    def list_components(self, shell):
        """Print 'index: name' for each component registered on the shell."""
        for i, c in enumerate(shell.components):
            print('{}: {}'.format(i, c.name))
    def init(self, shell):
        """Expose the commands as zero-argument shell locals."""
        shell.set_local('list_components', \
            lambda: self.list_components(shell))
        shell.set_local('reload', \
            lambda: self.reload(shell))
    def dispose(self, shell):
        """Remove the locals registered by init()."""
        shell.unset_local('list_components')
        shell.unset_local('reload')
|
()
|
50,745 |
iterminal.commands
|
__init__
| null |
def __init__(self):
    """Register the component under the name 'shell_commands'."""
    super().__init__('shell_commands')
|
(self)
|
50,746 |
iterminal.commands
|
dispose
| null |
def dispose(self, shell):
    """Remove the shell locals registered by init()."""
    shell.unset_local('list_components')
    shell.unset_local('reload')
|
(self, shell)
|
50,747 |
iterminal.commands
|
init
| null |
def init(self, shell):
    """Register 'list_components' and 'reload' as zero-argument shell locals."""
    # Calls are parenthesised, so lines continue implicitly; the original
    # trailing backslashes were redundant.
    shell.set_local('list_components',
                    lambda: self.list_components(shell))
    shell.set_local('reload',
                    lambda: self.reload(shell))
|
(self, shell)
|
50,748 |
iterminal.commands
|
list_components
| null |
def list_components(self, shell):
    """Print an indexed 'index: name' listing of the shell's components."""
    for index, component in enumerate(shell.components):
        print('{}: {}'.format(index, component.name))
|
(self, shell)
|
50,749 |
iterminal.commands
|
reload
| null |
def reload(self, shell):
    """Deinit the shell, stop stray worker threads, then re-init it.

    Only threads that expose a callable 'stop' attribute are asked to
    stop; anything else is left running.
    """
    import threading
    shell.deinit()
    # Stop any running threads that are
    # not the main thread
    for thread in threading.enumerate():
        if thread is not threading.main_thread() and thread.is_alive():
            stop = getattr(thread, 'stop', None)
            if stop:
                stop()
    shell.init()
|
(self, shell)
|
50,750 |
iterminal.component
|
Component
| null |
class Component:
    """Base class for pluggable shell components.

    Subclasses override init()/dispose(), which are invoked by
    Shell.init()/Shell.deinit() respectively.
    """
    def __init__(self, name):
        # Human-readable component name, used in listings.
        self.name = name
    def init(self, shell):
        """Hook called when the shell starts up; default is a no-op."""
        pass
    def dispose(self, shell):
        """Hook called when the shell shuts down; default is a no-op."""
        pass
|
(name)
|
50,752 |
iterminal.component
|
dispose
| null |
def dispose(self, shell):
    """Hook called when the shell shuts down; default is a no-op."""
    pass
|
(self, shell)
|
50,753 |
iterminal.component
|
init
| null |
def init(self, shell):
    """Hook called when the shell starts up; default is a no-op."""
    pass
|
(self, shell)
|
50,754 |
iterminal.console
|
ConsoleCommands
| null |
class ConsoleCommands(Component):
    """Component exposing a 'clear' command bound to the attached console."""

    def __init__(self, console):
        super().__init__('console_commands')
        self._console = console

    # The commands
    def clear(self):
        self._console.clear()

    def init(self, shell):
        shell.set_local('clear', self.clear)

    def dispose(self, shell):
        shell.unset_local('clear')
|
(console)
|
50,755 |
iterminal.console
|
__init__
| null |
def __init__(self, console):
    """Bind this component to *console* under the name 'console_commands'."""
    super().__init__('console_commands')
    self._console = console
|
(self, console)
|
50,756 |
iterminal.console
|
clear
| null |
def clear(self):
    """Clear the attached console's screen."""
    self._console.clear()
|
(self)
|
50,757 |
iterminal.console
|
dispose
| null |
def dispose(self, shell):
    """Remove the 'clear' local registered by init()."""
    shell.unset_local('clear')
|
(self, shell)
|
50,758 |
iterminal.console
|
init
| null |
def init(self, shell):
    """Expose the console's clear() as the 'clear' shell local."""
    shell.set_local('clear', self.clear)
|
(self, shell)
|
50,759 |
iterminal.console
|
ConsoleTerminal
| null |
class ConsoleTerminal:
    """Interactive stdin/stdout terminal front-end driving a Shell."""
    def __init__(self):
        import sys
        # readline is imported for its side effect (line editing/history);
        # it is optional and absent on some platforms.
        try:
            import readline
        except ImportError:
            pass
        self.stdout = sys.stdout
        self.stderr = sys.stderr
        self.stdin = sys.stdin
        # Callables invoked as listener(prompt, entered_line) after input.
        self._input_listeners = []
    def add_prompt_listener(self, l):
        """Register a callable(prompt, line) notified after each input."""
        self._input_listeners.append(l)
    def prompt(self, prompt=''):
        """Read one line, notify listeners, and return the line."""
        i = input(prompt)
        for l in self._input_listeners:
            l(prompt, i)
        return i
    def clear(self):
        """Clear the screen by shelling out to 'clear' (POSIX only)."""
        import os
        os.system('clear')
    def run(self, shell):
        """Run the REPL loop until EOF (Ctrl-D).

        NOTE(review): Shell.process_input is declared as
        (line, stdout, stdin, stderr) but this call passes
        (line, stdout, stderr, stdin) — confirm the intended order.
        """
        shell.init()
        # `more` is True while a multi-line statement is being entered.
        more = False
        while True:
            try:
                prompt = '... ' if more else '>>> '
                try:
                    line = self.prompt(prompt) # TODO: Handle if we want to use something else besides stdin
                except EOFError:
                    self.stdout.write('\n')
                    break
                else:
                    try:
                        more = not shell.process_input(line, self.stdout, self.stderr, self.stdin)
                    except InterpretError as e:
                        print(e)
            except KeyboardInterrupt:
                self.stdout.write('\nKeyboardInterrupt\n')
                # Clear the input buffer
                shell._buffer.clear()
                more = False
|
()
|
50,760 |
iterminal.console
|
__init__
| null |
def __init__(self):
    """Capture the process's standard streams and set up listeners."""
    import sys
    # readline is imported for its side effect (line editing/history);
    # it is optional and absent on some platforms.
    try:
        import readline
    except ImportError:
        pass
    self.stdout = sys.stdout
    self.stderr = sys.stderr
    self.stdin = sys.stdin
    # Callables invoked as listener(prompt, entered_line) after input.
    self._input_listeners = []
|
(self)
|
50,761 |
iterminal.console
|
add_prompt_listener
| null |
def add_prompt_listener(self, l):
    """Register a callable(prompt, line) notified after each input."""
    self._input_listeners.append(l)
|
(self, l)
|
50,762 |
iterminal.console
|
clear
| null |
def clear(self):
    """Clear the screen by shelling out to 'clear' (POSIX only)."""
    import os
    os.system('clear')
|
(self)
|
50,763 |
iterminal.console
|
prompt
| null |
def prompt(self, prompt=''):
    """Read one line from stdin, notify listeners, and return the line."""
    i = input(prompt)
    for l in self._input_listeners:
        l(prompt, i)
    return i
|
(self, prompt='')
|
50,764 |
iterminal.console
|
run
| null |
def run(self, shell):
    """Run the REPL loop until EOF (Ctrl-D).

    NOTE(review): Shell.process_input is declared as
    (line, stdout, stdin, stderr) but this call passes
    (line, stdout, stderr, stdin) — confirm the intended order.
    """
    shell.init()
    # `more` is True while a multi-line statement is being entered.
    more = False
    while True:
        try:
            prompt = '... ' if more else '>>> '
            try:
                line = self.prompt(prompt) # TODO: Handle if we want to use something else besides stdin
            except EOFError:
                self.stdout.write('\n')
                break
            else:
                try:
                    more = not shell.process_input(line, self.stdout, self.stderr, self.stdin)
                except InterpretError as e:
                    print(e)
        except KeyboardInterrupt:
            self.stdout.write('\nKeyboardInterrupt\n')
            # Clear the input buffer
            shell._buffer.clear()
            more = False
|
(self, shell)
|
50,765 |
iterminal.interpreter
|
InterpreterSetup
| null |
class InterpreterSetup(Component):
    """Component that resets interpreter locals and, on dispose, evicts
    non-core modules from sys.modules so a reload re-imports them."""
    def __init__(self):
        super().__init__('interpreter')
        # Module-name prefixes that must never be evicted from sys.modules.
        self._blacklist = ['codecs', 'encodings', '__main__', 'logbook',
                'io', 'abc', 'site', 'builtins', 'sys']
    def init(self, shell):
        """Start from a clean local namespace."""
        shell.clear_locals()
    def dispose(self, shell):
        """Clear locals and evict all non-blacklisted modules."""
        shell.clear_locals()
        # Delete all of the modules
        for name in list(sys.modules.keys()):
            if not name.startswith(tuple(self._blacklist)):
                del sys.modules[name]
|
()
|
50,766 |
iterminal.interpreter
|
__init__
| null |
def __init__(self):
    """Register under 'interpreter' and set the module eviction blacklist."""
    super().__init__('interpreter')
    # Module-name prefixes that must never be evicted from sys.modules.
    self._blacklist = ['codecs', 'encodings', '__main__', 'logbook',
            'io', 'abc', 'site', 'builtins', 'sys']
|
(self)
|
50,767 |
iterminal.interpreter
|
dispose
| null |
def dispose(self, shell):
    """Clear shell locals and evict non-blacklisted modules from sys.modules.

    Eviction forces a fresh import of project modules on the next init,
    which is what makes the shell's 'reload' pick up code changes.
    """
    shell.clear_locals()
    # Delete all of the modules
    for name in list(sys.modules.keys()):
        if not name.startswith(tuple(self._blacklist)):
            del sys.modules[name]
|
(self, shell)
|
50,768 |
iterminal.interpreter
|
init
| null |
def init(self, shell):
    """Start the interpreter from a clean local namespace."""
    shell.clear_locals()
|
(self, shell)
|
50,769 |
iterminal.shell
|
LoadScript
| null |
class LoadScript(Component):
    """Component that runs a named resource script when the shell starts."""

    def __init__(self, res):
        super().__init__('load {}'.format(res))
        self._res = res

    def init(self, shell):
        loaded = shell.run_resource(self._res)
        if not loaded:
            print('Couldn\'t load script {}'.format(self._res))

    def dispose(self, shell):
        pass
|
(res)
|
50,770 |
iterminal.shell
|
__init__
| null |
def __init__(self, res):
    """Remember the resource name and register as 'load <res>'."""
    super().__init__('load {}'.format(res))
    self._res = res
|
(self, res)
|
50,772 |
iterminal.shell
|
init
| null |
def init(self, shell):
    """Run the configured resource script, reporting failure to stdout."""
    loaded = shell.run_resource(self._res)
    if not loaded:
        print('Couldn\'t load script {}'.format(self._res))
|
(self, shell)
|
50,773 |
insteonterminal
|
LoggingSetup
| null |
class LoggingSetup(Component):
    """Component wiring logbook handlers and mirroring print() to the log.

    While active, builtins.print is replaced by a wrapper that also feeds
    each printed line to console_logger; dispose() restores the original.
    """
    def __init__(self, stdout, stderr):
        super().__init__('logging')
        self._stdout = stdout
        self._stderr = stderr
        # Keep the real print so it can be restored on dispose().
        self._original_print = builtins.print
        # Console/prompt channels are excluded to avoid echo loops with
        # the print wrapper installed in init().
        self._stream_handler = logbook.StreamHandler(self._stdout, bubble=True,
            filter=lambda r,h: r.channel != 'console' and r.channel != 'prompt')
        def format(record, handler):
            # Prefix non-INFO records with their level name.
            if record.level != logbook.INFO:
                return logbook.get_level_name(record.level) + ': ' + record.message
            else:
                return record.message
        self._stream_handler.formatter = format
        self._file_handler = logbook.FileHandler(os.path.join(log_dir, 'insteon-terminal.log'),
            bubble=True)
    def init(self, shell):
        """Install the print wrapper and push both logbook handlers."""
        def print(*args, **kwargs):
            # Print normally, then mirror every line into the console log.
            self._original_print(*args, **kwargs)
            lines = ' '.join(map(str, args)).split('\n')
            for l in lines:
                console_logger.info(l)
        builtins.print = print
        self._file_handler.push_application()
        self._stream_handler.push_application()
    def dispose(self, shell):
        """Restore print and pop the handlers (reverse of init order)."""
        builtins.print = self._original_print
        self._stream_handler.pop_application()
        self._file_handler.pop_application()
|
(stdout, stderr)
|
50,774 |
insteonterminal
|
__init__
| null |
def __init__(self, stdout, stderr):
    """Prepare stream and file logbook handlers (not yet pushed)."""
    super().__init__('logging')
    self._stdout = stdout
    self._stderr = stderr
    # Keep the real print so it can be restored on dispose().
    self._original_print = builtins.print
    # Console/prompt channels are excluded to avoid echo loops with the
    # print wrapper installed in init().
    self._stream_handler = logbook.StreamHandler(self._stdout, bubble=True,
        filter=lambda r,h: r.channel != 'console' and r.channel != 'prompt')
    def format(record, handler):
        # Prefix non-INFO records with their level name.
        if record.level != logbook.INFO:
            return logbook.get_level_name(record.level) + ': ' + record.message
        else:
            return record.message
    self._stream_handler.formatter = format
    self._file_handler = logbook.FileHandler(os.path.join(log_dir, 'insteon-terminal.log'),
        bubble=True)
|
(self, stdout, stderr)
|
50,775 |
insteonterminal
|
dispose
| null |
def dispose(self, shell):
    """Restore the original print and pop the handlers pushed by init()."""
    builtins.print = self._original_print
    self._stream_handler.pop_application()
    self._file_handler.pop_application()
|
(self, shell)
|
50,776 |
insteonterminal
|
init
| null |
def init(self, shell):
    """Replace builtins.print with a logging wrapper and push handlers."""
    def print(*args, **kwargs):
        # Print normally, then mirror every printed line to the console log.
        self._original_print(*args, **kwargs)
        lines = ' '.join(map(str, args)).split('\n')
        for l in lines:
            console_logger.info(l)
    builtins.print = print
    self._file_handler.push_application()
    self._stream_handler.push_application()
|
(self, shell)
|
50,777 |
iterminal.shell
|
Shell
| null |
class Shell:
    """Interactive interpreter shell with pluggable components and
    resource-script loading."""
    def __init__(self):
        self._interpreter = Interpreter()
        # Directories searched by resolve_resource(), in order.
        self._resource_paths = []
        self._components = []
        self._buffer = [] # Holds partial code
    # Component-related methods
    @property
    def components(self):
        """The registered components, in registration order."""
        return self._components
    def add_component(self, comp):
        """Register a component; init order follows registration order."""
        self._components.append(comp)
    def init(self):
        """Initialise components in registration order."""
        for comp in self._components:
            comp.init(self)
    def deinit(self):
        """Dispose components in reverse registration order."""
        for comp in reversed(self._components):
            comp.dispose(self)
    # Resource-related methods
    def add_resource_path(self, path):
        """Append a directory to the resource search path."""
        self._resource_paths.append(path)
    def resolve_resource(self, filename):
        """Return the first existing path for filename, or None."""
        for l in self._resource_paths:
            path = os.path.join(l, filename)
            if os.path.isfile(path):
                return path
        return None
    def run_resource(self, filename):
        """Execute a resource script; True if found and run, else False."""
        res = self.resolve_resource(filename)
        if res:
            self._interpreter.exec_file(res)
            return True
        else:
            return False
    # Input-related methods
    def set_local(self, name, val):
        """Bind a name in the interpreter's local namespace."""
        self._interpreter.locals[name] = val
    def unset_local(self, name):
        """Remove a name from the interpreter's local namespace."""
        del self._interpreter.locals[name]
    def clear_locals(self):
        """Empty the interpreter's local namespace."""
        self._interpreter.locals.clear()
    def process_input(self, line, stdout, stdin, stderr):
        """Buffer a line; execute once a complete statement accumulates.

        Returns None while more input is needed, otherwise the stringified
        execution result. NOTE(review): ConsoleTerminal.run passes
        (line, stdout, stderr, stdin) — confirm which argument order is
        intended.
        """
        self._buffer.append(line)
        try:
            code = self._interpreter.try_compile('\n'.join(self._buffer))
        except InterpretError:
            # Syntax errors abandon the pending partial input.
            self._buffer.clear()
            raise
        if not code:
            return None # We need to get more input
        self._buffer.clear()
        return str(self._interpreter.exec_code(code, stdout, stdin, stderr))
|
()
|
50,778 |
iterminal.shell
|
__init__
| null |
def __init__(self):
    """Create an empty shell around a fresh Interpreter."""
    self._interpreter = Interpreter()
    # Directories searched by resolve_resource(), in order.
    self._resource_paths = []
    self._components = []
    self._buffer = [] # Holds partial code
|
(self)
|
50,779 |
iterminal.shell
|
add_component
| null |
def add_component(self, comp):
    """Register a component; init order follows registration order."""
    self._components.append(comp)
|
(self, comp)
|
50,780 |
iterminal.shell
|
add_resource_path
| null |
def add_resource_path(self, path):
    """Append a directory to the resource search path."""
    self._resource_paths.append(path)
|
(self, path)
|
50,781 |
iterminal.shell
|
clear_locals
| null |
def clear_locals(self):
    """Empty the interpreter's local namespace."""
    self._interpreter.locals.clear()
|
(self)
|
50,782 |
iterminal.shell
|
deinit
| null |
def deinit(self):
    """Dispose components in reverse registration order."""
    for comp in reversed(self._components):
        comp.dispose(self)
|
(self)
|
50,783 |
iterminal.shell
|
init
| null |
def init(self):
    """Initialise components in registration order."""
    for comp in self._components:
        comp.init(self)
|
(self)
|
50,784 |
iterminal.shell
|
process_input
| null |
def process_input(self, line, stdout, stdin, stderr):
    """Buffer a line; execute once a complete statement has accumulated.

    Returns None while more input is needed, otherwise the stringified
    execution result. NOTE(review): ConsoleTerminal.run passes
    (line, stdout, stderr, stdin) — confirm which argument order is
    intended.
    """
    self._buffer.append(line)
    try:
        code = self._interpreter.try_compile('\n'.join(self._buffer))
    except InterpretError:
        # Syntax errors abandon the pending partial input.
        self._buffer.clear()
        raise
    if not code:
        return None # We need to get more input
    self._buffer.clear()
    return str(self._interpreter.exec_code(code, stdout, stdin, stderr))
|
(self, line, stdout, stdin, stderr)
|
50,785 |
iterminal.shell
|
resolve_resource
| null |
def resolve_resource(self, filename):
    """Return the first existing path for *filename* among the resource
    search paths, or None when no candidate is a file."""
    for base in self._resource_paths:
        candidate = os.path.join(base, filename)
        if os.path.isfile(candidate):
            return candidate
    return None
|
(self, filename)
|
50,786 |
iterminal.shell
|
run_resource
| null |
def run_resource(self, filename):
    """Locate *filename* via the resource paths and execute it.

    Returns True when the script was found and executed, False otherwise.
    """
    path = self.resolve_resource(filename)
    if not path:
        return False
    self._interpreter.exec_file(path)
    return True
|
(self, filename)
|
50,787 |
iterminal.shell
|
set_local
| null |
def set_local(self, name, val):
    """Bind a name in the interpreter's local namespace."""
    self._interpreter.locals[name] = val
|
(self, name, val)
|
50,788 |
iterminal.shell
|
unset_local
| null |
def unset_local(self, name):
    """Remove a name from the interpreter's local namespace."""
    del self._interpreter.locals[name]
|
(self, name)
|
50,789 |
insteonterminal
|
SysConfig
| null |
class SysConfig(Component):
    """Component that loads the Insteon message definitions into the shell."""

    def __init__(self):
        super().__init__('sys_config')

    def init(self, shell):
        """Load the XML message definitions and expose them as 'definitions'."""
        # Imported lazily so the XML reader is only loaded when needed.
        import insteon.io.xmlmsgreader
        shell.set_local('definitions', insteon.io.xmlmsgreader.read_default_xml())

    def dispose(self, shell):
        """Remove the 'definitions' local registered by init().

        Fixed: the method was misspelled 'dipose', so Component's no-op
        dispose() ran instead and 'definitions' was never unset on deinit.
        """
        shell.unset_local('definitions')

    # Backward-compatible alias for the original misspelled name.
    dipose = dispose
|
()
|
50,790 |
insteonterminal
|
__init__
| null |
def __init__(self):
    """Register the component under the name 'sys_config'."""
    super().__init__('sys_config')
|
(self)
|
50,791 |
insteonterminal
|
dipose
| null |
def dipose(self, shell):
    """Remove the 'definitions' shell local.

    NOTE(review): the name misspells 'dispose' — Shell.deinit() calls
    comp.dispose(), so this method is never invoked and 'definitions'
    is never unset. Renaming here would change this block's interface;
    flagged for a coordinated fix.
    """
    shell.unset_local('definitions')
|
(self, shell)
|
50,793 |
insteonterminal
|
init
| null |
def init(self, shell):
    """Load the XML message definitions and expose them as 'definitions'."""
    # Imported lazily so the XML reader is only loaded when needed.
    import insteon.io.xmlmsgreader
    shell.set_local('definitions', insteon.io.xmlmsgreader.read_default_xml())
|
(self, shell)
|
50,798 |
insteonterminal
|
run
| null |
def run():
    """Build and start the interactive Insteon terminal shell."""
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists guards.
    os.makedirs(config_dir, exist_ok=True)
    os.makedirs(log_dir, exist_ok=True)
    shell = Shell()
    shell.add_resource_path('.')
    shell.add_resource_path(config_dir)
    terminal = ConsoleTerminal()
    terminal.add_prompt_listener(lambda p, v: prompt_logger.info(v))
    # Order matters: components initialise in insertion order and are
    # disposed in reverse.
    shell.add_component(InterpreterSetup())
    shell.add_component(LoggingSetup(terminal.stdout, terminal.stderr))
    shell.add_component(Commands())
    shell.add_component(ConsoleCommands(terminal))
    shell.add_component(SysConfig())
    shell.add_component(LoadScript('init.py'))
    terminal.run(shell)
|
()
|
50,799 |
cengal.file_system.app_fs_structure.app_dir_path.versions.v_0.app_dir_path_linux
|
AppDirPath
| null |
class AppDirPath(AppDirPathBase):
    """Linux implementation of AppDirPathBase using XDG conventions."""

    def __init__(self, max_cache_size: Optional[int] = None) -> None:
        super().__init__(max_cache_size)

    def dir_type_mapping(self, dir_type: AppDirectoryType) -> DirTypeMappingItem:
        """Delegate to the module-level mapping table (same name as this method)."""
        return dir_type_mapping[dir_type]

    def base_dir_id_to_path(self, base_dir_id: BaseDirID) -> str:
        """Resolve a LinuxDirectoryType to an absolute base directory.

        Table-driven rewrite of the original 20-branch if/elif chain;
        duplicated env lookups are computed once. Unknown ids resolve to
        None, exactly as before.
        """
        home = os.path.expanduser('~')
        xdg_state = os.environ.get('XDG_STATE_HOME', os.path.join(home, '.local', 'state'))
        xdg_data = os.environ.get('XDG_DATA_HOME', os.path.join(home, '.local', 'share'))
        xdg_cache = os.environ.get('XDG_CACHE_HOME', os.path.join(home, '.cache'))
        xdg_config = os.environ.get('XDG_CONFIG_HOME', os.path.join(home, '.config'))
        xdg_runtime = os.environ.get('XDG_RUNTIME_DIR', os.path.join(home, '.tmp'))
        mapping = {
            LinuxDirectoryType.local_data: xdg_state,
            LinuxDirectoryType.local_static_data: xdg_data,
            LinuxDirectoryType.local_cache: xdg_cache,
            LinuxDirectoryType.local_config: xdg_config,
            LinuxDirectoryType.local_log: xdg_state,
            # NOTE(review): mapping temp to XDG_RUNTIME_DIR mirrors the
            # original code; confirm TMPDIR was not intended.
            LinuxDirectoryType.local_temp: xdg_runtime,
            LinuxDirectoryType.local_runtime: xdg_runtime,
            LinuxDirectoryType.program_data: '/var/lib',
            LinuxDirectoryType.program_static_data: '/usr/share',
            LinuxDirectoryType.program_cache: '/var/cache',
            LinuxDirectoryType.program_config: '/etc',
            LinuxDirectoryType.program_log: '/var/log',
            LinuxDirectoryType.program_temp: '/tmp',
            LinuxDirectoryType.program_runtime: '/var/run',
            LinuxDirectoryType.program_files: '/opt',
            LinuxDirectoryType.user_profile_data: home,
            LinuxDirectoryType.user_profile_program_files: xdg_data,
            LinuxDirectoryType.user_profile_program_files_common: xdg_data,
        }
        return mapping.get(base_dir_id)
|
(max_cache_size: Optional[int] = None) -> None
|
50,800 |
cengal.file_system.app_fs_structure.app_dir_path.versions.v_0.app_dir_path_base
|
__call__
| null |
def __call__(self, dir_type: AppDirectoryType, app_name_or_path: Optional[DirNameOrPath] = None, with_structure: bool = True, ensure_dir: bool = True) -> str:
    """Assemble (and optionally create) the directory path for dir_type.

    NOTE(review): str(app_name_or_path) collapses a sequence argument to
    its repr — confirm callers only pass strings here.
    """
    if app_name_or_path is not None:
        app_name_or_path = str(app_name_or_path)
    if AppDirectoryType.user_profile_data == dir_type:
        # Home-directory app dirs are conventionally hidden (dot-prefixed).
        if app_name_or_path is not None:
            app_name_or_path = '.' + os.path.join(*norm_dir_name_or_path(app_name_or_path))
    mapping: DirTypeMappingItem = self.dir_type_mapping(dir_type)
    base_dir_path = self.base_dir_id_to_path(mapping[0])
    result_list = [base_dir_path]
    result_list.extend(norm_dir_name_or_path(mapping[1]))
    if app_name_or_path is not None:
        result_list.extend(norm_dir_name_or_path(app_name_or_path))
    if with_structure:
        result_list.extend(norm_dir_name_or_path(mapping[2]))
    result_path = os.path.normpath(os.path.join(*result_list))
    if ensure_dir:
        ensure_dir_exists(result_path)
    return result_path
|
(self, dir_type: cengal.file_system.app_fs_structure.app_dir_path.versions.v_0.app_directory_types.AppDirectoryType, app_name_or_path: Union[str, Sequence[str], NoneType] = None, with_structure: bool = True, ensure_dir: bool = True) -> str
|
50,801 |
cengal.file_system.app_fs_structure.app_dir_path.versions.v_0.app_dir_path_linux
|
__init__
| null |
def __init__(self, max_cache_size: Optional[int] = None) -> None:
    """Delegate cache-size setup to AppDirPathBase."""
    super().__init__(max_cache_size)
|
(self, max_cache_size: Optional[int] = None) -> NoneType
|
50,802 |
cengal.file_system.app_fs_structure.app_dir_path.versions.v_0.app_dir_path_linux
|
base_dir_id_to_path
| null |
def base_dir_id_to_path(self, base_dir_id: BaseDirID) -> str:
    """Resolve a LinuxDirectoryType to an absolute base directory.

    Table-driven rewrite of the original 20-branch if/elif chain;
    duplicated env lookups are computed once. Unknown ids resolve to
    None, exactly as before.
    """
    home = os.path.expanduser('~')
    xdg_state = os.environ.get('XDG_STATE_HOME', os.path.join(home, '.local', 'state'))
    xdg_data = os.environ.get('XDG_DATA_HOME', os.path.join(home, '.local', 'share'))
    xdg_cache = os.environ.get('XDG_CACHE_HOME', os.path.join(home, '.cache'))
    xdg_config = os.environ.get('XDG_CONFIG_HOME', os.path.join(home, '.config'))
    xdg_runtime = os.environ.get('XDG_RUNTIME_DIR', os.path.join(home, '.tmp'))
    mapping = {
        LinuxDirectoryType.local_data: xdg_state,
        LinuxDirectoryType.local_static_data: xdg_data,
        LinuxDirectoryType.local_cache: xdg_cache,
        LinuxDirectoryType.local_config: xdg_config,
        LinuxDirectoryType.local_log: xdg_state,
        # NOTE(review): mapping temp to XDG_RUNTIME_DIR mirrors the
        # original code; confirm TMPDIR was not intended.
        LinuxDirectoryType.local_temp: xdg_runtime,
        LinuxDirectoryType.local_runtime: xdg_runtime,
        LinuxDirectoryType.program_data: '/var/lib',
        LinuxDirectoryType.program_static_data: '/usr/share',
        LinuxDirectoryType.program_cache: '/var/cache',
        LinuxDirectoryType.program_config: '/etc',
        LinuxDirectoryType.program_log: '/var/log',
        LinuxDirectoryType.program_temp: '/tmp',
        LinuxDirectoryType.program_runtime: '/var/run',
        LinuxDirectoryType.program_files: '/opt',
        LinuxDirectoryType.user_profile_data: home,
        LinuxDirectoryType.user_profile_program_files: xdg_data,
        LinuxDirectoryType.user_profile_program_files_common: xdg_data,
    }
    return mapping.get(base_dir_id)
|
(self, base_dir_id: Any) -> str
|
50,803 |
cengal.file_system.app_fs_structure.app_dir_path.versions.v_0.app_dir_path_base
|
cached
| null |
def cached(self, dir_type: AppDirectoryType, app_name_or_path: Optional[DirNameOrPath] = None, with_structure: bool = True, ensure_dir: bool = True) -> str:
    """Memoised variant of __call__ (wrapper rebuilt by set_cache_size)."""
    return self._cached(dir_type, app_name_or_path, with_structure, ensure_dir)
|
(self, dir_type: cengal.file_system.app_fs_structure.app_dir_path.versions.v_0.app_directory_types.AppDirectoryType, app_name_or_path: Union[str, Sequence[str], NoneType] = None, with_structure: bool = True, ensure_dir: bool = True) -> str
|
50,804 |
cengal.file_system.app_fs_structure.app_dir_path.versions.v_0.app_dir_path_linux
|
dir_type_mapping
| null |
def dir_type_mapping(self, dir_type: AppDirectoryType) -> DirTypeMappingItem:
    """Look up dir_type in the module-level table.

    NOTE(review): the method deliberately shares its name with the
    module-level `dir_type_mapping` dict it indexes.
    """
    return dir_type_mapping[dir_type]
|
(self, dir_type: cengal.file_system.app_fs_structure.app_dir_path.versions.v_0.app_directory_types.AppDirectoryType) -> Tuple[Any, Union[str, Sequence[str], NoneType], Union[str, Sequence[str], NoneType]]
|
50,805 |
cengal.file_system.app_fs_structure.app_dir_path.versions.v_0.app_dir_path_base
|
get_cache_size
| null |
def get_cache_size(self) -> int:
    """Return the configured cache capacity (not the current fill level)."""
    return self._max_cache_size
|
(self) -> int
|
50,806 |
cengal.file_system.app_fs_structure.app_dir_path.versions.v_0.app_dir_path_base
|
set_cache_size
| null |
def set_cache_size(self, max_cache_size: int) -> None:
    """Rebuild the memoised wrapper with a new maxsize (drops old entries).

    NOTE(review): wrapping the bound __call__ in a per-instance lru_cache
    keeps this instance alive for the wrapper's lifetime — confirm that
    is acceptable here.
    """
    self._max_cache_size = max_cache_size
    del self._cached
    self._cached = lru_cache(maxsize=max_cache_size, typed=True)(self.__call__)
|
(self, max_cache_size: int) -> NoneType
|
50,807 |
cengal.file_system.app_fs_structure.app_dir_path.versions.v_0.app_dir_path_base
|
AppDirPathBase
| null |
class AppDirPathBase:
    """OS-agnostic base for resolving application directory paths.

    Subclasses implement dir_type_mapping() and base_dir_id_to_path();
    __call__ assembles the final path and cached() memoises it.
    """
    def __init__(self, max_cache_size: Optional[int] = None) -> None:
        if max_cache_size is None:
            max_cache_size = len(AppDirectoryType) * 2 * 2 * 10  # 10 - is a number of applications; 2 - is `with_structure` [True/False]; 2 - is `ensure_dir` [True/False]
        self._max_cache_size = max_cache_size
        # Placeholder so set_cache_size()'s `del self._cached` has an
        # attribute to delete on the first call.
        self._cached = self.__call__
        self.set_cache_size(max_cache_size)
    def get_cache_size(self) -> int:
        """Return the configured cache capacity (not the current fill level)."""
        return self._max_cache_size
    def set_cache_size(self, max_cache_size: int) -> None:
        """Rebuild the memoised wrapper with a new maxsize (drops old entries)."""
        self._max_cache_size = max_cache_size
        del self._cached
        # NOTE(review): per-instance lru_cache over the bound __call__
        # keeps this instance alive for the wrapper's lifetime.
        self._cached = lru_cache(maxsize=max_cache_size, typed=True)(self.__call__)
    def dir_type_mapping(self, dir_type: AppDirectoryType) -> DirTypeMappingItem:
        """Map an AppDirectoryType to (base_dir_id, prefix, structure); OS-specific."""
        raise NotImplementedError
    def base_dir_id_to_path(self, base_dir_id: BaseDirID) -> str:
        """Resolve an OS-specific base directory id to an absolute path."""
        raise NotImplementedError
    def __call__(self, dir_type: AppDirectoryType, app_name_or_path: Optional[DirNameOrPath] = None, with_structure: bool = True, ensure_dir: bool = True) -> str:
        """Assemble (and optionally create) the directory path for dir_type.

        NOTE(review): str(app_name_or_path) collapses a sequence argument
        to its repr — confirm callers only pass strings here.
        """
        if app_name_or_path is not None:
            app_name_or_path = str(app_name_or_path)
        if AppDirectoryType.user_profile_data == dir_type:
            # Home-directory app dirs are conventionally hidden (dot-prefixed).
            if app_name_or_path is not None:
                app_name_or_path = '.' + os.path.join(*norm_dir_name_or_path(app_name_or_path))
        mapping: DirTypeMappingItem = self.dir_type_mapping(dir_type)
        base_dir_path = self.base_dir_id_to_path(mapping[0])
        result_list = [base_dir_path]
        result_list.extend(norm_dir_name_or_path(mapping[1]))
        if app_name_or_path is not None:
            result_list.extend(norm_dir_name_or_path(app_name_or_path))
        if with_structure:
            result_list.extend(norm_dir_name_or_path(mapping[2]))
        result_path = os.path.normpath(os.path.join(*result_list))
        if ensure_dir:
            ensure_dir_exists(result_path)
        return result_path
    def cached(self, dir_type: AppDirectoryType, app_name_or_path: Optional[DirNameOrPath] = None, with_structure: bool = True, ensure_dir: bool = True) -> str:
        """Memoised variant of __call__ (see set_cache_size)."""
        return self._cached(dir_type, app_name_or_path, with_structure, ensure_dir)
|
(max_cache_size: Optional[int] = None) -> None
|
50,809 |
cengal.file_system.app_fs_structure.app_dir_path.versions.v_0.app_dir_path_base
|
__init__
| null |
def __init__(self, max_cache_size: Optional[int] = None) -> None:
    """Size the memoisation cache and build the cached wrapper."""
    if max_cache_size is None:
        max_cache_size = len(AppDirectoryType) * 2 * 2 * 10  # 10 - is a number of applications; 2 - is `with_structure` [True/False]; 2 - is `ensure_dir` [True/False]
    self._max_cache_size = max_cache_size
    # Placeholder so set_cache_size()'s `del self._cached` has an
    # attribute to delete on the first call.
    self._cached = self.__call__
    self.set_cache_size(max_cache_size)
|
(self, max_cache_size: Optional[int] = None) -> NoneType
|
50,810 |
cengal.file_system.app_fs_structure.app_dir_path.versions.v_0.app_dir_path_base
|
base_dir_id_to_path
| null |
def base_dir_id_to_path(self, base_dir_id: BaseDirID) -> str:
    """Resolve an OS-specific base directory id to an absolute path; abstract."""
    raise NotImplementedError
|
(self, base_dir_id: Any) -> str
|
50,812 |
cengal.file_system.app_fs_structure.app_dir_path.versions.v_0.app_dir_path_base
|
dir_type_mapping
| null |
def dir_type_mapping(self, dir_type: AppDirectoryType) -> DirTypeMappingItem:
    """Map an AppDirectoryType to (base_dir_id, prefix, structure); abstract."""
    raise NotImplementedError
|
(self, dir_type: cengal.file_system.app_fs_structure.app_dir_path.versions.v_0.app_directory_types.AppDirectoryType) -> Tuple[Any, Union[str, Sequence[str], NoneType], Union[str, Sequence[str], NoneType]]
|
50,815 |
cengal.file_system.app_fs_structure.app_dir_path.versions.v_0.app_directory_types
|
AppDirectoryType
|
An enumeration.
|
class AppDirectoryType(IntEnum):
    """Logical application directory categories, mapped per-OS to real paths.

    Values are stable identifiers (note the gap at 26-27) — do not renumber.
    """
    local_data = 0  # local data is data that is not shared between users
    local_low_data = 1  # local data is data that is not shared between users. This folder is intended to be used by applications that require a low privilege level to operate, such as web browsers or sandboxed applications.
    roaming_data = 2  # roaming data is data that is not shared between users but might be shared and synchronised between devices of this user. On Mac OS X, automatic system backup for a small (few kbytes) files is used
    local_cache = 3  # Where user-specific non-essential (cached) data should be written
    local_low_cache = 4  # Where user-specific non-essential (cached) data should be written
    roaming_cache = 5  # Where user-specific non-essential (cached) data should be written. Might be shared and synchronised between devices of this user
    local_temp = 6
    local_low_temp = 7
    roaming_temp = 8
    local_log = 9
    local_low_log = 10
    roaming_log = 11
    local_config = 12
    local_low_config = 13
    roaming_config = 14
    local_runtime = 15  # Used for non-essential, user-specific data files such as sockets, named pipes, etc.
    local_low_runtime = 16  # Used for non-essential, user-specific data files such as sockets, named pipes, etc.
    roaming_runtime = 17  # Used for non-essential, user-specific data files such as sockets, named pipes, etc. Might be shared and synchronised between devices of this user
    program_files = 18  # program files is program static data that is shared between users
    program_files_common = 19  # program files common is program static data that is shared between applications and users. For example libraries
    program_data = 20  # program data is non-static data that is shared between users
    program_cache = 21  # Where non-essential (cached) data should be written
    program_temp = 22
    program_log = 23
    program_config = 24
    program_runtime = 25  # Used for non-essential, data files such as sockets, named pipes, etc.
    user_profile_data = 28  # App data in user home directory. It is usually hidden directory
    user_profile_program_files = 29  # program files is program static data that is shared between users
    user_profile_program_files_common = 30  # program files common is program static data that is shared between applications and users. For example libraries
    local_static_data = 31
    local_low_static_data = 32
    roaming_static_data = 33
    program_static_data = 34
|
(value, names=None, *, module=None, qualname=None, type=None, start=1)
|
50,816 |
cengal.file_system.app_fs_structure.app_dir_path.versions.v_0.app_dir_exceptions
|
NotAwailableAppDirError
| null |
class NotAwailableAppDirError(Exception):
    """Raised when the requested application directory kind is not available.

    NOTE(review): the misspelling "Awailable" is part of the public name and
    cannot be corrected without breaking callers that catch this exception.
    """
    pass
| null |
50,821 |
cengal.file_system.app_fs_structure.app_dir_path.versions.v_0.app_dir_path_base
|
norm_dir_name_or_path
| null |
def norm_dir_name_or_path(dir_name_or_path: DirNameOrPath) -> Tuple[str, ...]:
    """Normalize a directory name or path into a tuple of path components.

    Args:
        dir_name_or_path: ``None``, a single directory name, or a
            list/tuple of directory names.

    Returns:
        ``()`` for ``None``, a 1-tuple for a string, otherwise a tuple of
        the sequence's elements.  Lists are converted so the declared
        return type ``Tuple[str, ...]`` always holds.

    Raises:
        TypeError: if the argument is not ``None``, ``str``, ``list`` or
            ``tuple``.
    """
    if dir_name_or_path is None:
        return ()
    elif isinstance(dir_name_or_path, str):
        return (dir_name_or_path,)
    elif isinstance(dir_name_or_path, (list, tuple)):
        # Bug fix: the original returned the list/tuple unchanged, so a list
        # argument violated the declared Tuple[str, ...] return type.
        return tuple(dir_name_or_path)
    else:
        raise TypeError('dir_name_or_path must be str, list or tuple')
|
(dir_name_or_path: Union[str, Sequence[str], NoneType]) -> Tuple[str, ...]
|
50,825 |
macry.builder
|
FireModel
| null |
class FireModel(FireStore, Field):
    """Base class for Firestore-backed models.

    Combines document persistence (FireStore) with descriptor-based field
    tracking (Field).  Nested FireModel/FireMap/FireArray values are wired
    together by ``route_paths()`` so that later field writes can be staged
    on the root document's update stack.
    """
    # Marker checked elsewhere (e.g. register_update) to recognize models.
    __type_name__ = 'BaseModel'
    def __init__(self, *args, **kwargs):
        # NOTE(review): ``self`` is passed positionally to super().__init__ —
        # confirm the cooperating base-class constructors expect that argument.
        super().__init__(self)
        self.update_dict = {}
        self.__firemodel_item__ = 'FireModel'
        # ``__required__`` is populated by Field.__set_name__ for required
        # fields; enforce that each one was supplied as a keyword.
        if hasattr(self, '__required__'):
            for attr in self.__required__:
                if attr not in kwargs:
                    raise TypeError(
                        f"{type(self).__name__}"
                        f"missing required keyword argument: '{attr}'")
        # Assign through __setattr__ so Field descriptors validate each value.
        for key in kwargs:
            self.__setattr__(key, kwargs[key])
    def to_dict(self):
        """Serialize this model's declared fields to a plain dict, recursively."""
        dict_repr = {}
        for key in self.__field_items__:
            # Missing or None fields serialize as None.
            if (self.__dict__.get(key) is None) or isinstance(
                    self.__dict__[key], type(None)):
                dict_repr[key] = None
            # Nested containers serialize through their own to_dict().
            elif isinstance(self.__dict__[key],
                            (FireModel, FireMap, FireArray)):
                dict_repr[key] = self.__dict__[key].to_dict()
            else:
                dict_repr[key] = self.__dict__[key]
        return dict_repr
    def __del__(self):
        """Drop weakref back-references on nested containers before teardown."""
        # Snapshot the keys: entries are deleted from __dict__ while looping.
        keys = list(self.__dict__.keys()).copy()
        for key in keys:
            if isinstance(self.__dict__[key],
                          (FireModel,
                           FireMap,
                           FireArray)):
                # Remove the child's stored root reference (set by
                # route_paths) so the root/child cycle is broken.
                if hasattr(self.__dict__[key], '__path__'):
                    del self.__dict__[key].__path__['root']
                del self.__dict__[key]
    def route_paths(self, mm: 'weakref.ref' = None, root_path: str = '',
                    root_object: 'weakref.ref' = None):
        """Walk nested models and record each child's dotted path and root.

        Stores on every nested FireModel/FireMap/FireArray a ``__path__``
        dict containing the dotted Firestore field path and a weak reference
        to the root model, so field writes can register updates on the root.
        """
        if not mm:
            mm = weakref.ref(self)
        if not root_object:
            root_object = mm
        model_dict = {}
        if isinstance(mm(), list):
            # Lists recurse element-wise; the path gains an "[i]" segment.
            for i, item in enumerate(mm()):
                if isinstance(item, (FireModel, FireMap, FireArray)):
                    self.route_paths(weakref.ref(item),
                                     root_path=f'{root_path}.[{i}]',
                                     root_object=root_object)
        elif isinstance(mm(), FireModel):
            model_dict = mm().__dict__
        elif isinstance(mm(), FireMap):
            model_dict = mm()
        else:
            return
        for n in model_dict:
            if isinstance(model_dict[n], (FireModel, FireMap, FireArray)):
                name = mm().__object_name__
                # strip('..') trims leading/trailing dots from the joined path.
                path = f"{root_path}.{name}.{n}".strip('..')
                model_dict[n].__path__ = {
                    'path': path,
                    'root': root_object
                }
                self.route_paths(
                    weakref.ref(model_dict[n]),
                    root_path=f"{root_path}.{name}",
                    root_object=root_object
                )
|
(*args, **kwargs)
|
50,826 |
macry.builder
|
__del__
| null |
def __del__(self):
    """Drop weakref back-references on nested containers before teardown."""
    # Snapshot the keys: entries are deleted from __dict__ during the loop.
    keys = list(self.__dict__.keys()).copy()
    for key in keys:
        if isinstance(self.__dict__[key],
                      (FireModel,
                       FireMap,
                       FireArray)):
            # Remove the child's stored root weakref (set by route_paths)
            # so the root/child reference cycle is broken.
            if hasattr(self.__dict__[key], '__path__'):
                del self.__dict__[key].__path__['root']
            del self.__dict__[key]
|
(self)
|
50,827 |
macry.fields
|
__get__
| null |
def __get__(self, instance, owner_cls):
    """Descriptor read: field value from the instance, or the descriptor on class access."""
    # Class-level access (no instance) hands back the descriptor itself.
    if instance is None:
        return self
    # Instance access: look the value up in the instance dict; missing -> None.
    return instance.__dict__.get(self.prop_name, None)
|
(self, instance, owner_cls)
|
50,828 |
macry.builder
|
__init__
| null |
def __init__(self, *args, **kwargs):
    """Validate required kwargs and assign fields through their descriptors."""
    # NOTE(review): ``self`` is passed positionally to super().__init__ —
    # confirm the cooperating base-class constructors expect that argument.
    super().__init__(self)
    self.update_dict = {}
    self.__firemodel_item__ = 'FireModel'
    # ``__required__`` is populated by Field.__set_name__ for required fields.
    if hasattr(self, '__required__'):
        for attr in self.__required__:
            if attr not in kwargs:
                # NOTE(review): message concatenates the class name and
                # "missing" without a separating space.
                raise TypeError(
                    f"{type(self).__name__}"
                    f"missing required keyword argument: '{attr}'")
    # Route through __setattr__ so Field descriptors validate each value.
    for key in kwargs:
        self.__setattr__(key, kwargs[key])
|
(self, *args, **kwargs)
|
50,829 |
macry.fields
|
__set__
| null |
def __set__(self, instance, value):
    """Descriptor write: validate, default None, and record the change.

    Only assigns (and registers a Firestore update) when the new value
    differs from the currently stored one.  Raises ValueError when the
    field's validator rejects the value.
    """
    if self.validate(value):
        if value is None:
            value = self.default
        # Compare against the stored value so unchanged assignments are no-ops.
        compare_to = self if instance is None else instance.__dict__.get(
            self.prop_name, None)
        if compare_to != value:
            instance.__dict__[self.prop_name] = value
            # Stage the change on the root model's update stack.
            self.register_update(instance, value)
    else:
        # Build a readable expected-type name (or list of names) for the error.
        if hasattr(self, 'entity') and self.entity:
            if isinstance(self.entity, list):
                _name = [e.__name__ for e in self.entity]
            else:
                _name = self.entity.__name__
        else:
            _name = type(self).__name__
        raise ValueError(f'{self.prop_name} must be a type: {_name}')
|
(self, instance, value)
|
50,830 |
macry.fields
|
__set_name__
| null |
def __set_name__(self, owner_cls, prop_name):
    """Register this field on its owner class at class-body execution time.

    Records the attribute name, adds it to the owner's ``__field_items__``
    set, registers model metadata in ``models_data`` when the owner declares
    ``__data_path__``, and tracks required fields in ``__required__``.
    """
    self.prop_name = prop_name
    # NOTE(review): hasattr also sees an inherited __field_items__, so a
    # subclass may share (and mutate) its parent's set — confirm intended.
    if not hasattr(owner_cls, '__field_items__'):
        owner_cls.__field_items__ = set()
    owner_cls.__field_items__.add(self.prop_name)
    if hasattr(owner_cls, '__data_path__'):
        data_path = owner_cls.__data_path__
        models_data[data_path] = {
            'type': owner_cls,
            'collection_type': owner_cls.__collection_type__,
            'repr_type': owner_cls.__repr_type__
        }
    if hasattr(self, 'required') and self.required:
        if '__required__' in owner_cls.__dict__:
            owner_cls.__required__.append(self.prop_name)
        else:
            # Bug fix: the first required field must be recorded too — the
            # original created an empty list and dropped self.prop_name,
            # so FireModel.__init__ never enforced the first required field.
            owner_cls.__required__ = [self.prop_name]
|
(self, owner_cls, prop_name)
|
50,831 |
macry.connects.firestore_connect
|
connect
| null |
def connect(self, collection_name,
            project=None,
            service_account_json=None):
    """Open a Firestore client and bind it to ``collection_name``.

    Args:
        collection_name: Firestore collection this store operates on.
        project: Optional GCP project id.
        service_account_json: Optional path to a service-account key file.
            When given, explicit scoped credentials are built from it;
            otherwise the environment's default credentials are used.
    """
    self.project = project
    self.collection_name = collection_name
    self.service_account_json = service_account_json
    self.client = None
    if self.service_account_json is not None:
        # Build scoped credentials from the provided key file.
        credentials = \
            service_account.Credentials.from_service_account_file(
                self.service_account_json,
                scopes=(
                    "https://www.googleapis.com/auth/cloud-platform",
                    "https://www.googleapis.com/auth/datastore",
                ),
            )
        self.client = firestore.Client(project=project,
                                       credentials=credentials)
    else:
        # Fall back to application-default credentials.
        self.client = firestore.Client(project=project)
    self.collection = self.client.collection(collection_name)
|
(self, collection_name, project=None, service_account_json=None)
|
50,832 |
macry.connects.firestore_connect
|
delete
| null |
def delete(self, key):
    """Delete the document stored under ``key`` from the bound collection."""
    self.collection.document(key).delete()
|
(self, key)
|
50,833 |
macry.connects.firestore_connect
|
get_all
|
Get all models from firestore
|
def get_all(self, cls):
    """
    Get all models from firestore.

    Streams every document in the collection, deserializes each one into a
    ``cls`` instance keyed by document id, wires its nested paths via
    ``route_paths()``, caches the mapping on ``self.items`` and returns it.
    """
    self.items = {}
    for item in self.collection.stream():
        self.items[item.id] = cls.from_dict(item.to_dict())
        # Remember the Firestore document id on the model itself.
        self.items[item.id].key = item.id
        # Wire nested models so field writes reach the root update stack.
        self.items[item.id].route_paths()
    return self.items
|
(self, cls)
|
50,834 |
macry.connects.firestore_connect
|
get_docs
| null |
def get_docs(self):
    """Return the contents of every document in the collection as plain dicts."""
    return [snapshot.to_dict() for snapshot in self.collection.stream()]
|
(self)
|
50,835 |
macry.connects.firestore_connect
|
query
| null |
def query(self, *filters):
    """Apply each (field, op, value) filter and lazily yield (doc_id, dict) pairs."""
    filtered = self.collection
    for condition in filters:
        filtered = filtered.where(*condition)
    # Lazy, like the original map object: nothing streams until iterated.
    return ((snapshot.id, snapshot.to_dict()) for snapshot in filtered.stream())
|
(self, *filters)
|
50,836 |
macry.connects.firestore_connect
|
read
| null |
def read(self, key):
    """Fetch document ``key``: return a dict for plain FireStore, else hydrate ``self``."""
    self.key = key
    try:
        doc_ref = self.collection.document(key)
        doc = doc_ref.get()
    except (FailedPrecondition, PermissionDenied):
        # TODO - handle exception (auth/precondition failures)
        raise
    except Exception:
        # TODO - handle exception (currently a no-op re-raise)
        raise
    if type(self).__name__ == 'FireStore':
        # Plain store usage: hand back the raw document dict.
        return doc.to_dict()
    else:
        # TODO - convert doc to type lazy loading
        # Model usage: populate this instance's fields from the document.
        self.from_dict(doc.to_dict())
        self.key = doc.id
|
(self, key)
|
50,837 |
macry.fields
|
register_update
| null |
def register_update(self, instance, value):
    """Stage a pending Firestore field update on the root model's update stack.

    Called from ``__set__`` after a field value actually changed.  Requires
    ``instance.__path__`` (set by route_paths) for both the dotted field
    path and a weakref to the root model that owns ``update_stack``.
    """
    if hasattr(instance, '__path__'):
        if hasattr(value, '__type_name__') and \
                value.__type_name__ == 'BaseModel':
            # A whole nested model was assigned: serialize it, re-route
            # paths from the root, and stage the full dict at its path.
            update_value = value.to_dict()
            instance.__dict__[self.prop_name].route_paths(
                instance.__path__["root"],
            )
            instance.__dict__[self.prop_name].__object_added__ = True
            instance.__path__["root"]().update_stack[
                f'{instance.__path__["path"]}.{self.prop_name}'
            ] = update_value
        else:
            update_value = value
            if instance.__dict__.get('__object_added__', False):
                # Parent model is already staged whole: mutate its staged dict.
                instance.__path__["root"]().update_stack[
                    instance.__path__["path"]][self.prop_name] = \
                    update_value
            else:
                # Stage a single scalar field under its dotted path.
                instance.__path__["root"]().update_stack[
                    f'{instance.__path__["path"]}.{self.prop_name}'
                ] = update_value
        # logger.info(f'Set Update stack '
        #             f'{instance.__path__["path"]}.{self.prop_name},' + \
        #             f' Val: {value}, Object: {instance.__path__["root"]()}'
|
(self, instance, value)
|
50,838 |
macry.builder
|
route_paths
| null |
def route_paths(self, mm: 'weakref.ref' = None, root_path: str = '',
                root_object: 'weakref.ref' = None):
    """Walk nested models and record each child's dotted path and root.

    Stores on every nested FireModel/FireMap/FireArray a ``__path__`` dict
    with the dotted Firestore field path and a weak reference to the root
    model, so field writes can register updates on the root's update stack.
    """
    if not mm:
        mm = weakref.ref(self)
    if not root_object:
        root_object = mm
    model_dict = {}
    if isinstance(mm(), list):
        # Lists recurse element-wise; the path gains an "[i]" segment.
        for i, item in enumerate(mm()):
            if isinstance(item, (FireModel, FireMap, FireArray)):
                self.route_paths(weakref.ref(item),
                                 root_path=f'{root_path}.[{i}]',
                                 root_object=root_object)
    elif isinstance(mm(), FireModel):
        model_dict = mm().__dict__
    elif isinstance(mm(), FireMap):
        model_dict = mm()
    else:
        return
    for n in model_dict:
        if isinstance(model_dict[n], (FireModel, FireMap, FireArray)):
            name = mm().__object_name__
            # strip('..') trims leading/trailing dots from the joined path.
            path = f"{root_path}.{name}.{n}".strip('..')
            model_dict[n].__path__ = {
                'path': path,
                'root': root_object
            }
            self.route_paths(
                weakref.ref(model_dict[n]),
                root_path=f"{root_path}.{name}",
                root_object=root_object
            )
|
(self, mm: <module 'weakref' from '/usr/local/lib/python3.10/weakref.py'> = None, root_path: str = '', root_object: <module 'weakref' from '/usr/local/lib/python3.10/weakref.py'> = None)
|
50,839 |
macry.connects.firestore_connect
|
set
| null |
def set(self, key, data):
    """Write ``data`` to document ``key`` (full overwrite, ``merge=False``).

    Falls back to ``self.key`` when no key is given, and to ``self.to_json()``
    when no data is given on a model subclass (anything not named FireStore).

    Raises:
        FailedPrecondition / PermissionDenied / ServiceUnavailable from the
        Firestore client are re-raised unchanged.
    """
    if not key:
        key = self.key
    if type(self).__name__ != 'FireStore' and not data:
        data = self.to_json()
    try:
        # Bug fix: write to the requested document — the original used
        # ``self.key`` here, silently ignoring an explicitly passed key.
        doc_ref = self.collection.document(key)
        doc_ref.set(data, merge=False)
    except (FailedPrecondition, PermissionDenied):
        # TODO - handle exception
        raise
    except ServiceUnavailable:
        # TODO - handle exception
        raise
|
(self, key, data)
|
50,840 |
macry.builder
|
to_dict
| null |
def to_dict(self):
    """Serialize the model's declared fields into a plain dict.

    Missing or None fields map to None; nested FireModel/FireMap/FireArray
    values are serialized recursively via their own ``to_dict()``; all other
    values are copied as-is.
    """
    dict_repr = {}
    for key in self.__field_items__:
        value = self.__dict__.get(key)
        # Simplified: ``value is None`` already covers the original's
        # redundant ``isinstance(..., type(None))`` second check.
        if value is None:
            dict_repr[key] = None
        elif isinstance(value, (FireModel, FireMap, FireArray)):
            dict_repr[key] = value.to_dict()
        else:
            dict_repr[key] = value
    return dict_repr
|
(self)
|
50,841 |
macry.connects.firestore_connect
|
update
| null |
def update(self):
    """Flush each cached model's pending field updates to Firestore."""
    for key, model in self.items.items():
        pending = model.update_stack
        # Skip models with nothing staged — no round trip needed.
        if pending:
            self.collection.document(key).update(pending)
|
(self)
|
50,848 |
parameterized.parameterized
|
param
|
Represents a single parameter to a test case.
For example::
>>> p = param("foo", bar=16)
>>> p
param("foo", bar=16)
>>> p.args
('foo', )
>>> p.kwargs
{'bar': 16}
Intended to be used as an argument to ``@parameterized``::
@parameterized([
param("foo", bar=16),
])
def test_stuff(foo, bar=16):
pass
|
class param(_param):
    """ Represents a single parameter to a test case.
        For example::
            >>> p = param("foo", bar=16)
            >>> p
            param("foo", bar=16)
            >>> p.args
            ('foo', )
            >>> p.kwargs
            {'bar': 16}
        Intended to be used as an argument to ``@parameterized``::
            @parameterized([
                param("foo", bar=16),
            ])
            def test_stuff(foo, bar=16):
                pass
        """
    def __new__(cls, *args , **kwargs):
        # Pack the call's positionals and keywords into the two namedtuple
        # fields (args, kwargs) — they are stored whole, not splatted.
        return _param.__new__(cls, args, kwargs)
    @classmethod
    def explicit(cls, args=None, kwargs=None):
        """ Creates a ``param`` by explicitly specifying ``args`` and
            ``kwargs``::
                >>> param.explicit([1,2,3])
                param(*(1, 2, 3))
                >>> param.explicit(kwargs={"foo": 42})
                param(*(), **{"foo": "42"})
            """
        args = args or ()
        kwargs = kwargs or {}
        return cls(*args, **kwargs)
    @classmethod
    def from_decorator(cls, args):
        """ Returns an instance of ``param()`` for ``@parameterized`` argument
            ``args``::
                >>> param.from_decorator((42, ))
                param(args=(42, ), kwargs={})
                >>> param.from_decorator("foo")
                param(args=("foo", ), kwargs={})
            """
        if isinstance(args, param):
            return args
        elif isinstance(args, (str, bytes)) or not isinstance(args, Iterable):
            # Strings/bytes and non-iterables are one argument, not a tuple.
            args = (args, )
        try:
            return cls(*args)
        except TypeError as e:
            # Re-raise unrelated TypeErrors; translate the splat failure
            # into a friendlier message about tuple parameters.
            if "after * must be" not in str(e):
                raise
            raise TypeError(
                "Parameters must be tuples, but %r is not (hint: use '(%r, )')"
                %(args, args),
            )
    def __repr__(self):
        # ``self`` is the (args, kwargs) pair feeding the two %r slots.
        return "param(*%r, **%r)" %self
|
(*args, **kwargs)
|
50,850 |
parameterized.parameterized
|
__new__
| null |
def __new__(cls, *args , **kwargs):
    # Pack positionals and keywords into the two underlying namedtuple
    # fields (args, kwargs) — stored whole, not splatted.
    return _param.__new__(cls, args, kwargs)
|
(cls, *args, **kwargs)
|
50,851 |
parameterized.parameterized
|
__repr__
| null |
def __repr__(self):
    # ``self`` is a 2-tuple (args, kwargs), so it feeds the two %r slots.
    return "param(*%r, **%r)" %self
|
(self)
|
50,853 |
collections
|
_replace
|
Return a new param object replacing specified fields with new values
|
def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None):
    """Returns a new subclass of tuple with named fields.
    >>> Point = namedtuple('Point', ['x', 'y'])
    >>> Point.__doc__                   # docstring for the new class
    'Point(x, y)'
    >>> p = Point(11, y=22)             # instantiate with positional args or keywords
    >>> p[0] + p[1]                     # indexable like a plain tuple
    33
    >>> x, y = p                        # unpack like a regular tuple
    >>> x, y
    (11, 22)
    >>> p.x + p.y                       # fields also accessible by name
    33
    >>> d = p._asdict()                 # convert to a dictionary
    >>> d['x']
    11
    >>> Point(**d)                      # convert from a dictionary
    Point(x=11, y=22)
    >>> p._replace(x=100)               # _replace() is like str.replace() but targets named fields
    Point(x=100, y=22)
    """
    # NOTE(review): this appears to be a verbatim vendored copy of CPython's
    # collections.namedtuple (3.10-era: it includes __match_args__).
    # Validate the field names.  At the user's option, either generate an error
    # message or automatically replace the field name with a valid name.
    if isinstance(field_names, str):
        field_names = field_names.replace(',', ' ').split()
    field_names = list(map(str, field_names))
    typename = _sys.intern(str(typename))
    if rename:
        seen = set()
        for index, name in enumerate(field_names):
            if (not name.isidentifier()
                or _iskeyword(name)
                or name.startswith('_')
                or name in seen):
                field_names[index] = f'_{index}'
            seen.add(name)
    for name in [typename] + field_names:
        if type(name) is not str:
            raise TypeError('Type names and field names must be strings')
        if not name.isidentifier():
            raise ValueError('Type names and field names must be valid '
                             f'identifiers: {name!r}')
        if _iskeyword(name):
            raise ValueError('Type names and field names cannot be a '
                             f'keyword: {name!r}')
    seen = set()
    for name in field_names:
        if name.startswith('_') and not rename:
            raise ValueError('Field names cannot start with an underscore: '
                             f'{name!r}')
        if name in seen:
            raise ValueError(f'Encountered duplicate field name: {name!r}')
        seen.add(name)
    field_defaults = {}
    if defaults is not None:
        # Defaults apply to the rightmost fields, like function defaults.
        defaults = tuple(defaults)
        if len(defaults) > len(field_names):
            raise TypeError('Got more default values than field names')
        field_defaults = dict(reversed(list(zip(reversed(field_names),
                                                reversed(defaults)))))
    # Variables used in the methods and docstrings
    field_names = tuple(map(_sys.intern, field_names))
    num_fields = len(field_names)
    arg_list = ', '.join(field_names)
    if num_fields == 1:
        arg_list += ','
    repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')'
    tuple_new = tuple.__new__
    _dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip
    # Create all the named tuple methods to be added to the class namespace
    namespace = {
        '_tuple_new': tuple_new,
        '__builtins__': {},
        '__name__': f'namedtuple_{typename}',
    }
    # __new__ is generated with eval so its signature names the fields exactly.
    code = f'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))'
    __new__ = eval(code, namespace)
    __new__.__name__ = '__new__'
    __new__.__doc__ = f'Create new instance of {typename}({arg_list})'
    if defaults is not None:
        __new__.__defaults__ = defaults
    @classmethod
    def _make(cls, iterable):
        result = tuple_new(cls, iterable)
        if _len(result) != num_fields:
            raise TypeError(f'Expected {num_fields} arguments, got {len(result)}')
        return result
    _make.__func__.__doc__ = (f'Make a new {typename} object from a sequence '
                              'or iterable')
    def _replace(self, /, **kwds):
        result = self._make(_map(kwds.pop, field_names, self))
        if kwds:
            raise ValueError(f'Got unexpected field names: {list(kwds)!r}')
        return result
    _replace.__doc__ = (f'Return a new {typename} object replacing specified '
                        'fields with new values')
    def __repr__(self):
        'Return a nicely formatted representation string'
        return self.__class__.__name__ + repr_fmt % self
    def _asdict(self):
        'Return a new dict which maps field names to their values.'
        return _dict(_zip(self._fields, self))
    def __getnewargs__(self):
        'Return self as a plain tuple. Used by copy and pickle.'
        return _tuple(self)
    # Modify function metadata to help with introspection and debugging
    for method in (
        __new__,
        _make.__func__,
        _replace,
        __repr__,
        _asdict,
        __getnewargs__,
    ):
        method.__qualname__ = f'{typename}.{method.__name__}'
    # Build-up the class namespace dictionary
    # and use type() to build the result class
    class_namespace = {
        '__doc__': f'{typename}({arg_list})',
        '__slots__': (),
        '_fields': field_names,
        '_field_defaults': field_defaults,
        '__new__': __new__,
        '_make': _make,
        '_replace': _replace,
        '__repr__': __repr__,
        '_asdict': _asdict,
        '__getnewargs__': __getnewargs__,
        '__match_args__': field_names,
    }
    for index, name in enumerate(field_names):
        doc = _sys.intern(f'Alias for field number {index}')
        class_namespace[name] = _tuplegetter(index, doc)
    result = type(typename, (tuple,), class_namespace)
    # For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created.  Bypass this step in environments where
    # sys._getframe is not defined (Jython for example) or sys._getframe is not
    # defined for arguments greater than 0 (IronPython), or where the user has
    # specified a particular module.
    if module is None:
        try:
            module = _sys._getframe(1).f_globals.get('__name__', '__main__')
        except (AttributeError, ValueError):
            pass
    if module is not None:
        result.__module__ = module
    return result
|
(self, /, **kwds)
|
50,854 |
parameterized.parameterized
|
parameterized
|
Parameterize a test case::
class TestInt(object):
@parameterized([
("A", 10),
("F", 15),
param("10", 42, base=42)
])
def test_int(self, input, expected, base=16):
actual = int(input, base=base)
assert_equal(actual, expected)
@parameterized([
(2, 3, 5)
(3, 5, 8),
])
def test_add(a, b, expected):
assert_equal(a + b, expected)
|
class parameterized(object):
    """ Parameterize a test case::
            class TestInt(object):
                @parameterized([
                    ("A", 10),
                    ("F", 15),
                    param("10", 42, base=42)
                ])
                def test_int(self, input, expected, base=16):
                    actual = int(input, base=base)
                    assert_equal(actual, expected)
            @parameterized([
                (2, 3, 5)
                (3, 5, 8),
            ])
            def test_add(a, b, expected):
                assert_equal(a + b, expected)
        """
    def __init__(self, input, doc_func=None, skip_on_empty=False):
        # ``input`` may be an iterable of params or a zero-arg callable
        # producing one; normalize it to a callable now.
        self.get_input = self.input_as_callable(input)
        self.doc_func = doc_func or default_doc_func
        self.skip_on_empty = skip_on_empty
    def __call__(self, test_func):
        """Wrap ``test_func`` in a generator yielding one nose test per param."""
        self.assert_not_in_testcase_subclass()
        @wraps(test_func)
        def wrapper(test_self=None):
            test_cls = test_self and type(test_self)
            if test_self is not None:
                if issubclass(test_cls, InstanceType):
                    raise TypeError((
                        "@parameterized can't be used with old-style classes, but "
                        "%r has an old-style class. Consider using a new-style "
                        "class, or '@parameterized.expand' "
                        "(see http://stackoverflow.com/q/54867/71522 for more "
                        "information on old-style classes)."
                    ) %(test_self, ))
            original_doc = wrapper.__doc__
            for num, args in enumerate(wrapper.parameterized_input):
                p = param.from_decorator(args)
                unbound_func, nose_tuple = self.param_as_nose_tuple(test_self, test_func, num, p)
                try:
                    wrapper.__doc__ = nose_tuple[0].__doc__
                    # Nose uses `getattr(instance, test_func.__name__)` to get
                    # a method bound to the test instance (as opposed to a
                    # method bound to the instance of the class created when
                    # tests were being enumerated). Set a value here to make
                    # sure nose can get the correct test method.
                    if test_self is not None:
                        setattr(test_cls, test_func.__name__, unbound_func)
                    yield nose_tuple
                finally:
                    if test_self is not None:
                        delattr(test_cls, test_func.__name__)
                    wrapper.__doc__ = original_doc
        input = self.get_input()
        if not input:
            if not self.skip_on_empty:
                raise ValueError(
                    "Parameters iterable is empty (hint: use "
                    "`parameterized([], skip_on_empty=True)` to skip "
                    "this test when the input is empty)"
                )
            # Empty input with skip_on_empty: substitute a skipping helper.
            wrapper = wraps(test_func)(skip_on_empty_helper)
        wrapper.parameterized_input = input
        wrapper.parameterized_func = test_func
        # Rename the original so test runners don't collect it twice.
        test_func.__name__ = "_parameterized_original_%s" %(test_func.__name__, )
        return wrapper
    def param_as_nose_tuple(self, test_self, func, num, p):
        """Build the (callable, *args, kwargs) tuple nose expects for one param."""
        # The lambda re-splits the nose call: the last positional argument
        # carries the kwargs dict appended below.
        nose_func = wraps(func)(lambda *args: func(*args[:-1], **args[-1]))
        nose_func.__doc__ = self.doc_func(func, num, p)
        # Track the unbound function because we need to setattr the unbound
        # function onto the class for nose to work (see comments above), and
        # Python 3 doesn't let us pull the function out of a bound method.
        unbound_func = nose_func
        if test_self is not None:
            # Under nose on Py2 we need to return an unbound method to make
            # sure that the `self` in the method is properly shared with the
            # `self` used in `setUp` and `tearDown`. But only there. Everyone
            # else needs a bound method.
            func_self = (
                None if PY2 and detect_runner() == "nose" else
                test_self
            )
            nose_func = make_method(nose_func, func_self, type(test_self))
        return unbound_func, (nose_func, ) + p.args + (p.kwargs or {}, )
    def assert_not_in_testcase_subclass(self):
        # Guard: generator-style parameterization silently breaks inside
        # unittest.TestCase subclasses, so fail loudly at decoration time.
        parent_classes = self._terrible_magic_get_defining_classes()
        if any(issubclass(cls, TestCase) for cls in parent_classes):
            raise Exception("Warning: '@parameterized' tests won't work "
                            "inside subclasses of 'TestCase' - use "
                            "'@parameterized.expand' instead.")
    def _terrible_magic_get_defining_classes(self):
        """ Returns the set of parent classes of the class currently being defined.
            Will likely only work if called from the ``parameterized`` decorator.
            This function is entirely @brandon_rhodes's fault, as he suggested
            the implementation: http://stackoverflow.com/a/8793684/71522
            """
        stack = inspect.stack()
        # Frame 4 is (heuristically) the class body applying the decorator.
        if len(stack) <= 4:
            return []
        frame = stack[4]
        # frame[4] holds that frame's source context lines (may be None).
        code_context = frame[4] and frame[4][0].strip()
        if not (code_context and code_context.startswith("class ")):
            return []
        # Pull the base-class list out of "class Name(Base1, Base2):".
        _, _, parents = code_context.partition("(")
        parents, _, _ = parents.partition(")")
        return eval("[" + parents + "]", frame[0].f_globals, frame[0].f_locals)
    @classmethod
    def input_as_callable(cls, input):
        # Normalize to a zero-argument callable; values are validated on call.
        if callable(input):
            return lambda: cls.check_input_values(input())
        input_values = cls.check_input_values(input)
        return lambda: input_values
    @classmethod
    def check_input_values(cls, input_values):
        # Explicitly convert non-list inputs to a list so that:
        # 1. A helpful exception will be raised if they aren't iterable, and
        # 2. Generators are unwrapped exactly once (otherwise `nosetests
        #    --processes=n` has issues; see:
        #    https://github.com/wolever/nose-parameterized/pull/31)
        if not isinstance(input_values, list):
            input_values = list(input_values)
        return [ param.from_decorator(p) for p in input_values ]
    @classmethod
    def expand(cls, input, name_func=None, doc_func=None, skip_on_empty=False,
               namespace=None, **legacy):
        """ A "brute force" method of parameterizing test cases. Creates new
            test cases and injects them into the namespace that the wrapped
            function is being defined in. Useful for parameterizing tests in
            subclasses of 'UnitTest', where Nose test generators don't work.
            :param input: An iterable of values to pass to the test function.
            :param name_func: A function that takes a single argument (the
                value from the input iterable) and returns a string to use as
                the name of the test case. If not provided, the name of the
                test case will be the name of the test function with the
                parameter value appended.
            :param doc_func: A function that takes a single argument (the
                value from the input iterable) and returns a string to use as
                the docstring of the test case. If not provided, the docstring
                of the test case will be the docstring of the test function.
            :param skip_on_empty: If True, the test will be skipped if the
                input iterable is empty. If False, a ValueError will be raised
                if the input iterable is empty.
            :param namespace: The namespace (dict-like) to inject the test cases
                into. If not provided, the namespace of the test function will
                be used.
            >>> @parameterized.expand([("foo", 1, 2)])
            ... def test_add1(name, input, expected):
            ...     actual = add1(input)
            ...     assert_equal(actual, expected)
            ...
            >>> locals()
            ... 'test_add1_foo_0': <function ...> ...
            >>>
            """
        if "testcase_func_name" in legacy:
            warnings.warn("testcase_func_name= is deprecated; use name_func=",
                          DeprecationWarning, stacklevel=2)
            if not name_func:
                name_func = legacy["testcase_func_name"]
        if "testcase_func_doc" in legacy:
            warnings.warn("testcase_func_doc= is deprecated; use doc_func=",
                          DeprecationWarning, stacklevel=2)
            if not doc_func:
                doc_func = legacy["testcase_func_doc"]
        doc_func = doc_func or default_doc_func
        name_func = name_func or default_name_func
        def parameterized_expand_wrapper(f, instance=None):
            # Inject generated tests into the caller's (or given) namespace.
            frame_locals = namespace
            if frame_locals is None:
                frame_locals = inspect.currentframe().f_back.f_locals
            parameters = cls.input_as_callable(input)()
            if not parameters:
                if not skip_on_empty:
                    raise ValueError(
                        "Parameters iterable is empty (hint: use "
                        "`parameterized.expand([], skip_on_empty=True)` to skip "
                        "this test when the input is empty)"
                    )
                return wraps(f)(skip_on_empty_helper)
            # Zero-pad test numbers so generated names sort correctly.
            digits = len(str(len(parameters) - 1))
            for num, p in enumerate(parameters):
                name = name_func(f, "{num:0>{digits}}".format(digits=digits, num=num), p)
                # If the original function has patches applied by 'mock.patch',
                # re-construct all patches on the just former decoration layer
                # of param_as_standalone_func so as not to share
                # patch objects between new functions
                nf = reapply_patches_if_need(f)
                frame_locals[name] = cls.param_as_standalone_func(p, nf, name)
                frame_locals[name].__doc__ = doc_func(f, num, p)
            # Delete original patches to prevent new function from evaluating
            # original patching object as well as re-constructed patches.
            delete_patches_if_need(f)
            f.__test__ = False
        return parameterized_expand_wrapper
    @classmethod
    def param_as_standalone_func(cls, p, func, name):
        """Bind param ``p``'s args/kwargs into a standalone test function ``name``."""
        if inspect.iscoroutinefunction(func):
            @wraps(func)
            async def standalone_func(*a, **kw):
                return await func(*(a + p.args), **p.kwargs, **kw)
        else:
            @wraps(func)
            def standalone_func(*a, **kw):
                return func(*(a + p.args), **p.kwargs, **kw)
        standalone_func.__name__ = name
        # place_as is used by py.test to determine what source file should be
        # used for this test.
        standalone_func.place_as = func
        # Remove __wrapped__ because py.test will try to look at __wrapped__
        # to determine which parameters should be used with this test case,
        # and obviously we don't need it to do any parameterization.
        try:
            del standalone_func.__wrapped__
        except AttributeError:
            pass
        return standalone_func
    @classmethod
    def to_safe_name(cls, s):
        # Coerce to str and collapse non-identifier characters to underscores.
        if not isinstance(s, str):
            s = str(s)
        return str(re.sub("[^a-zA-Z0-9_]+", "_", s))
|
(input, doc_func=None, skip_on_empty=False)
|
50,855 |
parameterized.parameterized
|
__call__
| null |
def __call__(self, test_func):
    """Wrap ``test_func`` in a generator that yields one nose test per param."""
    self.assert_not_in_testcase_subclass()
    @wraps(test_func)
    def wrapper(test_self=None):
        test_cls = test_self and type(test_self)
        if test_self is not None:
            if issubclass(test_cls, InstanceType):
                raise TypeError((
                    "@parameterized can't be used with old-style classes, but "
                    "%r has an old-style class. Consider using a new-style "
                    "class, or '@parameterized.expand' "
                    "(see http://stackoverflow.com/q/54867/71522 for more "
                    "information on old-style classes)."
                ) %(test_self, ))
        original_doc = wrapper.__doc__
        for num, args in enumerate(wrapper.parameterized_input):
            p = param.from_decorator(args)
            unbound_func, nose_tuple = self.param_as_nose_tuple(test_self, test_func, num, p)
            try:
                wrapper.__doc__ = nose_tuple[0].__doc__
                # Nose uses `getattr(instance, test_func.__name__)` to get
                # a method bound to the test instance (as opposed to a
                # method bound to the instance of the class created when
                # tests were being enumerated). Set a value here to make
                # sure nose can get the correct test method.
                if test_self is not None:
                    setattr(test_cls, test_func.__name__, unbound_func)
                yield nose_tuple
            finally:
                if test_self is not None:
                    delattr(test_cls, test_func.__name__)
                wrapper.__doc__ = original_doc
    input = self.get_input()
    if not input:
        if not self.skip_on_empty:
            raise ValueError(
                "Parameters iterable is empty (hint: use "
                "`parameterized([], skip_on_empty=True)` to skip "
                "this test when the input is empty)"
            )
        # Empty input with skip_on_empty: substitute a skipping helper.
        wrapper = wraps(test_func)(skip_on_empty_helper)
    wrapper.parameterized_input = input
    wrapper.parameterized_func = test_func
    # Rename the original so test runners don't collect it twice.
    test_func.__name__ = "_parameterized_original_%s" %(test_func.__name__, )
    return wrapper
|
(self, test_func)
|
50,856 |
parameterized.parameterized
|
__init__
| null |
def __init__(self, input, doc_func=None, skip_on_empty=False):
    """Capture the parameter source, doc generator and empty-input policy.

    ``input`` may be an iterable of params or a zero-arg callable producing one.
    """
    self.get_input = self.input_as_callable(input)
    self.doc_func = doc_func or default_doc_func
    self.skip_on_empty = skip_on_empty
|
(self, input, doc_func=None, skip_on_empty=False)
|
50,857 |
parameterized.parameterized
|
_terrible_magic_get_defining_classes
|
Returns the set of parent classes of the class currently being defined.
Will likely only work if called from the ``parameterized`` decorator.
This function is entirely @brandon_rhodes's fault, as he suggested
the implementation: http://stackoverflow.com/a/8793684/71522
|
def _terrible_magic_get_defining_classes(self):
    """ Returns the set of parent classes of the class currently being defined.
        Will likely only work if called from the ``parameterized`` decorator.
        This function is entirely @brandon_rhodes's fault, as he suggested
        the implementation: http://stackoverflow.com/a/8793684/71522
        """
    stack = inspect.stack()
    # Frame 4 is (heuristically) the class body that applied the decorator.
    if len(stack) <= 4:
        return []
    frame = stack[4]
    # frame[4] holds that frame's source context lines (may be None).
    code_context = frame[4] and frame[4][0].strip()
    if not (code_context and code_context.startswith("class ")):
        return []
    # Pull the base-class list out of "class Name(Base1, Base2):".
    _, _, parents = code_context.partition("(")
    parents, _, _ = parents.partition(")")
    # NOTE: eval of source text — it evaluates the user's own class
    # definition line in that frame's namespace, not external input.
    return eval("[" + parents + "]", frame[0].f_globals, frame[0].f_locals)
|
(self)
|
50,858 |
parameterized.parameterized
|
assert_not_in_testcase_subclass
| null |
def assert_not_in_testcase_subclass(self):
    """Fail fast when @parameterized decorates a method inside a TestCase subclass."""
    for parent in self._terrible_magic_get_defining_classes():
        if issubclass(parent, TestCase):
            raise Exception("Warning: '@parameterized' tests won't work "
                            "inside subclasses of 'TestCase' - use "
                            "'@parameterized.expand' instead.")
|
(self)
|
50,859 |
parameterized.parameterized
|
param_as_nose_tuple
| null |
def param_as_nose_tuple(self, test_self, func, num, p):
    """Build the tuple nose expects for one parameterized case.

    Returns ``(unbound_func, (callable, *args, kwargs))`` — the kwargs dict
    rides along as the final positional element and is splatted by the
    lambda below.
    """
    # The lambda re-splits the nose call: last positional arg is the kwargs dict.
    nose_func = wraps(func)(lambda *args: func(*args[:-1], **args[-1]))
    nose_func.__doc__ = self.doc_func(func, num, p)
    # Track the unbound function because we need to setattr the unbound
    # function onto the class for nose to work (see comments above), and
    # Python 3 doesn't let us pull the function out of a bound method.
    unbound_func = nose_func
    if test_self is not None:
        # Under nose on Py2 we need to return an unbound method to make
        # sure that the `self` in the method is properly shared with the
        # `self` used in `setUp` and `tearDown`. But only there. Everyone
        # else needs a bound method.
        func_self = (
            None if PY2 and detect_runner() == "nose" else
            test_self
        )
        nose_func = make_method(nose_func, func_self, type(test_self))
    return unbound_func, (nose_func, ) + p.args + (p.kwargs or {}, )
|
(self, test_self, func, num, p)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.