Dataset columns: index (int64, 0 to 731k), package (string, 2 to 98 chars), name (string, 1 to 76 chars), docstring (string, 0 to 281k chars), code (string, 4 to 1.07M chars), signature (string, 2 to 42.8k chars)
52,264
osmnx.routing
shortest_path
Solve shortest path from origin node(s) to destination node(s). Uses Dijkstra's algorithm. If `orig` and `dest` are single node IDs, this will return a list of the nodes constituting the shortest path between them. If `orig` and `dest` are lists of node IDs, this will return a list of lists of the nodes constituting the shortest path between each origin-destination pair. If a path cannot be solved, this will return None for that path. You can parallelize solving multiple paths with the `cpus` parameter, but be careful to not exceed your available RAM. See also `k_shortest_paths` to solve multiple shortest paths between a single origin and destination. For additional functionality or different solver algorithms, use NetworkX directly. Parameters ---------- G : networkx.MultiDiGraph input graph orig : int or list origin node ID, or a list of origin node IDs dest : int or list destination node ID, or a list of destination node IDs weight : string edge attribute to minimize when solving shortest path cpus : int how many CPU cores to use; if None, use all available Returns ------- path : list list of node IDs constituting the shortest path, or, if orig and dest are lists, then a list of path lists
def shortest_path(G, orig, dest, weight="length", cpus=1): """ Solve shortest path from origin node(s) to destination node(s). Uses Dijkstra's algorithm. If `orig` and `dest` are single node IDs, this will return a list of the nodes constituting the shortest path between them. If `orig` and `dest` are lists of node IDs, this will return a list of lists of the nodes constituting the shortest path between each origin-destination pair. If a path cannot be solved, this will return None for that path. You can parallelize solving multiple paths with the `cpus` parameter, but be careful to not exceed your available RAM. See also `k_shortest_paths` to solve multiple shortest paths between a single origin and destination. For additional functionality or different solver algorithms, use NetworkX directly. Parameters ---------- G : networkx.MultiDiGraph input graph orig : int or list origin node ID, or a list of origin node IDs dest : int or list destination node ID, or a list of destination node IDs weight : string edge attribute to minimize when solving shortest path cpus : int how many CPU cores to use; if None, use all available Returns ------- path : list list of node IDs constituting the shortest path, or, if orig and dest are lists, then a list of path lists """ _verify_edge_attribute(G, weight) # if neither orig nor dest is iterable, just return the shortest path if not (hasattr(orig, "__iter__") or hasattr(dest, "__iter__")): return _single_shortest_path(G, orig, dest, weight) # if only 1 of orig or dest is iterable and the other is not, raise error if not (hasattr(orig, "__iter__") and hasattr(dest, "__iter__")): msg = "orig and dest must either both be iterable or neither must be iterable" raise ValueError(msg) # if both orig and dest are iterable, ensure they have same lengths if len(orig) != len(dest): # pragma: no cover msg = "orig and dest must be of equal length" raise ValueError(msg) # determine how many cpu cores to use if cpus is None: cpus = mp.cpu_count() cpus = min(cpus, mp.cpu_count()) utils.log(f"Solving {len(orig)} paths with {cpus} CPUs...") # if single-threading, calculate each shortest path one at a time if cpus == 1: paths = [_single_shortest_path(G, o, d, weight) for o, d in zip(orig, dest)] # if multi-threading, calculate shortest paths in parallel else: args = ((G, o, d, weight) for o, d in zip(orig, dest)) with mp.get_context("spawn").Pool(cpus) as pool: paths = pool.starmap_async(_single_shortest_path, args).get() return paths
(G, orig, dest, weight='length', cpus=1)
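A minimal usage sketch for the function above. The place name, coordinates, and the `graph_from_place`/`nearest_nodes` helpers are not from this row; they are illustrative, assuming a recent OSMnx release with the `routing` module layout shown here.

import osmnx as ox

# Build a drivable street network and snap coordinates to graph nodes.
G = ox.graph_from_place("Piedmont, California, USA", network_type="drive")
orig = ox.distance.nearest_nodes(G, X=-122.231, Y=37.824)
dest = ox.distance.nearest_nodes(G, X=-122.222, Y=37.819)

# Single origin/destination: returns a list of node IDs (or None if unsolvable).
route = ox.routing.shortest_path(G, orig, dest, weight="length")

# Equal-length lists of origins/destinations: returns a list of paths.
# cpus > 1 parallelizes, at the cost of extra RAM per worker process.
routes = ox.routing.shortest_path(G, [orig, dest], [dest, orig], cpus=1)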
52,266
osmnx.simplification
simplify_graph
Simplify a graph's topology by removing interstitial nodes. This simplifies graph topology by removing all nodes that are not intersections or dead-ends, by creating an edge directly between the end points that encapsulate them while retaining the full geometry of the original edges, saved as a new `geometry` attribute on the new edge. Note that only simplified edges receive a `geometry` attribute. Some of the resulting consolidated edges may comprise multiple OSM ways, and if so, their multiple attribute values are stored as a list. Optionally, the simplified edges can receive a `merged_edges` attribute that contains a list of all the (u, v) node pairs that were merged together. Use the `edge_attrs_differ` parameter to relax simplification strictness. For example, `edge_attrs_differ=['osmid']` will retain every node whose incident edges have different OSM IDs. This lets you keep nodes at elbow two-way intersections (but be aware that sometimes individual blocks have multiple OSM IDs within them too). You could also use this parameter to retain nodes where sidewalks or bike lanes begin/end in the middle of a block. Parameters ---------- G : networkx.MultiDiGraph input graph strict : bool deprecated, do not use edge_attrs_differ : iterable An iterable of edge attribute names for relaxing the strictness of endpoint determination. If not None, a node is an endpoint if its incident edges have different values than each other for any of the edge attributes in `edge_attrs_differ`. endpoint_attrs : iterable deprecated, do not use remove_rings : bool if True, remove isolated self-contained rings that have no endpoints track_merged : bool if True, add `merged_edges` attribute on simplified edges, containing a list of all the (u, v) node pairs that were merged together Returns ------- G : networkx.MultiDiGraph topologically simplified graph, with a new `geometry` attribute on each simplified edge
def simplify_graph( # noqa: C901 G, strict=None, edge_attrs_differ=None, endpoint_attrs=None, remove_rings=True, track_merged=False, ): """ Simplify a graph's topology by removing interstitial nodes. This simplifies graph topology by removing all nodes that are not intersections or dead-ends, by creating an edge directly between the end points that encapsulate them while retaining the full geometry of the original edges, saved as a new `geometry` attribute on the new edge. Note that only simplified edges receive a `geometry` attribute. Some of the resulting consolidated edges may comprise multiple OSM ways, and if so, their multiple attribute values are stored as a list. Optionally, the simplified edges can receive a `merged_edges` attribute that contains a list of all the (u, v) node pairs that were merged together. Use the `edge_attrs_differ` parameter to relax simplification strictness. For example, `edge_attrs_differ=['osmid']` will retain every node whose incident edges have different OSM IDs. This lets you keep nodes at elbow two-way intersections (but be aware that sometimes individual blocks have multiple OSM IDs within them too). You could also use this parameter to retain nodes where sidewalks or bike lanes begin/end in the middle of a block. Parameters ---------- G : networkx.MultiDiGraph input graph strict : bool deprecated, do not use edge_attrs_differ : iterable An iterable of edge attribute names for relaxing the strictness of endpoint determination. If not None, a node is an endpoint if its incident edges have different values than each other for any of the edge attributes in `edge_attrs_differ`. endpoint_attrs : iterable deprecated, do not use remove_rings : bool if True, remove isolated self-contained rings that have no endpoints track_merged : bool if True, add `merged_edges` attribute on simplified edges, containing a list of all the (u, v) node pairs that were merged together Returns ------- G : networkx.MultiDiGraph topologically simplified graph, with a new `geometry` attribute on each simplified edge """ if endpoint_attrs is not None: msg = ( "The `endpoint_attrs` parameter has been deprecated and will be removed " "in the v2.0.0 release. Use the `edge_attrs_differ` parameter instead. " "See the OSMnx v2 migration guide: https://github.com/gboeing/osmnx/issues/1123" ) warn(msg, FutureWarning, stacklevel=2) edge_attrs_differ = endpoint_attrs if strict is not None: msg = ( "The `strict` parameter has been deprecated and will be removed in " "the v2.0.0 release. Use the `edge_attrs_differ` parameter instead to " "relax simplification strictness. For example, `edge_attrs_differ=None` " "reproduces the old `strict=True` behavior and `edge_attrs_differ=['osmid']` " "reproduces the old `strict=False` behavior. " "See the OSMnx v2 migration guide: https://github.com/gboeing/osmnx/issues/1123" ) warn(msg, FutureWarning, stacklevel=2) # maintain old behavior if strict is passed during deprecation edge_attrs_differ = None if strict else ["osmid"] if "simplified" in G.graph and G.graph["simplified"]: # pragma: no cover msg = "This graph has already been simplified, cannot simplify it again."
raise GraphSimplificationError(msg) utils.log("Begin topologically simplifying the graph...") # define edge segment attributes to sum upon edge simplification attrs_to_sum = {"length", "travel_time"} # make a copy to not mutate original graph object caller passed in G = G.copy() initial_node_count = len(G) initial_edge_count = len(G.edges) all_nodes_to_remove = [] all_edges_to_add = [] # generate each path that needs to be simplified for path in _get_paths_to_simplify(G, edge_attrs_differ): # add the interstitial edges we're removing to a list so we can retain # their spatial geometry merged_edges = [] path_attributes = {} for u, v in zip(path[:-1], path[1:]): if track_merged: # keep track of the edges that were merged merged_edges.append((u, v)) # there should rarely be multiple edges between interstitial nodes # usually happens if OSM has duplicate ways digitized for just one # street... we will keep only one of the edges (see below) edge_count = G.number_of_edges(u, v) if edge_count != 1: utils.log(f"Found {edge_count} edges between {u} and {v} when simplifying") # get edge between these nodes: if multiple edges exist between # them (see above), we retain only one in the simplified graph # We can't assume that there exists an edge from u to v # with key=0, so we get a list of all edges from u to v # and just take the first one. edge_data = list(G.get_edge_data(u, v).values())[0] for attr in edge_data: if attr in path_attributes: # if this key already exists in the dict, append it to the # value list path_attributes[attr].append(edge_data[attr]) else: # if this key doesn't already exist, set the value to a list # containing the one value path_attributes[attr] = [edge_data[attr]] # consolidate the path's edge segments' attribute values for attr in path_attributes: if attr in attrs_to_sum: # if this attribute must be summed, sum it now path_attributes[attr] = sum(path_attributes[attr]) elif len(set(path_attributes[attr])) == 1: # if there's only 1 unique value in this attribute list, # consolidate it to the single value (the zero-th): path_attributes[attr] = path_attributes[attr][0] else: # otherwise, if there are multiple values, keep one of each path_attributes[attr] = list(set(path_attributes[attr])) # construct the new consolidated edge's geometry for this path path_attributes["geometry"] = LineString( [Point((G.nodes[node]["x"], G.nodes[node]["y"])) for node in path] ) if track_merged: # add the merged edges as a new attribute of the simplified edge path_attributes["merged_edges"] = merged_edges # add the nodes and edge to their lists for processing at the end all_nodes_to_remove.extend(path[1:-1]) all_edges_to_add.append( {"origin": path[0], "destination": path[-1], "attr_dict": path_attributes} ) # for each edge to add in the list we assembled, create a new edge between # the origin and destination for edge in all_edges_to_add: G.add_edge(edge["origin"], edge["destination"], **edge["attr_dict"]) # finally remove all the interstitial nodes between the new edges G.remove_nodes_from(set(all_nodes_to_remove)) if remove_rings: G = _remove_rings(G, edge_attrs_differ) # mark the graph as having been simplified G.graph["simplified"] = True msg = ( f"Simplified graph: {initial_node_count:,} to {len(G):,} nodes, " f"{initial_edge_count:,} to {len(G.edges):,} edges" ) utils.log(msg) return G
(G, strict=None, edge_attrs_differ=None, endpoint_attrs=None, remove_rings=True, track_merged=False)
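A usage sketch, assuming the same OSMnx release as above: `graph_from_place` simplifies by default, so the graph must be built with `simplify=False` before calling this function yourself. The place name is illustrative.

import osmnx as ox

# Build an unsimplified graph, then simplify it manually.
G = ox.graph_from_place("Piedmont, California, USA",
                        network_type="drive", simplify=False)

# Retain nodes whose incident edges carry different OSM IDs, and record
# which (u, v) pairs each consolidated edge absorbed.
Gs = ox.simplification.simplify_graph(G, edge_attrs_differ=["osmid"],
                                      track_merged=True)
assert Gs.graph["simplified"]  # calling it a second time raises GraphSimplificationError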
52,270
osmnx.utils
ts
Return current local timestamp as a string. Parameters ---------- style : string {"datetime", "date", "time"} format the timestamp with this built-in style template : string if not None, format the timestamp with this format string instead of one of the built-in styles Returns ------- ts : string local timestamp string
def ts(style="datetime", template=None): """ Return current local timestamp as a string. Parameters ---------- style : string {"datetime", "date", "time"} format the timestamp with this built-in style template : string if not None, format the timestamp with this format string instead of one of the built-in styles Returns ------- ts : string local timestamp string """ if template is None: if style == "datetime": template = "{:%Y-%m-%d %H:%M:%S}" elif style == "date": template = "{:%Y-%m-%d}" elif style == "time": template = "{:%H:%M:%S}" else: # pragma: no cover msg = f"unrecognized timestamp style {style!r}" raise ValueError(msg) return template.format(dt.datetime.now().astimezone())
(style='datetime', template=None)
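A short sketch of the three built-in styles and the `template` override; the outputs shown in comments are illustrative.

from osmnx.utils import ts

ts()                       # '2024-01-15 13:45:07' (style="datetime", the default)
ts(style="date")           # '2024-01-15'
ts(style="time")           # '13:45:07'
ts(template="{:%Y%m%d}")   # '20240115': a format string overrides any style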
52,274
flexget.manager
Manager
Manager class for FlexGet. Fires events: * manager.initialize The first time the manager is initialized, before config is loaded * manager.before_config_load Before the config file is loaded from disk * manager.before_config_validate When updating the config, before the validator is run on it * manager.config_updated After a configuration file has been loaded or changed (and validated) this event is fired * manager.startup After manager has been initialized. This is when application becomes ready to use, however no database lock is present, so the database must not be modified on this event. * manager.lock_acquired The manager does not always require a lock on startup, if one is requested, this event will run when it has been acquired successfully * manager.upgrade If any plugins have declared a newer schema version than exists in the database, this event will be fired to allow plugins to upgrade their tables * manager.shutdown_requested When shutdown has been requested. Any plugins which might add to execution queue should stop when this is fired. * manager.shutdown When the manager is exiting * manager.execute.completed If execution in current process was completed * manager.daemon.started * manager.daemon.completed * manager.db_cleanup
class Manager: """Manager class for FlexGet Fires events: * manager.initialize The first time the manager is initialized, before config is loaded * manager.before_config_load Before the config file is loaded from disk * manager.before_config_validate When updating the config, before the validator is run on it * manager.config_updated After a configuration file has been loaded or changed (and validated) this event is fired * manager.startup After manager has been initialized. This is when application becomes ready to use, however no database lock is present, so the database must not be modified on this event. * manager.lock_acquired The manager does not always require a lock on startup, if one is requested, this event will run when it has been acquired successfully * manager.upgrade If any plugins have declared a newer schema version than exists in the database, this event will be fired to allow plugins to upgrade their tables * manager.shutdown_requested When shutdown has been requested. Any plugins which might add to execution queue should stop when this is fired. * manager.shutdown When the manager is exiting * manager.execute.completed If execution in current process was completed * manager.daemon.started * manager.daemon.completed * manager.db_cleanup """ unit_test = False options: argparse.Namespace def __init__(self, args: List[str]) -> None: """ :param args: CLI args """ global manager if not self.unit_test: assert not manager, 'Only one instance of Manager should be created at a time!' elif manager: logger.info('last manager was not torn down correctly') self.args = args self.autoreload_config = False self.config_file_hash: Optional[str] = None self.config_base: str = '' self.config_name: str = '' self.config_path: str = '' self.log_filename: str = '' self.db_filename: str = '' self.engine: Optional[Engine] = None self.lockfile: str = '' self.database_uri: str = '' self.db_upgraded = False self._has_lock = False self.is_daemon = False self.ipc_server: IPCServer self.task_queue: TaskQueue self.persist: SimplePersistence self.initialized = False self.config: Dict = {} self.options = self.parse_initial_options(args) self._init_config(create=False) # When we are in test mode, we use a different lock file and db if self.options.test: self.lockfile = os.path.join(self.config_base, f'.test-{self.config_name}-lock') self._init_logging() manager = self logger.debug('sys.defaultencoding: {}', sys.getdefaultencoding()) logger.debug('sys.getfilesystemencoding: {}', sys.getfilesystemencoding()) logger.debug('flexget detected io encoding: {}', io_encoding) logger.debug('os.path.supports_unicode_filenames: {}', os.path.supports_unicode_filenames) if ( codecs.lookup(sys.getfilesystemencoding()).name == 'ascii' and not os.path.supports_unicode_filenames ): logger.warning( 'Your locale declares ascii as the filesystem encoding. Any plugins reading filenames from ' 'disk will not work properly for filenames containing non-ascii characters. Make sure your ' 'locale env variables are set up correctly for the environment which is launching FlexGet.' 
) def _add_tray_icon_items(self, tray_icon: 'TrayIcon'): tray_icon.add_menu_item(text='Shutdown', action=self.shutdown, index=2) tray_icon.add_menu_item(text='Reload Config', action=self.load_config, index=3) tray_icon.add_menu_separator(index=4) @staticmethod def parse_initial_options(args: List[str]) -> argparse.Namespace: """Parse what we can from cli args before plugins are loaded.""" try: options = CoreArgumentParser().parse_known_args(args, do_help=False)[0] except ParserError as exc: try: # If a non-built-in command was used, we need to parse with a parser that # doesn't define the subparsers options = manager_parser.parse_known_args(args, do_help=False)[0] except ParserError: manager_parser.print_help() print(f'\nError: {exc.message}') sys.exit(1) return options def _init_logging(self) -> None: """Initialize logging variables""" log_file = os.path.expanduser(self.options.logfile) # If an absolute path is not specified, use the config directory. if not os.path.isabs(log_file): log_file = os.path.join(self.config_base, log_file) self.log_filename = log_file def initialize(self) -> None: """ Load plugins, database, and config. Also initializes (but does not start) the task queue and ipc server. This should only be called after obtaining a lock. """ if self.initialized: raise RuntimeError('Cannot call initialize on an already initialized manager.') plugin.load_plugins( extra_plugins=[os.path.join(self.config_base, 'plugins')], extra_components=[os.path.join(self.config_base, 'components')], ) # Reparse CLI options now that plugins are loaded self.options = get_parser().parse_args(self.args) self.task_queue = TaskQueue() self.ipc_server = IPCServer(self, self.options.ipc_port) self.setup_yaml() self.init_sqlalchemy() fire_event('manager.initialize', self) try: self.load_config() except ValueError as e: logger.critical('Failed to load config file: {}', e.args[0]) raise # cannot be imported at module level because of circular references from flexget.utils.simple_persistence import SimplePersistence self.persist = SimplePersistence('manager') if db_schema.upgrade_required(): logger.info('Database upgrade is required. Attempting now.') fire_event('manager.upgrade', self) if manager.db_upgraded: fire_event('manager.db_upgraded', self) fire_event('manager.startup', self) self.initialized = True @property def tasks(self) -> List[str]: """A list of tasks in the config""" if not self.config: return [] return list(self.config.get('tasks', {}).keys()) @property def has_lock(self) -> bool: return self._has_lock def execute( self, options: Optional[Union[dict, argparse.Namespace]] = None, priority: int = 1, suppress_warnings: Optional[Sequence[str]] = None, ) -> List[Tuple[str, str, threading.Event]]: """ Run all (can be limited with options) tasks from the config. :param options: Either an :class:`argparse.Namespace` instance, or a dict, containing options for execution :param priority: If there are other executions waiting to be run, they will be run in priority order, lowest first. 
:param suppress_warnings: Allows suppressing log warning about missing plugin in key phases :returns: a list of :class:`threading.Event` instances which will be set when each respective task has finished running """ if options is None: options = copy.copy(self.options.execute) elif isinstance(options, dict): options_namespace = copy.copy(self.options.execute) options_namespace.__dict__.update(options) options = options_namespace task_names = self.tasks # Only reload config if daemon config_hash = self.hash_config() if self.is_daemon and self.autoreload_config and self.config_file_hash != config_hash: logger.info('Config change detected. Reloading.') try: self.load_config(output_to_console=False, config_file_hash=config_hash) logger.info('Config successfully reloaded!') except Exception as e: logger.error('Reloading config failed: {}', e) # Handle --tasks if options.tasks: # Consider '*' the same as not specifying any tasks. # (So manual plugin doesn't consider them explicitly enabled.) if options.tasks == ['*']: options.tasks = None else: task_names = [] for task in options.tasks: try: task_names.extend( m for m in self.matching_tasks(task) if m not in task_names ) except ValueError as e: logger.error(e) continue options.tasks = task_names # TODO: 1.2 This is a hack to make task priorities work still, not sure if it's the best one task_names = sorted( task_names, key=lambda t: self.config['tasks'][t].get('priority', 65535) ) finished_events = [] for task_name in task_names: task = Task( self, task_name, options=options, output=get_console_output(), session_id=flexget.log.get_log_session_id(), priority=priority, suppress_warnings=suppress_warnings, ) self.task_queue.put(task) finished_events.append((task.id, task.name, task.finished_event)) return finished_events def start(self) -> None: """ Starting point when executing from commandline, dispatch execution to correct destination. If there is a FlexGet process with an ipc server already running, the command will be sent there for execution and results will be streamed back. If not, this will attempt to obtain a lock, initialize the manager, and run the command here. """ # If another process is started, send the execution to the running process ipc_info = self.check_ipc_info() # If we are connecting to a running daemon, we don't want to log to the log file, # the daemon is already handling that. if ipc_info: console( 'There is a FlexGet process already running for this config, sending execution there.' ) logger.debug('Sending command to running FlexGet process: {}', self.args) try: client = IPCClient(ipc_info['port'], ipc_info['password']) except ValueError as e: logger.error(e) else: try: client.handle_cli(self.args) except KeyboardInterrupt: logger.error( 'Disconnecting from daemon due to ctrl-c. Executions will still continue in the ' 'background.' ) except EOFError: logger.error('Connection from daemon was severed.') return if self.options.test: logger.info('Test mode, creating a copy from database ...') db_test_filename = os.path.join(self.config_base, f'test-{self.config_name}.sqlite') if os.path.exists(self.db_filename): shutil.copy(self.db_filename, db_test_filename) logger.info('Test database created') self.db_filename = db_test_filename # No running process, we start our own to handle command with self.acquire_lock(): self.initialize() self.handle_cli() self._shutdown() def handle_cli(self, options: Optional[argparse.Namespace] = None) -> None: """ Dispatch a cli command to the appropriate function. 
* :meth:`.execute_command` * :meth:`.daemon_command` * CLI plugin callback function The manager should have a lock and be initialized before calling this method. :param options: argparse options for command. Defaults to options that manager was instantiated with. """ if not options: options = self.options command = options.cli_command if command is None: raise Exception('Command missing') command_options = getattr(options, command) # First check for built-in commands if command in ['execute', 'daemon']: if command == 'execute': self.execute_command(command_options) elif command == 'daemon': self.daemon_command(command_options) else: # Otherwise dispatch the command to the callback function options.cli_command_callback(self, command_options) def execute_command(self, options: argparse.Namespace) -> None: """ Handles the 'execute' CLI command. If there is already a task queue running in this process, adds the execution to the queue. If FlexGet is being invoked with this command, starts up a task queue and runs the execution. Fires events: * manager.execute.started * manager.execute.completed :param options: argparse options """ fire_event('manager.execute.started', self, options) if self.task_queue.is_alive() or self.is_daemon: if not self.task_queue.is_alive(): logger.error( 'Task queue has died unexpectedly. Restarting it. Please open an issue on Github and include' ' any previous error logs.' ) self.task_queue = TaskQueue() self.task_queue.start() if len(self.task_queue): logger.verbose('There is a task already running, execution queued.') finished_events = self.execute(options) if not options.cron: # Wait until execution of all tasks has finished for _, _, event in finished_events: event.wait() else: self.task_queue.start() self.ipc_server.start() self.execute(options) self.shutdown(finish_queue=True) self.task_queue.wait() fire_event('manager.execute.completed', self, options) def daemon_command(self, options: argparse.Namespace) -> None: """ Handles the 'daemon' CLI command. Fires events: * manager.daemon.started * manager.daemon.completed :param options: argparse options """ # Import API so it can register to daemon.started event if options.action == 'start': if self.is_daemon: logger.error('Daemon already running for this config.') return elif self.task_queue.is_alive(): logger.error( 'Non-daemon execution of FlexGet is running. Cannot start daemon until it is finished.' ) return if options.daemonize: self.daemonize() if options.autoreload_config: self.autoreload_config = True try: signal.signal(signal.SIGTERM, self._handle_sigterm) except ValueError as e: # If flexget is being called from another script, e.g. windows service helper, and we are not the # main thread, this error will occur. logger.debug('Error registering sigterm handler: {}', e) self.is_daemon = True def run_daemon(tray_icon: 'TrayIcon' = None): fire_event('manager.daemon.started', self) self.task_queue.start() self.ipc_server.start() self.task_queue.wait() fire_event('manager.daemon.completed', self) if tray_icon: tray_icon.stop() if options.tray_icon: from flexget.tray_icon import tray_icon self._add_tray_icon_items(tray_icon) # Tray icon must be run in the main thread. m = threading.Thread(target=run_daemon, args=(tray_icon,)) m.start() tray_icon.run() m.join() else: run_daemon() elif options.action in ['stop', 'reload-config', 'status']: if not self.is_daemon: console('There does not appear to be a daemon running.') return if options.action == 'status': logger.debug('`daemon status` called. Daemon running. 
(PID: {})', os.getpid()) console(f'Daemon running. (PID: {os.getpid()})') elif options.action == 'stop': tasks = ( 'all queued tasks (if any) have' if options.wait else 'currently running task (if any) has' ) logger.info( 'Daemon shutdown requested. Shutdown will commence when {} finished executing.', tasks, ) self.shutdown(options.wait) elif options.action == 'reload-config': logger.info('Reloading config from disk.') try: self.load_config() except ValueError as e: logger.error('Error loading config: {}', e.args[0]) else: logger.info('Config successfully reloaded from disk.') def _handle_sigterm(self, signum, frame) -> None: logger.info('Got SIGTERM. Shutting down.') self.shutdown(finish_queue=False) def setup_yaml(self) -> None: """Customize the yaml loader/dumper behavior""" # Represent OrderedDict as a regular dict (but don't sort it alphabetically) # This lets us order a dict in a yaml file for easier human consumption def represent_dict_order(self, data): return self.represent_mapping('tag:yaml.org,2002:map', data.items()) yaml.add_representer(collections.OrderedDict, represent_dict_order) # Set up the dumper to increase the indent for lists def increase_indent_wrapper(func): def increase_indent(self, flow=False, indentless=False): func(self, flow, False) return increase_indent yaml.Dumper.increase_indent = increase_indent_wrapper(yaml.Dumper.increase_indent) yaml.SafeDumper.increase_indent = increase_indent_wrapper(yaml.SafeDumper.increase_indent) def _init_config(self, create: bool = False) -> None: """ Find and load the configuration file. :param bool create: If a config file is not found, and create is True, one will be created in the home folder :raises: `OSError` when no config file could be found, and `create` is False. """ home_path = os.path.join(os.path.expanduser('~'), '.flexget') options_config = os.path.expanduser(self.options.config) possible = [] if os.path.isabs(options_config): # explicit path given, don't try anything config = options_config possible = [config] else: logger.debug('Figuring out config load paths') try: possible.append(os.getcwd()) except OSError: logger.debug('current directory invalid, not searching for config there') # for virtualenv / dev sandbox if hasattr(sys, 'real_prefix'): logger.debug('Adding virtualenv path') possible.append(sys.prefix) # normal lookup locations possible.append(home_path) if sys.platform.startswith('win'): # On windows look in ~/flexget as well, as explorer does not let you create a folder starting with a dot home_path = os.path.join(os.path.expanduser('~'), 'flexget') possible.append(home_path) else: # The freedesktop.org standard config location xdg_config = os.environ.get( 'XDG_CONFIG_HOME', os.path.join(os.path.expanduser('~'), '.config') ) possible.append(os.path.join(xdg_config, 'flexget')) for path in possible: config = os.path.join(path, options_config) if os.path.exists(config): logger.debug('Found config: {}', config) break else: config = None if create and not (config and os.path.exists(config)): config = os.path.join(home_path, options_config) logger.info('Config file {} not found. 
Creating new config {}', options_config, config) with open(config, 'w') as newconfig: # Write empty tasks to the config newconfig.write(yaml.dump({'tasks': {}})) elif not config: logger.critical('Failed to find configuration file {}', options_config) logger.info('Tried to read from: {}', ', '.join(possible)) raise OSError('No configuration file found.') if not os.path.isfile(config): raise OSError(f'Config `{config}` does not appear to be a file.') logger.debug('Config file {} selected', config) self.config_path = config self.config_name = os.path.splitext(os.path.basename(config))[0] self.config_base = os.path.normpath(os.path.dirname(config)) self.lockfile = os.path.join(self.config_base, f'.{self.config_name}-lock') self.db_filename = os.path.join(self.config_base, f'db-{self.config_name}.sqlite') def hash_config(self) -> Optional[str]: if not self.config_path: return None sha1_hash = hashlib.sha1() with open(self.config_path, 'rb') as f: while True: data = f.read(65536) if not data: break sha1_hash.update(data) return sha1_hash.hexdigest() def load_config( self, output_to_console: bool = True, config_file_hash: Optional[str] = None ) -> None: """ Loads the config file from disk, validates and activates it. :raises: `ValueError` if there is a problem loading the config file """ fire_event('manager.before_config_load', self) with open(self.config_path, encoding='utf-8') as f: try: raw_config = f.read() except UnicodeDecodeError: logger.critical('Config file must be UTF-8 encoded.') raise ValueError('Config file is not UTF-8 encoded') try: self.config_file_hash = config_file_hash or self.hash_config() config = yaml.safe_load(raw_config) or {} except yaml.YAMLError as e: msg = str(e).replace('\n', ' ') msg = ' '.join(msg.split()) logger.critical(msg) if output_to_console: print('') print('-' * 79) print(' Malformed configuration file (check messages above). Common reasons:') print('-' * 79) print('') print(' o Indentation error') print(' o Missing : from end of the line') print(' o Non ASCII characters (use UTF8)') print( ' o If text contains any of :[]{}% characters it must be single-quoted ' '(eg. value{1} should be \'value{1}\')\n' ) # Not very good practice but we get several kind of exceptions here, I'm not even sure all of them # At least: ReaderError, YmlScannerError (or something like that) if isinstance(e, yaml.MarkedYAMLError): lines = 0 if e.problem is not None: print(f' Reason: {e.problem}\n') if e.problem == 'mapping values are not allowed here': print(' ----> MOST LIKELY REASON: Missing : from end of the line!') print('') if e.context_mark is not None: print( f' Check configuration near line {e.context_mark.line}, column {e.context_mark.column}' ) lines += 1 if e.problem_mark is not None: print( f' Check configuration near line {e.problem_mark.line}, column {e.problem_mark.column}' ) lines += 1 if lines: print('') if lines == 1: print(' Fault is almost always in this or previous line\n') if lines == 2: print(' Fault is almost always in one of these lines or previous ones\n') # When --debug escalate to full stacktrace if self.options.debug or not output_to_console: raise raise ValueError('Config file is not valid YAML') # config loaded successfully logger.debug('config_name: {}', self.config_name) logger.debug('config_base: {}', self.config_base) # Install the newly loaded config self.update_config(config) def update_config(self, config: dict) -> None: """ Provide a new config for the manager to use. 
:raises: `ValueError` and rolls back to previous config if the provided config is not valid. """ new_user_config = config old_config = self.config try: self.config = self.validate_config(config) except ConfigError as e: for error in getattr(e, 'errors', []): logger.critical('[{}] {}', error.json_pointer, error.message) logger.debug('invalid config, rolling back') self.config = old_config raise logger.debug('New config data loaded.') self.user_config = copy.deepcopy(new_user_config) fire_event('manager.config_updated', self) def backup_config(self) -> str: backup_path = os.path.join( self.config_base, f'{self.config_name}-{datetime.now().strftime("%y%m%d%H%M%S")}.bak', ) logger.debug(f'backing up old config to {backup_path} before new save') try: shutil.copy(self.config_path, backup_path) except OSError as e: logger.warning(f'Config backup creation failed: {e}') raise return backup_path def save_config(self) -> None: """Dumps current config to yaml config file""" # TODO: Only keep x number of backups.. # Back up the user's current config before overwriting try: self.backup_config() except OSError: return with open(self.config_path, 'w') as config_file: config_file.write(yaml.dump(self.user_config, default_flow_style=False)) def config_changed(self) -> None: """Makes sure that all tasks will have the config_modified flag come out true on the next run. Useful when changing the db and all tasks need to be completely reprocessed.""" from flexget.task import config_changed config_changed() fire_event('manager.config_updated', self) def validate_config(self, config: Optional[dict] = None) -> dict: """ Check all root level keywords are valid. Config may be modified by before_config_validate hooks. Modified config will be returned. :param config: Config to check. If not provided, current manager config will be checked. :raises: `ValueError` when config fails validation. There will be an `errors` attribute with the schema errors. :returns: Final validated config. """ conf = config if config else self.config conf = fire_event('manager.before_config_validate', conf, self) errors = config_schema.process_config(conf) if errors: err = ConfigError('Did not pass schema validation.') err.errors = errors raise err else: return conf def init_sqlalchemy(self) -> None: """Initialize SQLAlchemy""" try: if [int(part) for part in sqlalchemy.__version__.split('.')] < [0, 7, 0]: print( 'FATAL: SQLAlchemy 0.7.0 or newer required. Please upgrade your SQLAlchemy.', file=sys.stderr, ) sys.exit(1) except ValueError: logger.critical('Failed to check SQLAlchemy version, you may need to upgrade it') # SQLAlchemy if not self.database_uri: # in case running on windows, needs double \\ filename = self.db_filename.replace('\\', '\\\\') self.database_uri = f'sqlite:///{filename}' if self.db_filename and not os.path.exists(self.db_filename): logger.verbose('Creating new database {} - DO NOT INTERRUPT ...', self.db_filename) # fire up the engine logger.debug('Connecting to: {}', self.database_uri) try: self.engine = sqlalchemy.create_engine( self.database_uri, echo=self.options.debug_sql, connect_args={'check_same_thread': False, 'timeout': 10}, ) except ImportError as e: print( 'FATAL: Unable to use SQLite. Are you running Python 2.7, 3.3 or newer ?\n' 'Python should normally have SQLite support built in.\n' 'If you\'re running correct version of Python then it is not equipped with SQLite.\n' 'You can try installing `pysqlite`. 
If you have compiled python yourself, ' 'recompile it with SQLite support.\n' f'Error: {e}', file=sys.stderr, ) sys.exit(1) Session.configure(bind=self.engine) # create all tables, doesn't do anything to existing tables try: Base.metadata.create_all(bind=self.engine) except OperationalError as e: if os.path.exists(self.db_filename): print( f'{e.message} - make sure you have write permissions to file {self.db_filename}', file=sys.stderr, ) else: print( f'{e.message} - make sure you have write permissions to directory {self.config_base}', file=sys.stderr, ) raise def _read_lock(self) -> Optional[dict]: """ Read the values from the lock file. Returns None if there is no current lock file. """ if self.lockfile and os.path.exists(self.lockfile): result: Dict[str, Union[str, int]] = {} with open(self.lockfile, encoding='utf-8') as f: lines = [line for line in f.readlines() if line] for line in lines: try: key, value = line.split(':', 1) except ValueError: logger.debug('Invalid line in lock file: {}', line) continue result[key.strip().lower()] = value.strip() for key in result: if result[key].isdigit(): result[key] = int(result[key]) result.setdefault('pid', None) if not result['pid']: logger.error( 'Invalid lock file. Make sure FlexGet is not running, then delete it.' ) elif not pid_exists(result['pid']): return None return result return None def check_lock(self) -> bool: """Returns True if there is a lock on the database.""" lock_info = self._read_lock() if not lock_info: return False # Don't count it if we hold the lock if os.getpid() == lock_info['pid']: return False return True def check_ipc_info(self) -> Optional[dict]: """If a daemon has a lock on the database, return info to connect to IPC.""" lock_info = self._read_lock() if lock_info and 'port' in lock_info: return lock_info return None @contextmanager def acquire_lock(self, event: bool = True) -> Iterator: """ :param bool event: If True, the 'manager.lock_acquired' event will be fired after a lock is obtained """ acquired = False try: # Don't do anything if we already have a lock. This means only the outermost call will release the lock file if not self._has_lock: # Exit if there is an existing lock. if self.check_lock(): with open(self.lockfile, encoding='utf-8') as f: pid = f.read() print( 'Another process (%s) is running, will exit.' % pid.split('\n')[0], file=sys.stderr, ) print( "If you're sure there is no other instance running, delete %s" % self.lockfile, file=sys.stderr, ) sys.exit(1) self._has_lock = True self.write_lock() acquired = True if event: fire_event('manager.lock_acquired', self) yield finally: if acquired: self.release_lock() self._has_lock = False def write_lock(self, ipc_info: Optional[dict] = None) -> None: assert self._has_lock with open(self.lockfile, 'w', encoding='utf-8') as f: f.write(f'PID: {os.getpid()}\n') if ipc_info: for key in sorted(ipc_info): f.write(f'{key}: {ipc_info[key]}\n') def release_lock(self) -> None: try: os.remove(self.lockfile) except OSError as e: if e.errno != errno.ENOENT: raise logger.debug(f'Lockfile {self.lockfile} not found') else: logger.debug(f'Removed {self.lockfile}') def daemonize(self) -> None: """Daemonizes the current process. Returns the new pid""" if sys.platform.startswith('win'): logger.error('Cannot daemonize on windows') return if threading.active_count() != 1: logger.critical( 'There are {!r} active threads. 
Daemonizing now may cause strange failures.', threading.enumerate(), ) logger.info('Daemonizing...') try: pid = os.fork() if pid > 0: # Don't run the exit handlers on the parent atexit._exithandlers = [] # exit first parent sys.exit(0) except OSError as e: sys.stderr.write(f'fork #1 failed: {e.errno} ({e.strerror})\n') sys.exit(1) # decouple from parent environment os.chdir('/') os.setsid() os.umask(0) # do second fork try: pid = os.fork() if pid > 0: # Don't run the exit handlers on the parent atexit._exithandlers = [] # exit from second parent sys.exit(0) except OSError as e: sys.stderr.write(f'fork #2 failed: {e.errno} ({e.strerror})\n') sys.exit(1) logger.info(f'Daemonize complete. New PID: {os.getpid()}') # redirect standard file descriptors sys.stdout.flush() sys.stderr.flush() si = open(os.devnull) so = open(os.devnull, 'ab+') se = open(os.devnull, 'ab+', 0) os.dup2(si.fileno(), sys.stdin.fileno()) os.dup2(so.fileno(), sys.stdout.fileno()) os.dup2(se.fileno(), sys.stderr.fileno()) # If we have a lock, update the lock file with our new pid if self._has_lock: self.write_lock() def db_cleanup(self, force: bool = False) -> None: """ Perform database cleanup if cleanup interval has been met. Fires events: * manager.db_cleanup If interval was met. Gives session to do the cleanup as a parameter. :param bool force: Run the cleanup no matter whether the interval has been met. """ expired = ( self.persist.get('last_cleanup', datetime(1900, 1, 1)) < datetime.now() - DB_CLEANUP_INTERVAL ) if force or expired: logger.info('Running database cleanup.') with Session() as session: fire_event('manager.db_cleanup', self, session) # Try to VACUUM after cleanup fire_event('manager.db_vacuum', self) # Just in case some plugin was overzealous in its cleaning, mark the config changed self.config_changed() self.persist['last_cleanup'] = datetime.now() else: logger.debug('Not running db cleanup, last run {}', self.persist.get('last_cleanup')) def shutdown(self, finish_queue: bool = True) -> None: """ Request manager shutdown. :param bool finish_queue: Should scheduler finish the task queue """ if not self.initialized: raise RuntimeError('Cannot shutdown manager that was never initialized.') fire_event('manager.shutdown_requested', self) self.task_queue.shutdown(finish_queue) def _shutdown(self) -> None: """Runs when the manager is done processing everything.""" if self.ipc_server: self.ipc_server.shutdown() fire_event('manager.shutdown', self) if not self.unit_test: # don't scroll "nosetests" summary results when logging is enabled logger.debug('Shutting down') self.engine.dispose() # remove temporary database used in test mode if self.options.test: if 'test' not in self.db_filename: raise Exception('trying to delete non test database?') if self._has_lock: os.remove(self.db_filename) logger.info('Removed test database') global manager manager = None def matching_tasks(self, task: str) -> Optional[List[str]]: """Create list of tasks to run, preserving order""" task_names = [t for t in self.tasks if fnmatch.fnmatchcase(str(t).lower(), task.lower())] if not task_names: raise ValueError(f'`{task}` does not match any tasks') return task_names def crash_report(self) -> str: """ This should be called when handling an unexpected exception. Will create a new log file containing the last 50 debug messages as well as the crash traceback. 
""" if not self.unit_test: log_dir = os.path.dirname(self.log_filename) filename = os.path.join( log_dir, datetime.now().strftime('crash_report.%Y.%m.%d.%H%M%S%f.log') ) with codecs.open(filename, 'w', encoding='utf-8') as outfile: outfile.writelines(flexget.log.debug_buffer) traceback.print_exc(file=outfile) logger.critical( 'An unexpected crash has occurred. Writing crash report to {}. ' 'Please verify you are running the latest version of flexget by using "flexget -V" ' 'from CLI or by using version_checker plugin' ' at https://flexget.com/Plugins/version_checker. ' 'You are currently using version {}', filename, get_current_flexget_version(), ) logger.opt(exception=True).debug('Traceback:') return traceback.format_exc()
(args: List[str]) -> None
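The lifecycle hooks listed above are dispatched through FlexGet's event bus (the `fire_event` calls in the code). A minimal sketch of subscribing to them from a plugin, assuming the `flexget.event.event` decorator that built-in plugins use to register handlers:

from flexget.event import event

@event('manager.startup')
def on_startup(manager):
    # Fired after initialize(); no database lock is guaranteed here,
    # so the handler must not modify the database.
    print(f'manager ready, {len(manager.tasks)} tasks configured')

@event('manager.shutdown_requested')
def on_shutdown_requested(manager):
    # Stop adding to the execution queue once this fires.
    print('shutdown requested')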
52,275
flexget.manager
__init__
:param args: CLI args
def __init__(self, args: List[str]) -> None: """ :param args: CLI args """ global manager if not self.unit_test: assert not manager, 'Only one instance of Manager should be created at a time!' elif manager: logger.info('last manager was not torn down correctly') self.args = args self.autoreload_config = False self.config_file_hash: Optional[str] = None self.config_base: str = '' self.config_name: str = '' self.config_path: str = '' self.log_filename: str = '' self.db_filename: str = '' self.engine: Optional[Engine] = None self.lockfile: str = '' self.database_uri: str = '' self.db_upgraded = False self._has_lock = False self.is_daemon = False self.ipc_server: IPCServer self.task_queue: TaskQueue self.persist: SimplePersistence self.initialized = False self.config: Dict = {} self.options = self.parse_initial_options(args) self._init_config(create=False) # When we are in test mode, we use a different lock file and db if self.options.test: self.lockfile = os.path.join(self.config_base, f'.test-{self.config_name}-lock') self._init_logging() manager = self logger.debug('sys.defaultencoding: {}', sys.getdefaultencoding()) logger.debug('sys.getfilesystemencoding: {}', sys.getfilesystemencoding()) logger.debug('flexget detected io encoding: {}', io_encoding) logger.debug('os.path.supports_unicode_filenames: {}', os.path.supports_unicode_filenames) if ( codecs.lookup(sys.getfilesystemencoding()).name == 'ascii' and not os.path.supports_unicode_filenames ): logger.warning( 'Your locale declares ascii as the filesystem encoding. Any plugins reading filenames from ' 'disk will not work properly for filenames containing non-ascii characters. Make sure your ' 'locale env variables are set up correctly for the environment which is launching FlexGet.' )
(self, args: List[str]) -> NoneType
52,276
flexget.manager
_add_tray_icon_items
null
def _add_tray_icon_items(self, tray_icon: 'TrayIcon'): tray_icon.add_menu_item(text='Shutdown', action=self.shutdown, index=2) tray_icon.add_menu_item(text='Reload Config', action=self.load_config, index=3) tray_icon.add_menu_separator(index=4)
(self, tray_icon: 'TrayIcon')
52,277
flexget.manager
_handle_sigterm
null
def _handle_sigterm(self, signum, frame) -> None: logger.info('Got SIGTERM. Shutting down.') self.shutdown(finish_queue=False)
(self, signum, frame) -> NoneType
52,278
flexget.manager
_init_config
Find and load the configuration file. :param bool create: If a config file is not found, and create is True, one will be created in the home folder :raises: `OSError` when no config file could be found, and `create` is False.
def _init_config(self, create: bool = False) -> None: """ Find and load the configuration file. :param bool create: If a config file is not found, and create is True, one will be created in the home folder :raises: `OSError` when no config file could be found, and `create` is False. """ home_path = os.path.join(os.path.expanduser('~'), '.flexget') options_config = os.path.expanduser(self.options.config) possible = [] if os.path.isabs(options_config): # explicit path given, don't try anything config = options_config possible = [config] else: logger.debug('Figuring out config load paths') try: possible.append(os.getcwd()) except OSError: logger.debug('current directory invalid, not searching for config there') # for virtualenv / dev sandbox if hasattr(sys, 'real_prefix'): logger.debug('Adding virtualenv path') possible.append(sys.prefix) # normal lookup locations possible.append(home_path) if sys.platform.startswith('win'): # On windows look in ~/flexget as well, as explorer does not let you create a folder starting with a dot home_path = os.path.join(os.path.expanduser('~'), 'flexget') possible.append(home_path) else: # The freedesktop.org standard config location xdg_config = os.environ.get( 'XDG_CONFIG_HOME', os.path.join(os.path.expanduser('~'), '.config') ) possible.append(os.path.join(xdg_config, 'flexget')) for path in possible: config = os.path.join(path, options_config) if os.path.exists(config): logger.debug('Found config: {}', config) break else: config = None if create and not (config and os.path.exists(config)): config = os.path.join(home_path, options_config) logger.info('Config file {} not found. Creating new config {}', options_config, config) with open(config, 'w') as newconfig: # Write empty tasks to the config newconfig.write(yaml.dump({'tasks': {}})) elif not config: logger.critical('Failed to find configuration file {}', options_config) logger.info('Tried to read from: {}', ', '.join(possible)) raise OSError('No configuration file found.') if not os.path.isfile(config): raise OSError(f'Config `{config}` does not appear to be a file.') logger.debug('Config file {} selected', config) self.config_path = config self.config_name = os.path.splitext(os.path.basename(config))[0] self.config_base = os.path.normpath(os.path.dirname(config)) self.lockfile = os.path.join(self.config_base, f'.{self.config_name}-lock') self.db_filename = os.path.join(self.config_base, f'db-{self.config_name}.sqlite')
(self, create: bool = False) -> NoneType
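For reference, the search order this method implements for a relative config path, condensed into a standalone sketch (same logic, trimmed of logging and of the OSError guard around os.getcwd()):

import os
import sys

def candidate_config_dirs() -> list:
    """Directories _init_config searches, in order, for a relative config path."""
    dirs = [os.getcwd()]
    if hasattr(sys, 'real_prefix'):  # old-style virtualenv / dev sandbox marker
        dirs.append(sys.prefix)
    dirs.append(os.path.join(os.path.expanduser('~'), '.flexget'))
    if sys.platform.startswith('win'):
        # Explorer can't create dot-folders, so ~/flexget is also checked.
        dirs.append(os.path.join(os.path.expanduser('~'), 'flexget'))
    else:
        # The freedesktop.org standard config location.
        xdg = os.environ.get('XDG_CONFIG_HOME',
                             os.path.join(os.path.expanduser('~'), '.config'))
        dirs.append(os.path.join(xdg, 'flexget'))
    return dirs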
52,279
flexget.manager
_init_logging
Initialize logging variables
def _init_logging(self) -> None: """Initialize logging variables""" log_file = os.path.expanduser(self.options.logfile) # If an absolute path is not specified, use the config directory. if not os.path.isabs(log_file): log_file = os.path.join(self.config_base, log_file) self.log_filename = log_file
(self) -> NoneType
52,280
flexget.manager
_read_lock
Read the values from the lock file. Returns None if there is no current lock file.
def _read_lock(self) -> Optional[dict]: """ Read the values from the lock file. Returns None if there is no current lock file. """ if self.lockfile and os.path.exists(self.lockfile): result: Dict[str, Union[str, int]] = {} with open(self.lockfile, encoding='utf-8') as f: lines = [line for line in f.readlines() if line] for line in lines: try: key, value = line.split(':', 1) except ValueError: logger.debug('Invalid line in lock file: {}', line) continue result[key.strip().lower()] = value.strip() for key in result: if result[key].isdigit(): result[key] = int(result[key]) result.setdefault('pid', None) if not result['pid']: logger.error( 'Invalid lock file. Make sure FlexGet is not running, then delete it.' ) elif not pid_exists(result['pid']): return None return result return None
(self) -> Optional[dict]
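The lock file is plain "Key: value" text (see `write_lock`, which emits a `PID:` line plus any IPC info). A tiny sketch mirroring the parsing rules, with illustrative contents:

raw = 'PID: 12345\nport: 29709\n'   # illustrative lock file contents
info = {}
for line in raw.splitlines():
    key, _, value = line.partition(':')
    # Keys are lowercased; all-digit values are cast to int.
    info[key.strip().lower()] = (int(value.strip()) if value.strip().isdigit()
                                 else value.strip())
assert info == {'pid': 12345, 'port': 29709}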
52,281
flexget.manager
_shutdown
Runs when the manager is done processing everything.
def _shutdown(self) -> None: """Runs when the manager is done processing everything.""" if self.ipc_server: self.ipc_server.shutdown() fire_event('manager.shutdown', self) if not self.unit_test: # don't scroll "nosetests" summary results when logging is enabled logger.debug('Shutting down') self.engine.dispose() # remove temporary database used in test mode if self.options.test: if 'test' not in self.db_filename: raise Exception('trying to delete non test database?') if self._has_lock: os.remove(self.db_filename) logger.info('Removed test database') global manager manager = None
(self) -> NoneType
52,282
flexget.manager
acquire_lock
:param bool event: If True, the 'manager.lock_acquired' event will be fired after a lock is obtained
@contextmanager def acquire_lock(self, event: bool = True) -> Iterator: """ :param bool event: If True, the 'manager.lock_acquired' event will be fired after a lock is obtained """ acquired = False try: # Don't do anything if we already have a lock. This means only the outermost call will release the lock file if not self._has_lock: # Exit if there is an existing lock. if self.check_lock(): with open(self.lockfile, encoding='utf-8') as f: pid = f.read() print( 'Another process (%s) is running, will exit.' % pid.split('\n')[0], file=sys.stderr, ) print( "If you're sure there is no other instance running, delete %s" % self.lockfile, file=sys.stderr, ) sys.exit(1) self._has_lock = True self.write_lock() acquired = True if event: fire_event('manager.lock_acquired', self) yield finally: if acquired: self.release_lock() self._has_lock = False
(self, event: bool = True) -> Iterator
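Typical use, mirroring `Manager.start()`; assumes `manager` is a constructed Manager instance.

# Only the outermost acquire_lock() writes and later releases the lock file;
# if another process already holds it, this process prints a notice and exits.
with manager.acquire_lock():
    manager.initialize()   # database work is only safe while the lock is held
    manager.handle_cli()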
52,283
flexget.manager
backup_config
null
def backup_config(self) -> str: backup_path = os.path.join( self.config_base, f'{self.config_name}-{datetime.now().strftime("%y%m%d%H%M%S")}.bak', ) logger.debug(f'backing up old config to {backup_path} before new save') try: shutil.copy(self.config_path, backup_path) except OSError as e: logger.warning(f'Config backup creation failed: {e}') raise return backup_path
(self) -> str
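The backup name is the config name plus a `%y%m%d%H%M%S` timestamp; a one-line sketch of the scheme (the `config` prefix is illustrative):

from datetime import datetime

backup_name = f"config-{datetime.now().strftime('%y%m%d%H%M%S')}.bak"
# e.g. 'config-240115134507.bak', written beside the config before each save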
52,284
flexget.manager
check_ipc_info
If a daemon has a lock on the database, return info to connect to IPC.
def check_ipc_info(self) -> Optional[dict]: """If a daemon has a lock on the database, return info to connect to IPC.""" lock_info = self._read_lock() if lock_info and 'port' in lock_info: return lock_info return None
(self) -> Optional[dict]
52,285
flexget.manager
check_lock
Returns True if there is a lock on the database.
def check_lock(self) -> bool: """Returns True if there is a lock on the database.""" lock_info = self._read_lock() if not lock_info: return False # Don't count it if we hold the lock if os.getpid() == lock_info['pid']: return False return True
(self) -> bool
52,286
flexget.manager
config_changed
Makes sure that all tasks will have the config_modified flag come out true on the next run. Useful when changing the db and all tasks need to be completely reprocessed.
def config_changed(self) -> None: """Makes sure that all tasks will have the config_modified flag come out true on the next run. Useful when changing the db and all tasks need to be completely reprocessed.""" from flexget.task import config_changed config_changed() fire_event('manager.config_updated', self)
(self) -> NoneType
52,287
flexget.manager
crash_report
This should be called when handling an unexpected exception. Will create a new log file containing the last 50 debug messages as well as the crash traceback.
def crash_report(self) -> str:
    """
    This should be called when handling an unexpected exception. Will create a new log file
    containing the last 50 debug messages as well as the crash traceback.
    """
    if not self.unit_test:
        log_dir = os.path.dirname(self.log_filename)
        filename = os.path.join(
            log_dir, datetime.now().strftime('crash_report.%Y.%m.%d.%H%M%S%f.log')
        )
        with codecs.open(filename, 'w', encoding='utf-8') as outfile:
            outfile.writelines(flexget.log.debug_buffer)
            traceback.print_exc(file=outfile)
        logger.critical(
            'An unexpected crash has occurred. Writing crash report to {}. '
            'Please verify you are running the latest version of flexget by using "flexget -V" '
            'from CLI or by using version_checker plugin'
            ' at https://flexget.com/Plugins/version_checker. '
            'You are currently using version {}',
            filename,
            get_current_flexget_version(),
        )
    logger.opt(exception=True).debug('Traceback:')
    return traceback.format_exc()
(self) -> str
52,288
flexget.manager
daemon_command
Handles the 'daemon' CLI command. Fires events: * manager.daemon.started * manager.daemon.completed :param options: argparse options
def daemon_command(self, options: argparse.Namespace) -> None: """ Handles the 'daemon' CLI command. Fires events: * manager.daemon.started * manager.daemon.completed :param options: argparse options """ # Import API so it can register to daemon.started event if options.action == 'start': if self.is_daemon: logger.error('Daemon already running for this config.') return elif self.task_queue.is_alive(): logger.error( 'Non-daemon execution of FlexGet is running. Cannot start daemon until it is finished.' ) return if options.daemonize: self.daemonize() if options.autoreload_config: self.autoreload_config = True try: signal.signal(signal.SIGTERM, self._handle_sigterm) except ValueError as e: # If flexget is being called from another script, e.g. windows service helper, and we are not the # main thread, this error will occur. logger.debug('Error registering sigterm handler: {}', e) self.is_daemon = True def run_daemon(tray_icon: 'TrayIcon' = None): fire_event('manager.daemon.started', self) self.task_queue.start() self.ipc_server.start() self.task_queue.wait() fire_event('manager.daemon.completed', self) if tray_icon: tray_icon.stop() if options.tray_icon: from flexget.tray_icon import tray_icon self._add_tray_icon_items(tray_icon) # Tray icon must be run in the main thread. m = threading.Thread(target=run_daemon, args=(tray_icon,)) m.start() tray_icon.run() m.join() else: run_daemon() elif options.action in ['stop', 'reload-config', 'status']: if not self.is_daemon: console('There does not appear to be a daemon running.') return if options.action == 'status': logger.debug('`daemon status` called. Daemon running. (PID: {})', os.getpid()) console(f'Daemon running. (PID: {os.getpid()})') elif options.action == 'stop': tasks = ( 'all queued tasks (if any) have' if options.wait else 'currently running task (if any) has' ) logger.info( 'Daemon shutdown requested. Shutdown will commence when {} finished executing.', tasks, ) self.shutdown(options.wait) elif options.action == 'reload-config': logger.info('Reloading config from disk.') try: self.load_config() except ValueError as e: logger.error('Error loading config: {}', e.args[0]) else: logger.info('Config successfully reloaded from disk.')
(self, options: argparse.Namespace) -> NoneType
52,289
flexget.manager
daemonize
Daemonizes the current process.
def daemonize(self) -> None:
    """Daemonizes the current process."""
    if sys.platform.startswith('win'):
        logger.error('Cannot daemonize on windows')
        return
    if threading.active_count() != 1:
        logger.critical(
            'There are {!r} active threads. Daemonizing now may cause strange failures.',
            threading.enumerate(),
        )

    logger.info('Daemonizing...')

    try:
        pid = os.fork()
        if pid > 0:
            # Don't run the exit handlers on the parent
            atexit._exithandlers = []
            # exit first parent
            sys.exit(0)
    except OSError as e:
        sys.stderr.write(f'fork #1 failed: {e.errno} ({e.strerror})\n')
        sys.exit(1)

    # decouple from parent environment
    os.chdir('/')
    os.setsid()
    os.umask(0)

    # do second fork
    try:
        pid = os.fork()
        if pid > 0:
            # Don't run the exit handlers on the parent
            atexit._exithandlers = []
            # exit from second parent
            sys.exit(0)
    except OSError as e:
        sys.stderr.write(f'fork #2 failed: {e.errno} ({e.strerror})\n')
        sys.exit(1)

    logger.info(f'Daemonize complete. New PID: {os.getpid()}')

    # redirect standard file descriptors
    sys.stdout.flush()
    sys.stderr.flush()
    si = open(os.devnull)
    so = open(os.devnull, 'ab+')
    se = open(os.devnull, 'ab+', 0)
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())

    # If we have a lock, update the lock file with our new pid
    if self._has_lock:
        self.write_lock()
(self) -> NoneType
52,290
flexget.manager
db_cleanup
Perform database cleanup if cleanup interval has been met. Fires events: * manager.db_cleanup If interval was met. Gives session to do the cleanup as a parameter. :param bool force: Run the cleanup no matter whether the interval has been met.
def db_cleanup(self, force: bool = False) -> None:
    """
    Perform database cleanup if cleanup interval has been met.

    Fires events:

    * manager.db_cleanup

      If interval was met. Gives session to do the cleanup as a parameter.

    :param bool force: Run the cleanup no matter whether the interval has been met.
    """
    expired = (
        self.persist.get('last_cleanup', datetime(1900, 1, 1))
        < datetime.now() - DB_CLEANUP_INTERVAL
    )
    if force or expired:
        logger.info('Running database cleanup.')
        with Session() as session:
            fire_event('manager.db_cleanup', self, session)
        # Try to VACUUM after cleanup
        fire_event('manager.db_vacuum', self)
        # Just in case some plugin was overzealous in its cleaning, mark the config changed
        self.config_changed()
        self.persist['last_cleanup'] = datetime.now()
    else:
        logger.debug('Not running db cleanup, last run {}', self.persist.get('last_cleanup'))
(self, force: bool = False) -> NoneType
52,291
flexget.manager
execute
Run all (can be limited with options) tasks from the config. :param options: Either an :class:`argparse.Namespace` instance, or a dict, containing options for execution :param priority: If there are other executions waiting to be run, they will be run in priority order, lowest first. :param suppress_warnings: Allows suppressing log warning about missing plugin in key phases :returns: a list of :class:`threading.Event` instances which will be set when each respective task has finished running
def execute(
    self,
    options: Optional[Union[dict, argparse.Namespace]] = None,
    priority: int = 1,
    suppress_warnings: Optional[Sequence[str]] = None,
) -> List[Tuple[str, str, threading.Event]]:
    """
    Run all (can be limited with options) tasks from the config.

    :param options: Either an :class:`argparse.Namespace` instance, or a dict, containing
        options for execution
    :param priority: If there are other executions waiting to be run, they will be run in
        priority order, lowest first.
    :param suppress_warnings: Allows suppressing log warning about missing plugin in key phases
    :returns: a list of :class:`threading.Event` instances which will be set when each
        respective task has finished running
    """
    if options is None:
        options = copy.copy(self.options.execute)
    elif isinstance(options, dict):
        options_namespace = copy.copy(self.options.execute)
        options_namespace.__dict__.update(options)
        options = options_namespace
    task_names = self.tasks
    # Only reload config if daemon
    config_hash = self.hash_config()
    if self.is_daemon and self.autoreload_config and self.config_file_hash != config_hash:
        logger.info('Config change detected. Reloading.')
        try:
            self.load_config(output_to_console=False, config_file_hash=config_hash)
            logger.info('Config successfully reloaded!')
        except Exception as e:
            logger.error('Reloading config failed: {}', e)
    # Handle --tasks
    if options.tasks:
        # Consider '*' the same as not specifying any tasks.
        # (So manual plugin doesn't consider them explicitly enabled.)
        if options.tasks == ['*']:
            options.tasks = None
        else:
            task_names = []
            for task in options.tasks:
                try:
                    task_names.extend(
                        m for m in self.matching_tasks(task) if m not in task_names
                    )
                except ValueError as e:
                    logger.error(e)
                    continue
            options.tasks = task_names
    # TODO: 1.2 This is a hack to make task priorities work still, not sure if it's the best one
    task_names = sorted(
        task_names, key=lambda t: self.config['tasks'][t].get('priority', 65535)
    )
    finished_events = []
    for task_name in task_names:
        task = Task(
            self,
            task_name,
            options=options,
            output=get_console_output(),
            session_id=flexget.log.get_log_session_id(),
            priority=priority,
            suppress_warnings=suppress_warnings,
        )
        self.task_queue.put(task)
        finished_events.append((task.id, task.name, task.finished_event))
    return finished_events
(self, options: Union[dict, argparse.Namespace, NoneType] = None, priority: int = 1, suppress_warnings: Optional[Sequence[str]] = None) -> List[Tuple[str, str, threading.Event]]
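A hypothetical usage sketch, assuming `manager` is an initialized Manager with a started task queue and the task names exist in the config:

# Queue two tasks at a high priority (lowest runs first) and block until both ran.
finished_events = manager.execute(options={'tasks': ['tv', 'movies']}, priority=0)
for task_id, task_name, event in finished_events:
    event.wait()  # set by the task queue when this task finishes
    print(f'task {task_name} ({task_id}) done')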
52,292
flexget.manager
execute_command
Handles the 'execute' CLI command. If there is already a task queue running in this process, adds the execution to the queue. If FlexGet is being invoked with this command, starts up a task queue and runs the execution. Fires events: * manager.execute.started * manager.execute.completed :param options: argparse options
def execute_command(self, options: argparse.Namespace) -> None:
    """
    Handles the 'execute' CLI command.

    If there is already a task queue running in this process, adds the execution to the
    queue. If FlexGet is being invoked with this command, starts up a task queue and runs
    the execution.

    Fires events:

    * manager.execute.started
    * manager.execute.completed

    :param options: argparse options
    """
    fire_event('manager.execute.started', self, options)
    if self.task_queue.is_alive() or self.is_daemon:
        if not self.task_queue.is_alive():
            logger.error(
                'Task queue has died unexpectedly. Restarting it. Please open an issue on Github and include'
                ' any previous error logs.'
            )
            self.task_queue = TaskQueue()
            self.task_queue.start()
        if len(self.task_queue):
            logger.verbose('There is a task already running, execution queued.')
        finished_events = self.execute(options)
        if not options.cron:
            # Wait until execution of all tasks has finished
            for _, _, event in finished_events:
                event.wait()
    else:
        self.task_queue.start()
        self.ipc_server.start()
        self.execute(options)
        self.shutdown(finish_queue=True)
        self.task_queue.wait()
    fire_event('manager.execute.completed', self, options)
(self, options: argparse.Namespace) -> NoneType
52,293
flexget.manager
handle_cli
Dispatch a cli command to the appropriate function. * :meth:`.execute_command` * :meth:`.daemon_command` * CLI plugin callback function The manager should have a lock and be initialized before calling this method. :param options: argparse options for command. Defaults to options that manager was instantiated with.
def handle_cli(self, options: Optional[argparse.Namespace] = None) -> None:
    """
    Dispatch a cli command to the appropriate function.

    * :meth:`.execute_command`
    * :meth:`.daemon_command`
    * CLI plugin callback function

    The manager should have a lock and be initialized before calling this method.

    :param options: argparse options for command. Defaults to options that manager was
        instantiated with.
    """
    if not options:
        options = self.options
    command = options.cli_command
    if command is None:
        raise Exception('Command missing')
    command_options = getattr(options, command)
    # First check for built-in commands
    if command in ['execute', 'daemon']:
        if command == 'execute':
            self.execute_command(command_options)
        elif command == 'daemon':
            self.daemon_command(command_options)
    else:
        # Otherwise dispatch the command to the callback function
        options.cli_command_callback(self, command_options)
(self, options: Optional[argparse.Namespace] = None) -> NoneType
52,294
flexget.manager
hash_config
null
def hash_config(self) -> Optional[str]:
    if not self.config_path:
        return None
    sha1_hash = hashlib.sha1()
    with open(self.config_path, 'rb') as f:
        while True:
            data = f.read(65536)
            if not data:
                break
            sha1_hash.update(data)
    return sha1_hash.hexdigest()
(self) -> Optional[str]
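The chunked-read pattern above generalizes to hashing any large file without loading it into memory at once; a minimal standalone sketch (the helper name is illustrative):

import hashlib

def sha1_of_file(path: str, chunk_size: int = 65536) -> str:
    """Digest a file incrementally instead of reading it fully into memory."""
    sha1 = hashlib.sha1()
    with open(path, 'rb') as f:
        while chunk := f.read(chunk_size):
            sha1.update(chunk)
    return sha1.hexdigest()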
52,295
flexget.manager
init_sqlalchemy
Initialize SQLAlchemy
def init_sqlalchemy(self) -> None: """Initialize SQLAlchemy""" try: if [int(part) for part in sqlalchemy.__version__.split('.')] < [0, 7, 0]: print( 'FATAL: SQLAlchemy 0.7.0 or newer required. Please upgrade your SQLAlchemy.', file=sys.stderr, ) sys.exit(1) except ValueError: logger.critical('Failed to check SQLAlchemy version, you may need to upgrade it') # SQLAlchemy if not self.database_uri: # in case running on windows, needs double \\ filename = self.db_filename.replace('\\', '\\\\') self.database_uri = f'sqlite:///{filename}' if self.db_filename and not os.path.exists(self.db_filename): logger.verbose('Creating new database {} - DO NOT INTERRUPT ...', self.db_filename) # fire up the engine logger.debug('Connecting to: {}', self.database_uri) try: self.engine = sqlalchemy.create_engine( self.database_uri, echo=self.options.debug_sql, connect_args={'check_same_thread': False, 'timeout': 10}, ) except ImportError as e: print( 'FATAL: Unable to use SQLite. Are you running Python 2.7, 3.3 or newer ?\n' 'Python should normally have SQLite support built in.\n' 'If you\'re running correct version of Python then it is not equipped with SQLite.\n' 'You can try installing `pysqlite`. If you have compiled python yourself, ' 'recompile it with SQLite support.\n' f'Error: {e}', file=sys.stderr, ) sys.exit(1) Session.configure(bind=self.engine) # create all tables, doesn't do anything to existing tables try: Base.metadata.create_all(bind=self.engine) except OperationalError as e: if os.path.exists(self.db_filename): print( f'{e.message} - make sure you have write permissions to file {self.db_filename}', file=sys.stderr, ) else: print( f'{e.message} - make sure you have write permissions to directory {self.config_base}', file=sys.stderr, ) raise
(self) -> NoneType
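The SQLite connection settings used above can be reproduced in isolation; a minimal sketch with an illustrative file name. `check_same_thread=False` lets the task queue thread share the connection, and `timeout=10` waits up to ten seconds on a locked database instead of failing immediately:

import sqlalchemy

engine = sqlalchemy.create_engine(
    'sqlite:///flexget.sqlite',
    connect_args={'check_same_thread': False, 'timeout': 10},
)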
52,296
flexget.manager
initialize
Load plugins, database, and config. Also initializes (but does not start) the task queue and ipc server. This should only be called after obtaining a lock.
def initialize(self) -> None:
    """
    Load plugins, database, and config. Also initializes (but does not start) the task
    queue and ipc server. This should only be called after obtaining a lock.
    """
    if self.initialized:
        raise RuntimeError('Cannot call initialize on an already initialized manager.')

    plugin.load_plugins(
        extra_plugins=[os.path.join(self.config_base, 'plugins')],
        extra_components=[os.path.join(self.config_base, 'components')],
    )

    # Reparse CLI options now that plugins are loaded
    self.options = get_parser().parse_args(self.args)

    self.task_queue = TaskQueue()
    self.ipc_server = IPCServer(self, self.options.ipc_port)

    self.setup_yaml()
    self.init_sqlalchemy()
    fire_event('manager.initialize', self)
    try:
        self.load_config()
    except ValueError as e:
        logger.critical('Failed to load config file: {}', e.args[0])
        raise

    # cannot be imported at module level because of circular references
    from flexget.utils.simple_persistence import SimplePersistence

    self.persist = SimplePersistence('manager')

    if db_schema.upgrade_required():
        logger.info('Database upgrade is required. Attempting now.')
        fire_event('manager.upgrade', self)
        if manager.db_upgraded:
            fire_event('manager.db_upgraded', self)
    fire_event('manager.startup', self)
    self.initialized = True
(self) -> NoneType
52,297
flexget.manager
load_config
Loads the config file from disk, validates and activates it. :raises: `ValueError` if there is a problem loading the config file
def load_config( self, output_to_console: bool = True, config_file_hash: Optional[str] = None ) -> None: """ Loads the config file from disk, validates and activates it. :raises: `ValueError` if there is a problem loading the config file """ fire_event('manager.before_config_load', self) with open(self.config_path, encoding='utf-8') as f: try: raw_config = f.read() except UnicodeDecodeError: logger.critical('Config file must be UTF-8 encoded.') raise ValueError('Config file is not UTF-8 encoded') try: self.config_file_hash = config_file_hash or self.hash_config() config = yaml.safe_load(raw_config) or {} except yaml.YAMLError as e: msg = str(e).replace('\n', ' ') msg = ' '.join(msg.split()) logger.critical(msg) if output_to_console: print('') print('-' * 79) print(' Malformed configuration file (check messages above). Common reasons:') print('-' * 79) print('') print(' o Indentation error') print(' o Missing : from end of the line') print(' o Non ASCII characters (use UTF8)') print( ' o If text contains any of :[]{}% characters it must be single-quoted ' '(eg. value{1} should be \'value{1}\')\n' ) # Not very good practice but we get several kind of exceptions here, I'm not even sure all of them # At least: ReaderError, YmlScannerError (or something like that) if isinstance(e, yaml.MarkedYAMLError): lines = 0 if e.problem is not None: print(f' Reason: {e.problem}\n') if e.problem == 'mapping values are not allowed here': print(' ----> MOST LIKELY REASON: Missing : from end of the line!') print('') if e.context_mark is not None: print( f' Check configuration near line {e.context_mark.line}, column {e.context_mark.column}' ) lines += 1 if e.problem_mark is not None: print( f' Check configuration near line {e.problem_mark.line}, column {e.problem_mark.column}' ) lines += 1 if lines: print('') if lines == 1: print(' Fault is almost always in this or previous line\n') if lines == 2: print(' Fault is almost always in one of these lines or previous ones\n') # When --debug escalate to full stacktrace if self.options.debug or not output_to_console: raise raise ValueError('Config file is not valid YAML') # config loaded successfully logger.debug('config_name: {}', self.config_name) logger.debug('config_base: {}', self.config_base) # Install the newly loaded config self.update_config(config)
(self, output_to_console: bool = True, config_file_hash: Optional[str] = None) -> NoneType
52,298
flexget.manager
matching_tasks
Create list of tasks to run, preserving order
def matching_tasks(self, task: str) -> List[str]:
    """Create list of tasks to run, preserving order"""
    task_names = [t for t in self.tasks if fnmatch.fnmatchcase(str(t).lower(), task.lower())]
    if not task_names:
        raise ValueError(f'`{task}` does not match any tasks')

    return task_names
(self, task: str) -> List[str]
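The case-insensitive glob matching is plain `fnmatch`; for example:

import fnmatch

tasks = ['TV-Shows', 'Movies', 'music']
pattern = 'tv*'
matches = [t for t in tasks if fnmatch.fnmatchcase(t.lower(), pattern.lower())]
print(matches)  # ['TV-Shows']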
52,299
flexget.manager
parse_initial_options
Parse what we can from cli args before plugins are loaded.
@staticmethod
def parse_initial_options(args: List[str]) -> argparse.Namespace:
    """Parse what we can from cli args before plugins are loaded."""
    try:
        options = CoreArgumentParser().parse_known_args(args, do_help=False)[0]
    except ParserError as exc:
        try:
            # If a non-built-in command was used, we need to parse with a parser that
            # doesn't define the subparsers
            options = manager_parser.parse_known_args(args, do_help=False)[0]
        except ParserError:
            manager_parser.print_help()
            print(f'\nError: {exc.message}')
            sys.exit(1)
    return options
(args: List[str]) -> argparse.Namespace
52,300
flexget.manager
release_lock
null
def release_lock(self) -> None:
    try:
        os.remove(self.lockfile)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
        logger.debug(f'Lockfile {self.lockfile} not found')
    else:
        logger.debug(f'Removed {self.lockfile}')
(self) -> NoneType
52,301
flexget.manager
save_config
Dumps current config to yaml config file
def save_config(self) -> None:
    """Dumps current config to yaml config file"""
    # TODO: Only keep x number of backups..
    # Back up the user's current config before overwriting
    try:
        self.backup_config()
    except OSError:
        return
    with open(self.config_path, 'w') as config_file:
        config_file.write(yaml.dump(self.user_config, default_flow_style=False))
(self) -> NoneType
52,302
flexget.manager
setup_yaml
Customize the yaml loader/dumper behavior
def setup_yaml(self) -> None:
    """Customize the yaml loader/dumper behavior"""

    # Represent OrderedDict as a regular dict (but don't sort it alphabetically)
    # This lets us order a dict in a yaml file for easier human consumption
    def represent_dict_order(self, data):
        return self.represent_mapping('tag:yaml.org,2002:map', data.items())

    yaml.add_representer(collections.OrderedDict, represent_dict_order)

    # Set up the dumper to increase the indent for lists
    def increase_indent_wrapper(func):
        def increase_indent(self, flow=False, indentless=False):
            func(self, flow, False)

        return increase_indent

    yaml.Dumper.increase_indent = increase_indent_wrapper(yaml.Dumper.increase_indent)
    yaml.SafeDumper.increase_indent = increase_indent_wrapper(yaml.SafeDumper.increase_indent)
(self) -> NoneType
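To see what the custom representer changes, here is a minimal sketch of dumping an OrderedDict with insertion order preserved (the keys are illustrative). Passing `data.items()` to `represent_mapping` is what bypasses PyYAML's default key sorting:

import collections
import yaml

def represent_dict_order(dumper, data):
    return dumper.represent_mapping('tag:yaml.org,2002:map', data.items())

yaml.add_representer(collections.OrderedDict, represent_dict_order)

config = collections.OrderedDict([('tasks', {'tv': {}}), ('schedules', [])])
print(yaml.dump(config, default_flow_style=False))
# tasks:
#   tv: {}
# schedules: []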
52,303
flexget.manager
shutdown
Request manager shutdown. :param bool finish_queue: Should scheduler finish the task queue
def shutdown(self, finish_queue: bool = True) -> None:
    """
    Request manager shutdown.

    :param bool finish_queue: Should scheduler finish the task queue
    """
    if not self.initialized:
        raise RuntimeError('Cannot shutdown manager that was never initialized.')
    fire_event('manager.shutdown_requested', self)
    self.task_queue.shutdown(finish_queue)
(self, finish_queue: bool = True) -> NoneType
52,304
flexget.manager
start
Starting point when executing from commandline, dispatch execution to correct destination. If there is a FlexGet process with an ipc server already running, the command will be sent there for execution and results will be streamed back. If not, this will attempt to obtain a lock, initialize the manager, and run the command here.
def start(self) -> None: """ Starting point when executing from commandline, dispatch execution to correct destination. If there is a FlexGet process with an ipc server already running, the command will be sent there for execution and results will be streamed back. If not, this will attempt to obtain a lock, initialize the manager, and run the command here. """ # If another process is started, send the execution to the running process ipc_info = self.check_ipc_info() # If we are connecting to a running daemon, we don't want to log to the log file, # the daemon is already handling that. if ipc_info: console( 'There is a FlexGet process already running for this config, sending execution there.' ) logger.debug('Sending command to running FlexGet process: {}', self.args) try: client = IPCClient(ipc_info['port'], ipc_info['password']) except ValueError as e: logger.error(e) else: try: client.handle_cli(self.args) except KeyboardInterrupt: logger.error( 'Disconnecting from daemon due to ctrl-c. Executions will still continue in the ' 'background.' ) except EOFError: logger.error('Connection from daemon was severed.') return if self.options.test: logger.info('Test mode, creating a copy from database ...') db_test_filename = os.path.join(self.config_base, f'test-{self.config_name}.sqlite') if os.path.exists(self.db_filename): shutil.copy(self.db_filename, db_test_filename) logger.info('Test database created') self.db_filename = db_test_filename # No running process, we start our own to handle command with self.acquire_lock(): self.initialize() self.handle_cli() self._shutdown()
(self) -> NoneType
52,305
flexget.manager
update_config
Provide a new config for the manager to use. :raises: `ValueError` and rolls back to previous config if the provided config is not valid.
def update_config(self, config: dict) -> None:
    """
    Provide a new config for the manager to use.

    :raises: `ValueError` and rolls back to previous config if the provided config
        is not valid.
    """
    new_user_config = config
    old_config = self.config
    try:
        self.config = self.validate_config(config)
    except ConfigError as e:
        for error in getattr(e, 'errors', []):
            logger.critical('[{}] {}', error.json_pointer, error.message)
        logger.debug('invalid config, rolling back')
        self.config = old_config
        raise
    logger.debug('New config data loaded.')
    self.user_config = copy.deepcopy(new_user_config)
    fire_event('manager.config_updated', self)
(self, config: dict) -> NoneType
52,306
flexget.manager
validate_config
Check all root level keywords are valid. Config may be modified by before_config_validate hooks. Modified config will be returned. :param config: Config to check. If not provided, current manager config will be checked. :raises: `ValueError` when config fails validation. There will be an `errors` attribute with the schema errors. :returns: Final validated config.
def validate_config(self, config: Optional[dict] = None) -> dict:
    """
    Check all root level keywords are valid. Config may be modified by before_config_validate
    hooks. Modified config will be returned.

    :param config: Config to check. If not provided, current manager config will be checked.
    :raises: `ValueError` when config fails validation. There will be an `errors` attribute
        with the schema errors.
    :returns: Final validated config.
    """
    conf = config if config else self.config
    conf = fire_event('manager.before_config_validate', conf, self)
    errors = config_schema.process_config(conf)
    if errors:
        err = ConfigError('Did not pass schema validation.')
        err.errors = errors
        raise err
    else:
        return conf
(self, config: Optional[dict] = None) -> dict
52,307
flexget.manager
write_lock
null
def write_lock(self, ipc_info: Optional[dict] = None) -> None:
    assert self._has_lock
    with open(self.lockfile, 'w', encoding='utf-8') as f:
        f.write(f'PID: {os.getpid()}\n')
        if ipc_info:
            for key in sorted(ipc_info):
                f.write(f'{key}: {ipc_info[key]}\n')
(self, ipc_info: Optional[dict] = None) -> NoneType
52,308
flexget
_is_debug
null
def _is_debug() -> bool:
    return any(
        arg in ['debug', '--debug', '--loglevel=trace', '--loglevel=debug']
        for arg in [a.lower() for a in sys.argv]
    )
() -> bool
52,317
flexget
main
Main entry point for Command Line Interface
def main(args: Optional[Sequence[str]] = None): """Main entry point for Command Line Interface""" if args is None: args = sys.argv[1:] try: log.initialize() try: manager = Manager(args) except (OSError, ValueError) as e: options = Manager.parse_initial_options(args) log.start(level=options.loglevel, to_file=False) if _is_debug(): import traceback traceback.print_exc() else: print('Could not instantiate manager: %s' % e, file=sys.stderr) sys.exit(1) else: log.start( manager.log_filename, manager.options.loglevel, to_file=manager.check_ipc_info() is None, to_console=not manager.options.cron, ) try: if manager.options.profile: try: import cProfile as profile except ImportError: import profile profile.runctx( 'manager.start()', globals(), locals(), os.path.join(manager.config_base, manager.options.profile), ) else: manager.start() except (OSError, ValueError) as e: if _is_debug(): import traceback traceback.print_exc() else: print('Could not start manager: %s' % e, file=sys.stderr) sys.exit(1) except KeyboardInterrupt: if _is_debug(): import traceback traceback.print_exc() print('Killed with keyboard interrupt.', file=sys.stderr) sys.exit(1)
(args: Optional[Sequence[str]] = None)
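A hedged sketch of invoking this entry point programmatically with an explicit argv (the task name is made up):

from flexget import main

# Equivalent to running `flexget execute --tasks my-task` from a shell.
main(['execute', '--tasks', 'my-task'])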
52,329
pgeocode
GeoDistance
Distance calculation from a city name or a postal code Parameters ---------- country: str, default='fr' country code. See the documentation for a list of supported countries. errors: str, default='ignore' how to handle not found elements. One of 'ignore' (return NaNs), 'error' (raise an exception), 'nearest' (find from nearest valid points)
class GeoDistance(Nominatim): """Distance calculation from a city name or a postal code Parameters ---------- data_path: str path to the dataset error: str, default='ignore' how to handle not found elements. One of 'ignore' (return NaNs), 'error' (raise an exception), 'nearest' (find from nearest valid points) """ def __init__(self, country: str = "fr", errors: str = "ignore"): super().__init__(country) def query_postal_code(self, x, y): """Get distance (in km) between postal codes Parameters ---------- x: array, list or int a list of postal codes y: array, list or int a list of postal codes Returns ------- d : array or int the calculated distances """ if isinstance(x, int): x = str(x) if isinstance(y, int): y = str(y) if isinstance(x, str): x = [x] single_x_entry = True else: single_x_entry = False df_x = super().query_postal_code(x) if isinstance(y, str): y = [y] single_y_entry = True else: single_y_entry = False df_y = super().query_postal_code(y) x_coords = df_x[["latitude", "longitude"]].values y_coords = df_y[["latitude", "longitude"]].values if x_coords.shape[0] == y_coords.shape[0]: pass elif x_coords.shape[0] == 1: x_coords = np.repeat(x_coords, y_coords.shape[0], axis=0) elif y_coords.shape[0] == 1: y_coords = np.repeat(y_coords, x_coords.shape[0], axis=0) else: raise ValueError("x and y must have the same number of elements") dist = haversine_distance(x_coords, y_coords) if single_x_entry and single_y_entry: return dist[0] else: return dist
(country: str = 'fr', errors: str = 'ignore')
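Typical usage as a sketch; exact distances depend on the downloaded GeoNames data:

import pgeocode

dist = pgeocode.GeoDistance('fr')
# A single pair of postal codes returns a scalar distance in km.
print(dist.query_postal_code('75013', '69006'))
# One-to-many broadcasts the single code against the list and returns an array.
print(dist.query_postal_code(['75013'], ['69006', '31000']))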
52,330
pgeocode
__init__
null
def __init__(self, country: str = "fr", errors: str = "ignore"): super().__init__(country)
(self, country: str = 'fr', errors: str = 'ignore')
52,331
pgeocode
_fuzzy_search
null
def _fuzzy_search(self, text: str, col: str, threshold: float = 80) -> pd.DataFrame:
    try:
        # thefuzz is not required to install pgeocode,
        # it is an optional dependency for enabling fuzzy search
        from thefuzz import fuzz
    except ModuleNotFoundError as err:
        raise ModuleNotFoundError(
            "Cannot use fuzzy search without 'thefuzz' package. "
            "It can be installed with: pip install thefuzz[speedup]"
        ) from err
    fuzzy_scores = self._data[col].apply(lambda x: fuzz.ratio(str(x), text))
    return self._data[fuzzy_scores >= threshold]
(self, text: str, col: str, threshold: float = 80) -> pandas.core.frame.DataFrame
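For intuition about the scores involved, `fuzz.ratio` is a normalized edit-distance similarity on a 0-100 scale:

from thefuzz import fuzz

print(fuzz.ratio('Paris', 'Pariis'))  # 91: close enough to pass a threshold of 80
print(fuzz.ratio('Paris', 'Lyon'))    # far below any sensible threshold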
52,332
pgeocode
_get_data
Load the data from disk; otherwise download and save it
@staticmethod
def _get_data(country: str) -> tuple[str, pd.DataFrame]:
    """Load the data from disk; otherwise download and save it"""
    data_path = os.path.join(STORAGE_DIR, country.upper() + ".txt")
    if os.path.exists(data_path):
        data = pd.read_csv(
            data_path,
            dtype={"postal_code": str},
            na_values=NA_VALUES,
            keep_default_na=False,
        )
    else:
        download_urls = [val.format(country=country) for val in DOWNLOAD_URL]
        with _open_extract_cycle_url(download_urls, country) as fh:
            data = pd.read_csv(
                fh,
                sep="\t",
                header=None,
                names=DATA_FIELDS,
                dtype={"postal_code": str},
                na_values=NA_VALUES,
                keep_default_na=False,
            )
        os.makedirs(STORAGE_DIR, exist_ok=True)
        data.to_csv(data_path, index=None)
    return data_path, data
(country: str) -> tuple[str, pandas.core.frame.DataFrame]
52,333
pgeocode
_index_postal_codes
Create a dataframe with unique postal codes
def _index_postal_codes(self) -> pd.DataFrame:
    """Create a dataframe with unique postal codes"""
    data_path_unique = self._data_path.replace(".txt", "-index.txt")

    if os.path.exists(data_path_unique):
        data_unique = pd.read_csv(
            data_path_unique,
            dtype={"postal_code": str},
            na_values=NA_VALUES,
            keep_default_na=False,
        )
    else:
        # group together places with the same postal code
        df_unique_cp_group = self._data.groupby("postal_code")
        data_unique = df_unique_cp_group[["latitude", "longitude"]].mean()
        valid_keys = set(DATA_FIELDS).difference(
            ["place_name", "latitude", "longitude", "postal_code"]
        )
        data_unique["place_name"] = df_unique_cp_group["place_name"].apply(
            lambda x: ", ".join([str(el) for el in x])
        )
        for key in valid_keys:
            data_unique[key] = df_unique_cp_group[key].first()
        data_unique = data_unique.reset_index()[DATA_FIELDS]
        data_unique.to_csv(data_path_unique, index=None)
    return data_unique
(self) -> pandas.core.frame.DataFrame
52,334
pgeocode
_normalize_postal_code
Normalize postal codes to the values contained in the database For instance, take into account only first letters when applicable. Takes in a pd.DataFrame
def _normalize_postal_code(self, codes: pd.DataFrame) -> pd.DataFrame:
    """Normalize postal codes to the values contained in the database

    For instance, take into account only first letters when applicable.
    Takes in a pd.DataFrame
    """
    codes["postal_code"] = codes.postal_code.str.upper()

    if self.country in ["GB", "IE", "CA"]:
        codes["postal_code"] = codes.postal_code.str.split().str.get(0)

    return codes
(self, codes: pandas.core.frame.DataFrame) -> pandas.core.frame.DataFrame
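Concretely, for countries with composite postal codes only the first block is kept, since that is what the database indexes; for example:

import pandas as pd

codes = pd.DataFrame({'postal_code': ['ec1r 0jh']})
codes['postal_code'] = codes.postal_code.str.upper().str.split().str.get(0)
print(codes.postal_code.iloc[0])  # 'EC1R', the GB outward code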
52,335
pgeocode
_str_contains_search
null
def _str_contains_search(self, text: str, col: str) -> pd.DataFrame:
    match_mask = self._data[col].str.lower().str.contains(text.lower())
    match_mask.fillna(False, inplace=True)
    return self._data[match_mask]
(self, text: str, col: str) -> pandas.core.frame.DataFrame
52,336
pgeocode
query_location
Get location information from a place name Parameters ---------- name: str string containing place names to search for. The search is case insensitive. top_k: int maximum number of results (rows in DataFrame) to return fuzzy_threshold: Optional[int] threshold (lower bound) for fuzzy string search for finding the place name, default None (=no fuzzy search) pass an integer value between 70 and 100 to enable fuzzy search (the lower the value, the more results) requires 'thefuzz' package to be installed: pip install thefuzz[speedup] (for more info: https://github.com/seatgeek/thefuzz) col: str which column in the internal data to search through Returns ------- df : pandas.DataFrame a DataFrame with the relevant information for all matching place names (or empty if no match was found)
def query_location(
    self,
    name: str,
    top_k: int = 100,
    fuzzy_threshold: int | None = None,
    col: str = "place_name",
) -> pd.DataFrame:
    """Get location information from a place name

    Parameters
    ----------
    name: str
        string containing place names to search for. The search is case insensitive.
    top_k: int
        maximum number of results (rows in DataFrame) to return
    fuzzy_threshold: Optional[int]
        threshold (lower bound) for fuzzy string search for finding the place name,
        default None (=no fuzzy search)
        pass an integer value between 70 and 100 to enable fuzzy search
        (the lower the value, the more results)
        requires 'thefuzz' package to be installed: pip install thefuzz[speedup]
        (for more info: https://github.com/seatgeek/thefuzz)
    col: str
        which column in the internal data to search through

    Returns
    -------
    df : pandas.DataFrame
        a DataFrame with the relevant information for all matching place names
        (or empty if no match was found)
    """
    contains_matches = self._str_contains_search(name, col)
    if len(contains_matches) > 0:
        return contains_matches.iloc[:top_k]

    if fuzzy_threshold is not None:
        fuzzy_matches = self._fuzzy_search(name, col, threshold=fuzzy_threshold)
        if len(fuzzy_matches) > 0:
            return fuzzy_matches.iloc[:top_k]

    return pd.DataFrame(columns=self._data.columns)
(self, name: str, top_k: int = 100, fuzzy_threshold: Optional[int] = None, col: str = 'place_name') -> pandas.core.frame.DataFrame
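A usage sketch; the second call only works with the optional 'thefuzz' dependency installed:

import pgeocode

nomi = pgeocode.Nominatim('fr')
df = nomi.query_location('Paris', top_k=5)  # case-insensitive substring match
df_typo = nomi.query_location('Pariis', fuzzy_threshold=80)  # falls back to fuzzy search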
52,337
pgeocode
query_postal_code
Get distance (in km) between postal codes Parameters ---------- x: array, list or int a list of postal codes y: array, list or int a list of postal codes Returns ------- d : array or int the calculated distances
def query_postal_code(self, x, y):
    """Get distance (in km) between postal codes

    Parameters
    ----------
    x: array, list or int
        a list of postal codes
    y: array, list or int
        a list of postal codes

    Returns
    -------
    d : array or int
        the calculated distances
    """
    if isinstance(x, int):
        x = str(x)

    if isinstance(y, int):
        y = str(y)

    if isinstance(x, str):
        x = [x]
        single_x_entry = True
    else:
        single_x_entry = False
    df_x = super().query_postal_code(x)

    if isinstance(y, str):
        y = [y]
        single_y_entry = True
    else:
        single_y_entry = False
    df_y = super().query_postal_code(y)

    x_coords = df_x[["latitude", "longitude"]].values
    y_coords = df_y[["latitude", "longitude"]].values
    if x_coords.shape[0] == y_coords.shape[0]:
        pass
    elif x_coords.shape[0] == 1:
        x_coords = np.repeat(x_coords, y_coords.shape[0], axis=0)
    elif y_coords.shape[0] == 1:
        y_coords = np.repeat(y_coords, x_coords.shape[0], axis=0)
    else:
        raise ValueError("x and y must have the same number of elements")

    dist = haversine_distance(x_coords, y_coords)
    if single_x_entry and single_y_entry:
        return dist[0]
    else:
        return dist
(self, x, y)
52,338
pgeocode
Nominatim
Query geographical location from a city name or a postal code Parameters ---------- country: str, default='fr' country code. See the documentation for a list of supported countries. unique: bool, default=True Create unique postcode index, merging all places with the same postcode into a single entry
class Nominatim: """Query geographical location from a city name or a postal code Parameters ---------- country: str, default='fr' country code. See the documentation for a list of supported countries. unique: bool, default=True Create unique postcode index, merging all places with the same postcode into a single entry """ def __init__(self, country: str = "fr", unique: bool = True): country = country.upper() if country not in COUNTRIES_VALID: raise ValueError( f"country={country} is not a known country code. " "See the README for a list of supported " "countries" ) if country == "AR": warnings.warn( "The Argentina data file contains 4-digit postal " "codes which were replaced with a new system " "in 1999." ) self.country = country self._data_path, self._data = self._get_data(country) if unique: self._data_frame = self._index_postal_codes() else: self._data_frame = self._data self.unique = unique @staticmethod def _get_data(country: str) -> tuple[str, pd.DataFrame]: """Load the data from disk; otherwise download and save it""" data_path = os.path.join(STORAGE_DIR, country.upper() + ".txt") if os.path.exists(data_path): data = pd.read_csv( data_path, dtype={"postal_code": str}, na_values=NA_VALUES, keep_default_na=False, ) else: download_urls = [val.format(country=country) for val in DOWNLOAD_URL] with _open_extract_cycle_url(download_urls, country) as fh: data = pd.read_csv( fh, sep="\t", header=None, names=DATA_FIELDS, dtype={"postal_code": str}, na_values=NA_VALUES, keep_default_na=False, ) os.makedirs(STORAGE_DIR, exist_ok=True) data.to_csv(data_path, index=None) return data_path, data def _index_postal_codes(self) -> pd.DataFrame: """Create a dataframe with unique postal codes""" data_path_unique = self._data_path.replace(".txt", "-index.txt") if os.path.exists(data_path_unique): data_unique = pd.read_csv( data_path_unique, dtype={"postal_code": str}, na_values=NA_VALUES, keep_default_na=False, ) else: # group together places with the same postal code df_unique_cp_group = self._data.groupby("postal_code") data_unique = df_unique_cp_group[["latitude", "longitude"]].mean() valid_keys = set(DATA_FIELDS).difference( ["place_name", "latitude", "longitude", "postal_code"] ) data_unique["place_name"] = df_unique_cp_group["place_name"].apply( lambda x: ", ".join([str(el) for el in x]) ) for key in valid_keys: data_unique[key] = df_unique_cp_group[key].first() data_unique = data_unique.reset_index()[DATA_FIELDS] data_unique.to_csv(data_path_unique, index=None) return data_unique def _normalize_postal_code(self, codes: pd.DataFrame) -> pd.DataFrame: """Normalize postal codes to the values contained in the database For instance, take into account only first letters when applicable. 
Takes in a pd.DataFrame """ codes["postal_code"] = codes.postal_code.str.upper() if self.country in ["GB", "IE", "CA"]: codes["postal_code"] = codes.postal_code.str.split().str.get(0) else: pass return codes def query_postal_code(self, codes): """Get locations information from postal codes Parameters ---------- codes: array, list or int an array of strings containing postal codes Returns ------- df : pandas.DataFrame a pandas.DataFrame with the relevant information """ if isinstance(codes, int): codes = str(codes) if isinstance(codes, str): codes = [codes] single_entry = True else: single_entry = False if not isinstance(codes, pd.DataFrame): codes = pd.DataFrame(codes, columns=["postal_code"]) codes = self._normalize_postal_code(codes) response = pd.merge(codes, self._data_frame, on="postal_code", how="left") if self.unique and single_entry: response = response.iloc[0] return response def query_location( self, name: str, top_k: int = 100, fuzzy_threshold: int | None = None, col: str = "place_name", ) -> pd.DataFrame: """Get location information from a place name Parameters ---------- name: str string containing place names to search for. The search is case insensitive. top_k: int maximum number of results (rows in DataFrame) to return fuzzy_threshold: Optional[int] threshold (lower bound) for fuzzy string search for finding the place name, default None (=no fuzzy search) pass an integer value between 70 and 100 to enable fuzzy search (the lower the value, the more results) requires 'thefuzz' package to be installed: pip install thefuzz[speedup] (for more info: https://github.com/seatgeek/thefuzz) col: str which column in the internal data to search through Returns ------- df : pandas.DataFrame a DataFrame with the relevant information for all matching place names (or empty if no match was found) """ contains_matches = self._str_contains_search(name, col) if len(contains_matches) > 0: return contains_matches.iloc[:top_k] if fuzzy_threshold is not None: fuzzy_matches = self._fuzzy_search(name, col, threshold=fuzzy_threshold) if len(fuzzy_matches) > 0: return fuzzy_matches.iloc[:top_k] return pd.DataFrame(columns=self._data.columns) def _str_contains_search(self, text: str, col: str) -> pd.DataFrame: match_mask = self._data[col].str.lower().str.contains(text.lower()) match_mask.fillna(False, inplace=True) return self._data[match_mask] def _fuzzy_search(self, text: str, col: str, threshold: float = 80) -> pd.DataFrame: try: # thefuzz is not required to install pgeocode, # it is an optional dependency for enabling fuzzy search from thefuzz import fuzz except ModuleNotFoundError as err: raise ModuleNotFoundError( "Cannot use fuzzy search without 'thefuzz' package. " "It can be installed with: pip install thefuzz[speedup]" ) from err fuzzy_scores = self._data[col].apply(lambda x: fuzz.ratio(str(x), text)) return self._data[fuzzy_scores >= threshold]
(country: str = 'fr', unique: bool = True)
52,339
pgeocode
__init__
null
def __init__(self, country: str = "fr", unique: bool = True): country = country.upper() if country not in COUNTRIES_VALID: raise ValueError( f"country={country} is not a known country code. " "See the README for a list of supported " "countries" ) if country == "AR": warnings.warn( "The Argentina data file contains 4-digit postal " "codes which were replaced with a new system " "in 1999." ) self.country = country self._data_path, self._data = self._get_data(country) if unique: self._data_frame = self._index_postal_codes() else: self._data_frame = self._data self.unique = unique
(self, country: str = 'fr', unique: bool = True)
52,346
pgeocode
query_postal_code
Get locations information from postal codes Parameters ---------- codes: array, list or int an array of strings containing postal codes Returns ------- df : pandas.DataFrame a pandas.DataFrame with the relevant information
def query_postal_code(self, codes):
    """Get locations information from postal codes

    Parameters
    ----------
    codes: array, list or int
        an array of strings containing postal codes

    Returns
    -------
    df : pandas.DataFrame
        a pandas.DataFrame with the relevant information
    """
    if isinstance(codes, int):
        codes = str(codes)

    if isinstance(codes, str):
        codes = [codes]
        single_entry = True
    else:
        single_entry = False

    if not isinstance(codes, pd.DataFrame):
        codes = pd.DataFrame(codes, columns=["postal_code"])

    codes = self._normalize_postal_code(codes)
    response = pd.merge(codes, self._data_frame, on="postal_code", how="left")
    if self.unique and single_entry:
        response = response.iloc[0]
    return response
(self, codes)
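A usage sketch; the available columns come from the GeoNames data fields:

import pgeocode

nomi = pgeocode.Nominatim('fr')
# A single code returns a Series; a list of codes returns a DataFrame with one row per code.
print(nomi.query_postal_code('75013')[['place_name', 'latitude', 'longitude']])
print(nomi.query_postal_code(['75013', '69006']))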
52,347
zipfile
ZipFile
Class with methods to open, read, write, close, list zip files. z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=True, compresslevel=None) file: Either the path to the file, or a file-like object. If it is a path, the file will be opened and closed by ZipFile. mode: The mode can be either read 'r', write 'w', exclusive create 'x', or append 'a'. compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib), ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma). allowZip64: if True ZipFile will create files with ZIP64 extensions when needed, otherwise it will raise an exception when this would be necessary. compresslevel: None (default for the given compression type) or an integer specifying the level to pass to the compressor. When using ZIP_STORED or ZIP_LZMA this keyword has no effect. When using ZIP_DEFLATED integers 0 through 9 are accepted. When using ZIP_BZIP2 integers 1 through 9 are accepted.
class ZipFile: """ Class with methods to open, read, write, close, list zip files. z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=True, compresslevel=None) file: Either the path to the file, or a file-like object. If it is a path, the file will be opened and closed by ZipFile. mode: The mode can be either read 'r', write 'w', exclusive create 'x', or append 'a'. compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib), ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma). allowZip64: if True ZipFile will create files with ZIP64 extensions when needed, otherwise it will raise an exception when this would be necessary. compresslevel: None (default for the given compression type) or an integer specifying the level to pass to the compressor. When using ZIP_STORED or ZIP_LZMA this keyword has no effect. When using ZIP_DEFLATED integers 0 through 9 are accepted. When using ZIP_BZIP2 integers 1 through 9 are accepted. """ fp = None # Set here since __del__ checks it _windows_illegal_name_trans_table = None def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=True, compresslevel=None, *, strict_timestamps=True): """Open the ZIP file with mode read 'r', write 'w', exclusive create 'x', or append 'a'.""" if mode not in ('r', 'w', 'x', 'a'): raise ValueError("ZipFile requires mode 'r', 'w', 'x', or 'a'") _check_compression(compression) self._allowZip64 = allowZip64 self._didModify = False self.debug = 0 # Level of printing: 0 through 3 self.NameToInfo = {} # Find file info given name self.filelist = [] # List of ZipInfo instances for archive self.compression = compression # Method of compression self.compresslevel = compresslevel self.mode = mode self.pwd = None self._comment = b'' self._strict_timestamps = strict_timestamps # Check if we were passed a file-like object if isinstance(file, os.PathLike): file = os.fspath(file) if isinstance(file, str): # No, it's a filename self._filePassed = 0 self.filename = file modeDict = {'r' : 'rb', 'w': 'w+b', 'x': 'x+b', 'a' : 'r+b', 'r+b': 'w+b', 'w+b': 'wb', 'x+b': 'xb'} filemode = modeDict[mode] while True: try: self.fp = io.open(file, filemode) except OSError: if filemode in modeDict: filemode = modeDict[filemode] continue raise break else: self._filePassed = 1 self.fp = file self.filename = getattr(file, 'name', None) self._fileRefCnt = 1 self._lock = threading.RLock() self._seekable = True self._writing = False try: if mode == 'r': self._RealGetContents() elif mode in ('w', 'x'): # set the modified flag so central directory gets written # even if no files are added to the archive self._didModify = True try: self.start_dir = self.fp.tell() except (AttributeError, OSError): self.fp = _Tellable(self.fp) self.start_dir = 0 self._seekable = False else: # Some file-like objects can provide tell() but not seek() try: self.fp.seek(self.start_dir) except (AttributeError, OSError): self._seekable = False elif mode == 'a': try: # See if file is a zip file self._RealGetContents() # seek to start of directory and overwrite self.fp.seek(self.start_dir) except BadZipFile: # file is not a zip file, just append self.fp.seek(0, 2) # set the modified flag so central directory gets written # even if no files are added to the archive self._didModify = True self.start_dir = self.fp.tell() else: raise ValueError("Mode must be 'r', 'w', 'x', or 'a'") except: fp = self.fp self.fp = None self._fpclose(fp) raise def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() def __repr__(self): result = 
['<%s.%s' % (self.__class__.__module__, self.__class__.__qualname__)] if self.fp is not None: if self._filePassed: result.append(' file=%r' % self.fp) elif self.filename is not None: result.append(' filename=%r' % self.filename) result.append(' mode=%r' % self.mode) else: result.append(' [closed]') result.append('>') return ''.join(result) def _RealGetContents(self): """Read in the table of contents for the ZIP file.""" fp = self.fp try: endrec = _EndRecData(fp) except OSError: raise BadZipFile("File is not a zip file") if not endrec: raise BadZipFile("File is not a zip file") if self.debug > 1: print(endrec) size_cd = endrec[_ECD_SIZE] # bytes in central directory offset_cd = endrec[_ECD_OFFSET] # offset of central directory self._comment = endrec[_ECD_COMMENT] # archive comment # "concat" is zero, unless zip was concatenated to another file concat = endrec[_ECD_LOCATION] - size_cd - offset_cd if endrec[_ECD_SIGNATURE] == stringEndArchive64: # If Zip64 extension structures are present, account for them concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator) if self.debug > 2: inferred = concat + offset_cd print("given, inferred, offset", offset_cd, inferred, concat) # self.start_dir: Position of start of central directory self.start_dir = offset_cd + concat if self.start_dir < 0: raise BadZipFile("Bad offset for central directory") fp.seek(self.start_dir, 0) data = fp.read(size_cd) fp = io.BytesIO(data) total = 0 while total < size_cd: centdir = fp.read(sizeCentralDir) if len(centdir) != sizeCentralDir: raise BadZipFile("Truncated central directory") centdir = struct.unpack(structCentralDir, centdir) if centdir[_CD_SIGNATURE] != stringCentralDir: raise BadZipFile("Bad magic number for central directory") if self.debug > 2: print(centdir) filename = fp.read(centdir[_CD_FILENAME_LENGTH]) flags = centdir[5] if flags & 0x800: # UTF-8 file names extension filename = filename.decode('utf-8') else: # Historical ZIP filename encoding filename = filename.decode('cp437') # Create ZipInfo instance to store file information x = ZipInfo(filename) x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH]) x.comment = fp.read(centdir[_CD_COMMENT_LENGTH]) x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET] (x.create_version, x.create_system, x.extract_version, x.reserved, x.flag_bits, x.compress_type, t, d, x.CRC, x.compress_size, x.file_size) = centdir[1:12] if x.extract_version > MAX_EXTRACT_VERSION: raise NotImplementedError("zip file version %.1f" % (x.extract_version / 10)) x.volume, x.internal_attr, x.external_attr = centdir[15:18] # Convert date/time code to (year, month, day, hour, min, sec) x._raw_time = t x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F, t>>11, (t>>5)&0x3F, (t&0x1F) * 2 ) x._decodeExtra() x.header_offset = x.header_offset + concat self.filelist.append(x) self.NameToInfo[x.filename] = x # update total bytes read from central directory total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH] + centdir[_CD_EXTRA_FIELD_LENGTH] + centdir[_CD_COMMENT_LENGTH]) if self.debug > 2: print("total", total) end_offset = self.start_dir for zinfo in sorted(self.filelist, key=lambda zinfo: zinfo.header_offset, reverse=True): zinfo._end_offset = end_offset end_offset = zinfo.header_offset def namelist(self): """Return a list of file names in the archive.""" return [data.filename for data in self.filelist] def infolist(self): """Return a list of class ZipInfo instances for files in the archive.""" return self.filelist def printdir(self, file=None): """Print a table of contents for the zip file.""" 
print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"), file=file) for zinfo in self.filelist: date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6] print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size), file=file) def testzip(self): """Read all the files and check the CRC.""" chunk_size = 2 ** 20 for zinfo in self.filelist: try: # Read by chunks, to avoid an OverflowError or a # MemoryError with very large embedded files. with self.open(zinfo.filename, "r") as f: while f.read(chunk_size): # Check CRC-32 pass except BadZipFile: return zinfo.filename def getinfo(self, name): """Return the instance of ZipInfo given 'name'.""" info = self.NameToInfo.get(name) if info is None: raise KeyError( 'There is no item named %r in the archive' % name) return info def setpassword(self, pwd): """Set default password for encrypted files.""" if pwd and not isinstance(pwd, bytes): raise TypeError("pwd: expected bytes, got %s" % type(pwd).__name__) if pwd: self.pwd = pwd else: self.pwd = None @property def comment(self): """The comment text associated with the ZIP file.""" return self._comment @comment.setter def comment(self, comment): if not isinstance(comment, bytes): raise TypeError("comment: expected bytes, got %s" % type(comment).__name__) # check for valid comment length if len(comment) > ZIP_MAX_COMMENT: import warnings warnings.warn('Archive comment is too long; truncating to %d bytes' % ZIP_MAX_COMMENT, stacklevel=2) comment = comment[:ZIP_MAX_COMMENT] self._comment = comment self._didModify = True def read(self, name, pwd=None): """Return file bytes for name.""" with self.open(name, "r", pwd) as fp: return fp.read() def open(self, name, mode="r", pwd=None, *, force_zip64=False): """Return file-like object for 'name'. name is a string for the file name within the ZIP file, or a ZipInfo object. mode should be 'r' to read a file already in the ZIP file, or 'w' to write to a file newly added to the archive. pwd is the password to decrypt files (only used for reading). When writing, if the file size is not known in advance but may exceed 2 GiB, pass force_zip64 to use the ZIP64 format, which can handle large files. If the size is known in advance, it is best to pass a ZipInfo instance for name, with zinfo.file_size set. """ if mode not in {"r", "w"}: raise ValueError('open() requires mode "r" or "w"') if pwd and not isinstance(pwd, bytes): raise TypeError("pwd: expected bytes, got %s" % type(pwd).__name__) if pwd and (mode == "w"): raise ValueError("pwd is only supported for reading files") if not self.fp: raise ValueError( "Attempt to use ZIP archive that was already closed") # Make sure we have an info object if isinstance(name, ZipInfo): # 'name' is already an info object zinfo = name elif mode == 'w': zinfo = ZipInfo(name) zinfo.compress_type = self.compression zinfo._compresslevel = self.compresslevel else: # Get info object for name zinfo = self.getinfo(name) if mode == 'w': return self._open_to_write(zinfo, force_zip64=force_zip64) if self._writing: raise ValueError("Can't read from the ZIP file while there " "is an open writing handle on it. 
" "Close the writing handle before trying to read.") # Open for reading: self._fileRefCnt += 1 zef_file = _SharedFile(self.fp, zinfo.header_offset, self._fpclose, self._lock, lambda: self._writing) try: # Skip the file header: fheader = zef_file.read(sizeFileHeader) if len(fheader) != sizeFileHeader: raise BadZipFile("Truncated file header") fheader = struct.unpack(structFileHeader, fheader) if fheader[_FH_SIGNATURE] != stringFileHeader: raise BadZipFile("Bad magic number for file header") fname = zef_file.read(fheader[_FH_FILENAME_LENGTH]) if fheader[_FH_EXTRA_FIELD_LENGTH]: zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH]) if zinfo.flag_bits & 0x20: # Zip 2.7: compressed patched data raise NotImplementedError("compressed patched data (flag bit 5)") if zinfo.flag_bits & 0x40: # strong encryption raise NotImplementedError("strong encryption (flag bit 6)") if fheader[_FH_GENERAL_PURPOSE_FLAG_BITS] & 0x800: # UTF-8 filename fname_str = fname.decode("utf-8") else: fname_str = fname.decode("cp437") if fname_str != zinfo.orig_filename: raise BadZipFile( 'File name in directory %r and header %r differ.' % (zinfo.orig_filename, fname)) if (zinfo._end_offset is not None and zef_file.tell() + zinfo.compress_size > zinfo._end_offset): raise BadZipFile(f"Overlapped entries: {zinfo.orig_filename!r} (possible zip bomb)") # check for encrypted flag & handle password is_encrypted = zinfo.flag_bits & 0x1 if is_encrypted: if not pwd: pwd = self.pwd if not pwd: raise RuntimeError("File %r is encrypted, password " "required for extraction" % name) else: pwd = None return ZipExtFile(zef_file, mode, zinfo, pwd, True) except: zef_file.close() raise def _open_to_write(self, zinfo, force_zip64=False): if force_zip64 and not self._allowZip64: raise ValueError( "force_zip64 is True, but allowZip64 was False when opening " "the ZIP file." ) if self._writing: raise ValueError("Can't write to the ZIP file while there is " "another write handle open on it. " "Close the first handle before opening another.") # Size and CRC are overwritten with correct data after processing the file zinfo.compress_size = 0 zinfo.CRC = 0 zinfo.flag_bits = 0x00 if zinfo.compress_type == ZIP_LZMA: # Compressed data includes an end-of-stream (EOS) marker zinfo.flag_bits |= 0x02 if not self._seekable: zinfo.flag_bits |= 0x08 if not zinfo.external_attr: zinfo.external_attr = 0o600 << 16 # permissions: ?rw------- # Compressed size can be larger than uncompressed size zip64 = self._allowZip64 and \ (force_zip64 or zinfo.file_size * 1.05 > ZIP64_LIMIT) if self._seekable: self.fp.seek(self.start_dir) zinfo.header_offset = self.fp.tell() self._writecheck(zinfo) self._didModify = True self.fp.write(zinfo.FileHeader(zip64)) self._writing = True return _ZipWriteFile(self, zinfo, zip64) def extract(self, member, path=None, pwd=None): """Extract a member from the archive to the current working directory, using its full name. Its file information is extracted as accurately as possible. `member' may be a filename or a ZipInfo object. You can specify a different directory using `path'. """ if path is None: path = os.getcwd() else: path = os.fspath(path) return self._extract_member(member, path, pwd) def extractall(self, path=None, members=None, pwd=None): """Extract all members from the archive to the current working directory. `path' specifies a different directory to extract to. `members' is optional and must be a subset of the list returned by namelist(). 
""" if members is None: members = self.namelist() if path is None: path = os.getcwd() else: path = os.fspath(path) for zipinfo in members: self._extract_member(zipinfo, path, pwd) @classmethod def _sanitize_windows_name(cls, arcname, pathsep): """Replace bad characters and remove trailing dots from parts.""" table = cls._windows_illegal_name_trans_table if not table: illegal = ':<>|"?*' table = str.maketrans(illegal, '_' * len(illegal)) cls._windows_illegal_name_trans_table = table arcname = arcname.translate(table) # remove trailing dots arcname = (x.rstrip('.') for x in arcname.split(pathsep)) # rejoin, removing empty parts. arcname = pathsep.join(x for x in arcname if x) return arcname def _extract_member(self, member, targetpath, pwd): """Extract the ZipInfo object 'member' to a physical file on the path targetpath. """ if not isinstance(member, ZipInfo): member = self.getinfo(member) # build the destination pathname, replacing # forward slashes to platform specific separators. arcname = member.filename.replace('/', os.path.sep) if os.path.altsep: arcname = arcname.replace(os.path.altsep, os.path.sep) # interpret absolute pathname as relative, remove drive letter or # UNC path, redundant separators, "." and ".." components. arcname = os.path.splitdrive(arcname)[1] invalid_path_parts = ('', os.path.curdir, os.path.pardir) arcname = os.path.sep.join(x for x in arcname.split(os.path.sep) if x not in invalid_path_parts) if os.path.sep == '\\': # filter illegal characters on Windows arcname = self._sanitize_windows_name(arcname, os.path.sep) targetpath = os.path.join(targetpath, arcname) targetpath = os.path.normpath(targetpath) # Create all upper directories if necessary. upperdirs = os.path.dirname(targetpath) if upperdirs and not os.path.exists(upperdirs): os.makedirs(upperdirs) if member.is_dir(): if not os.path.isdir(targetpath): os.mkdir(targetpath) return targetpath with self.open(member, pwd=pwd) as source, \ open(targetpath, "wb") as target: shutil.copyfileobj(source, target) return targetpath def _writecheck(self, zinfo): """Check for errors before writing a file to the archive.""" if zinfo.filename in self.NameToInfo: import warnings warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3) if self.mode not in ('w', 'x', 'a'): raise ValueError("write() requires mode 'w', 'x', or 'a'") if not self.fp: raise ValueError( "Attempt to write ZIP archive that was already closed") _check_compression(zinfo.compress_type) if not self._allowZip64: requires_zip64 = None if len(self.filelist) >= ZIP_FILECOUNT_LIMIT: requires_zip64 = "Files count" elif zinfo.file_size > ZIP64_LIMIT: requires_zip64 = "Filesize" elif zinfo.header_offset > ZIP64_LIMIT: requires_zip64 = "Zipfile size" if requires_zip64: raise LargeZipFile(requires_zip64 + " would require ZIP64 extensions") def write(self, filename, arcname=None, compress_type=None, compresslevel=None): """Put the bytes from filename into the archive under the name arcname.""" if not self.fp: raise ValueError( "Attempt to write to ZIP archive that was already closed") if self._writing: raise ValueError( "Can't write to ZIP archive while an open writing handle exists" ) zinfo = ZipInfo.from_file(filename, arcname, strict_timestamps=self._strict_timestamps) if zinfo.is_dir(): zinfo.compress_size = 0 zinfo.CRC = 0 else: if compress_type is not None: zinfo.compress_type = compress_type else: zinfo.compress_type = self.compression if compresslevel is not None: zinfo._compresslevel = compresslevel else: zinfo._compresslevel = self.compresslevel 
if zinfo.is_dir(): with self._lock: if self._seekable: self.fp.seek(self.start_dir) zinfo.header_offset = self.fp.tell() # Start of header bytes if zinfo.compress_type == ZIP_LZMA: # Compressed data includes an end-of-stream (EOS) marker zinfo.flag_bits |= 0x02 self._writecheck(zinfo) self._didModify = True self.filelist.append(zinfo) self.NameToInfo[zinfo.filename] = zinfo self.fp.write(zinfo.FileHeader(False)) self.start_dir = self.fp.tell() else: with open(filename, "rb") as src, self.open(zinfo, 'w') as dest: shutil.copyfileobj(src, dest, 1024*8) def writestr(self, zinfo_or_arcname, data, compress_type=None, compresslevel=None): """Write a file into the archive. The contents is 'data', which may be either a 'str' or a 'bytes' instance; if it is a 'str', it is encoded as UTF-8 first. 'zinfo_or_arcname' is either a ZipInfo instance or the name of the file in the archive.""" if isinstance(data, str): data = data.encode("utf-8") if not isinstance(zinfo_or_arcname, ZipInfo): zinfo = ZipInfo(filename=zinfo_or_arcname, date_time=time.localtime(time.time())[:6]) zinfo.compress_type = self.compression zinfo._compresslevel = self.compresslevel if zinfo.filename[-1] == '/': zinfo.external_attr = 0o40775 << 16 # drwxrwxr-x zinfo.external_attr |= 0x10 # MS-DOS directory flag else: zinfo.external_attr = 0o600 << 16 # ?rw------- else: zinfo = zinfo_or_arcname if not self.fp: raise ValueError( "Attempt to write to ZIP archive that was already closed") if self._writing: raise ValueError( "Can't write to ZIP archive while an open writing handle exists." ) if compress_type is not None: zinfo.compress_type = compress_type if compresslevel is not None: zinfo._compresslevel = compresslevel zinfo.file_size = len(data) # Uncompressed size with self._lock: with self.open(zinfo, mode='w') as dest: dest.write(data) def __del__(self): """Call the "close()" method in case the user forgot.""" self.close() def close(self): """Close the file, and for mode 'w', 'x' and 'a' write the ending records.""" if self.fp is None: return if self._writing: raise ValueError("Can't close the ZIP file while there is " "an open writing handle on it. 
" "Close the writing handle before closing the zip.") try: if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records with self._lock: if self._seekable: self.fp.seek(self.start_dir) self._write_end_record() finally: fp = self.fp self.fp = None self._fpclose(fp) def _write_end_record(self): for zinfo in self.filelist: # write central directory dt = zinfo.date_time dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2] dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2) extra = [] if zinfo.file_size > ZIP64_LIMIT \ or zinfo.compress_size > ZIP64_LIMIT: extra.append(zinfo.file_size) extra.append(zinfo.compress_size) file_size = 0xffffffff compress_size = 0xffffffff else: file_size = zinfo.file_size compress_size = zinfo.compress_size if zinfo.header_offset > ZIP64_LIMIT: extra.append(zinfo.header_offset) header_offset = 0xffffffff else: header_offset = zinfo.header_offset extra_data = zinfo.extra min_version = 0 if extra: # Append a ZIP64 field to the extra's extra_data = _strip_extra(extra_data, (1,)) extra_data = struct.pack( '<HH' + 'Q'*len(extra), 1, 8*len(extra), *extra) + extra_data min_version = ZIP64_VERSION if zinfo.compress_type == ZIP_BZIP2: min_version = max(BZIP2_VERSION, min_version) elif zinfo.compress_type == ZIP_LZMA: min_version = max(LZMA_VERSION, min_version) extract_version = max(min_version, zinfo.extract_version) create_version = max(min_version, zinfo.create_version) filename, flag_bits = zinfo._encodeFilenameFlags() centdir = struct.pack(structCentralDir, stringCentralDir, create_version, zinfo.create_system, extract_version, zinfo.reserved, flag_bits, zinfo.compress_type, dostime, dosdate, zinfo.CRC, compress_size, file_size, len(filename), len(extra_data), len(zinfo.comment), 0, zinfo.internal_attr, zinfo.external_attr, header_offset) self.fp.write(centdir) self.fp.write(filename) self.fp.write(extra_data) self.fp.write(zinfo.comment) pos2 = self.fp.tell() # Write end-of-zip-archive record centDirCount = len(self.filelist) centDirSize = pos2 - self.start_dir centDirOffset = self.start_dir requires_zip64 = None if centDirCount > ZIP_FILECOUNT_LIMIT: requires_zip64 = "Files count" elif centDirOffset > ZIP64_LIMIT: requires_zip64 = "Central directory offset" elif centDirSize > ZIP64_LIMIT: requires_zip64 = "Central directory size" if requires_zip64: # Need to write the ZIP64 end-of-archive records if not self._allowZip64: raise LargeZipFile(requires_zip64 + " would require ZIP64 extensions") zip64endrec = struct.pack( structEndArchive64, stringEndArchive64, 44, 45, 45, 0, 0, centDirCount, centDirCount, centDirSize, centDirOffset) self.fp.write(zip64endrec) zip64locrec = struct.pack( structEndArchive64Locator, stringEndArchive64Locator, 0, pos2, 1) self.fp.write(zip64locrec) centDirCount = min(centDirCount, 0xFFFF) centDirSize = min(centDirSize, 0xFFFFFFFF) centDirOffset = min(centDirOffset, 0xFFFFFFFF) endrec = struct.pack(structEndArchive, stringEndArchive, 0, 0, centDirCount, centDirCount, centDirSize, centDirOffset, len(self._comment)) self.fp.write(endrec) self.fp.write(self._comment) if self.mode == "a": self.fp.truncate() self.fp.flush() def _fpclose(self, fp): assert self._fileRefCnt > 0 self._fileRefCnt -= 1 if not self._fileRefCnt and not self._filePassed: fp.close()
(file, mode='r', compression=0, allowZip64=True, compresslevel=None, *, strict_timestamps=True)
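Taken together, the ZipFile methods above compose into a short round trip. A minimal usage sketch (hypothetical file and member names; standard library only):

import zipfile

# Create an archive and add one member from an in-memory string.
with zipfile.ZipFile("example.zip", mode="w", compression=zipfile.ZIP_DEFLATED) as zf:
    zf.writestr("hello.txt", "hello, zip\n")

# Reopen for reading: list members, verify CRCs, and read a member back.
with zipfile.ZipFile("example.zip", mode="r") as zf:
    zf.printdir()                # tabular listing: File Name / Modified / Size
    assert zf.testzip() is None  # None means every member's CRC-32 checked out
    print(zf.read("hello.txt").decode("utf-8"))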
52,348
zipfile
_RealGetContents
Read in the table of contents for the ZIP file.
def _RealGetContents(self): """Read in the table of contents for the ZIP file.""" fp = self.fp try: endrec = _EndRecData(fp) except OSError: raise BadZipFile("File is not a zip file") if not endrec: raise BadZipFile("File is not a zip file") if self.debug > 1: print(endrec) size_cd = endrec[_ECD_SIZE] # bytes in central directory offset_cd = endrec[_ECD_OFFSET] # offset of central directory self._comment = endrec[_ECD_COMMENT] # archive comment # "concat" is zero, unless zip was concatenated to another file concat = endrec[_ECD_LOCATION] - size_cd - offset_cd if endrec[_ECD_SIGNATURE] == stringEndArchive64: # If Zip64 extension structures are present, account for them concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator) if self.debug > 2: inferred = concat + offset_cd print("given, inferred, offset", offset_cd, inferred, concat) # self.start_dir: Position of start of central directory self.start_dir = offset_cd + concat if self.start_dir < 0: raise BadZipFile("Bad offset for central directory") fp.seek(self.start_dir, 0) data = fp.read(size_cd) fp = io.BytesIO(data) total = 0 while total < size_cd: centdir = fp.read(sizeCentralDir) if len(centdir) != sizeCentralDir: raise BadZipFile("Truncated central directory") centdir = struct.unpack(structCentralDir, centdir) if centdir[_CD_SIGNATURE] != stringCentralDir: raise BadZipFile("Bad magic number for central directory") if self.debug > 2: print(centdir) filename = fp.read(centdir[_CD_FILENAME_LENGTH]) flags = centdir[5] if flags & 0x800: # UTF-8 file names extension filename = filename.decode('utf-8') else: # Historical ZIP filename encoding filename = filename.decode('cp437') # Create ZipInfo instance to store file information x = ZipInfo(filename) x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH]) x.comment = fp.read(centdir[_CD_COMMENT_LENGTH]) x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET] (x.create_version, x.create_system, x.extract_version, x.reserved, x.flag_bits, x.compress_type, t, d, x.CRC, x.compress_size, x.file_size) = centdir[1:12] if x.extract_version > MAX_EXTRACT_VERSION: raise NotImplementedError("zip file version %.1f" % (x.extract_version / 10)) x.volume, x.internal_attr, x.external_attr = centdir[15:18] # Convert date/time code to (year, month, day, hour, min, sec) x._raw_time = t x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F, t>>11, (t>>5)&0x3F, (t&0x1F) * 2 ) x._decodeExtra() x.header_offset = x.header_offset + concat self.filelist.append(x) self.NameToInfo[x.filename] = x # update total bytes read from central directory total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH] + centdir[_CD_EXTRA_FIELD_LENGTH] + centdir[_CD_COMMENT_LENGTH]) if self.debug > 2: print("total", total) end_offset = self.start_dir for zinfo in sorted(self.filelist, key=lambda zinfo: zinfo.header_offset, reverse=True): zinfo._end_offset = end_offset end_offset = zinfo.header_offset
(self)
52,352
zipfile
__init__
Open the ZIP file with mode read 'r', write 'w', exclusive create 'x', or append 'a'.
def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=True, compresslevel=None, *, strict_timestamps=True): """Open the ZIP file with mode read 'r', write 'w', exclusive create 'x', or append 'a'.""" if mode not in ('r', 'w', 'x', 'a'): raise ValueError("ZipFile requires mode 'r', 'w', 'x', or 'a'") _check_compression(compression) self._allowZip64 = allowZip64 self._didModify = False self.debug = 0 # Level of printing: 0 through 3 self.NameToInfo = {} # Find file info given name self.filelist = [] # List of ZipInfo instances for archive self.compression = compression # Method of compression self.compresslevel = compresslevel self.mode = mode self.pwd = None self._comment = b'' self._strict_timestamps = strict_timestamps # Check if we were passed a file-like object if isinstance(file, os.PathLike): file = os.fspath(file) if isinstance(file, str): # No, it's a filename self._filePassed = 0 self.filename = file modeDict = {'r' : 'rb', 'w': 'w+b', 'x': 'x+b', 'a' : 'r+b', 'r+b': 'w+b', 'w+b': 'wb', 'x+b': 'xb'} filemode = modeDict[mode] while True: try: self.fp = io.open(file, filemode) except OSError: if filemode in modeDict: filemode = modeDict[filemode] continue raise break else: self._filePassed = 1 self.fp = file self.filename = getattr(file, 'name', None) self._fileRefCnt = 1 self._lock = threading.RLock() self._seekable = True self._writing = False try: if mode == 'r': self._RealGetContents() elif mode in ('w', 'x'): # set the modified flag so central directory gets written # even if no files are added to the archive self._didModify = True try: self.start_dir = self.fp.tell() except (AttributeError, OSError): self.fp = _Tellable(self.fp) self.start_dir = 0 self._seekable = False else: # Some file-like objects can provide tell() but not seek() try: self.fp.seek(self.start_dir) except (AttributeError, OSError): self._seekable = False elif mode == 'a': try: # See if file is a zip file self._RealGetContents() # seek to start of directory and overwrite self.fp.seek(self.start_dir) except BadZipFile: # file is not a zip file, just append self.fp.seek(0, 2) # set the modified flag so central directory gets written # even if no files are added to the archive self._didModify = True self.start_dir = self.fp.tell() else: raise ValueError("Mode must be 'r', 'w', 'x', or 'a'") except: fp = self.fp self.fp = None self._fpclose(fp) raise
(self, file, mode='r', compression=0, allowZip64=True, compresslevel=None, *, strict_timestamps=True)
52,354
zipfile
_extract_member
Extract the ZipInfo object 'member' to a physical file on the path targetpath.
def _extract_member(self, member, targetpath, pwd): """Extract the ZipInfo object 'member' to a physical file on the path targetpath. """ if not isinstance(member, ZipInfo): member = self.getinfo(member) # build the destination pathname, replacing # forward slashes to platform specific separators. arcname = member.filename.replace('/', os.path.sep) if os.path.altsep: arcname = arcname.replace(os.path.altsep, os.path.sep) # interpret absolute pathname as relative, remove drive letter or # UNC path, redundant separators, "." and ".." components. arcname = os.path.splitdrive(arcname)[1] invalid_path_parts = ('', os.path.curdir, os.path.pardir) arcname = os.path.sep.join(x for x in arcname.split(os.path.sep) if x not in invalid_path_parts) if os.path.sep == '\\': # filter illegal characters on Windows arcname = self._sanitize_windows_name(arcname, os.path.sep) targetpath = os.path.join(targetpath, arcname) targetpath = os.path.normpath(targetpath) # Create all upper directories if necessary. upperdirs = os.path.dirname(targetpath) if upperdirs and not os.path.exists(upperdirs): os.makedirs(upperdirs) if member.is_dir(): if not os.path.isdir(targetpath): os.mkdir(targetpath) return targetpath with self.open(member, pwd=pwd) as source, \ open(targetpath, "wb") as target: shutil.copyfileobj(source, target) return targetpath
(self, member, targetpath, pwd)
52,356
zipfile
_open_to_write
null
def _open_to_write(self, zinfo, force_zip64=False): if force_zip64 and not self._allowZip64: raise ValueError( "force_zip64 is True, but allowZip64 was False when opening " "the ZIP file." ) if self._writing: raise ValueError("Can't write to the ZIP file while there is " "another write handle open on it. " "Close the first handle before opening another.") # Size and CRC are overwritten with correct data after processing the file zinfo.compress_size = 0 zinfo.CRC = 0 zinfo.flag_bits = 0x00 if zinfo.compress_type == ZIP_LZMA: # Compressed data includes an end-of-stream (EOS) marker zinfo.flag_bits |= 0x02 if not self._seekable: zinfo.flag_bits |= 0x08 if not zinfo.external_attr: zinfo.external_attr = 0o600 << 16 # permissions: ?rw------- # Compressed size can be larger than uncompressed size zip64 = self._allowZip64 and \ (force_zip64 or zinfo.file_size * 1.05 > ZIP64_LIMIT) if self._seekable: self.fp.seek(self.start_dir) zinfo.header_offset = self.fp.tell() self._writecheck(zinfo) self._didModify = True self.fp.write(zinfo.FileHeader(zip64)) self._writing = True return _ZipWriteFile(self, zinfo, zip64)
(self, zinfo, force_zip64=False)
52,357
zipfile
_write_end_record
null
def _write_end_record(self): for zinfo in self.filelist: # write central directory dt = zinfo.date_time dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2] dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2) extra = [] if zinfo.file_size > ZIP64_LIMIT \ or zinfo.compress_size > ZIP64_LIMIT: extra.append(zinfo.file_size) extra.append(zinfo.compress_size) file_size = 0xffffffff compress_size = 0xffffffff else: file_size = zinfo.file_size compress_size = zinfo.compress_size if zinfo.header_offset > ZIP64_LIMIT: extra.append(zinfo.header_offset) header_offset = 0xffffffff else: header_offset = zinfo.header_offset extra_data = zinfo.extra min_version = 0 if extra: # Append a ZIP64 field to the extra's extra_data = _strip_extra(extra_data, (1,)) extra_data = struct.pack( '<HH' + 'Q'*len(extra), 1, 8*len(extra), *extra) + extra_data min_version = ZIP64_VERSION if zinfo.compress_type == ZIP_BZIP2: min_version = max(BZIP2_VERSION, min_version) elif zinfo.compress_type == ZIP_LZMA: min_version = max(LZMA_VERSION, min_version) extract_version = max(min_version, zinfo.extract_version) create_version = max(min_version, zinfo.create_version) filename, flag_bits = zinfo._encodeFilenameFlags() centdir = struct.pack(structCentralDir, stringCentralDir, create_version, zinfo.create_system, extract_version, zinfo.reserved, flag_bits, zinfo.compress_type, dostime, dosdate, zinfo.CRC, compress_size, file_size, len(filename), len(extra_data), len(zinfo.comment), 0, zinfo.internal_attr, zinfo.external_attr, header_offset) self.fp.write(centdir) self.fp.write(filename) self.fp.write(extra_data) self.fp.write(zinfo.comment) pos2 = self.fp.tell() # Write end-of-zip-archive record centDirCount = len(self.filelist) centDirSize = pos2 - self.start_dir centDirOffset = self.start_dir requires_zip64 = None if centDirCount > ZIP_FILECOUNT_LIMIT: requires_zip64 = "Files count" elif centDirOffset > ZIP64_LIMIT: requires_zip64 = "Central directory offset" elif centDirSize > ZIP64_LIMIT: requires_zip64 = "Central directory size" if requires_zip64: # Need to write the ZIP64 end-of-archive records if not self._allowZip64: raise LargeZipFile(requires_zip64 + " would require ZIP64 extensions") zip64endrec = struct.pack( structEndArchive64, stringEndArchive64, 44, 45, 45, 0, 0, centDirCount, centDirCount, centDirSize, centDirOffset) self.fp.write(zip64endrec) zip64locrec = struct.pack( structEndArchive64Locator, stringEndArchive64Locator, 0, pos2, 1) self.fp.write(zip64locrec) centDirCount = min(centDirCount, 0xFFFF) centDirSize = min(centDirSize, 0xFFFFFFFF) centDirOffset = min(centDirOffset, 0xFFFFFFFF) endrec = struct.pack(structEndArchive, stringEndArchive, 0, 0, centDirCount, centDirCount, centDirSize, centDirOffset, len(self._comment)) self.fp.write(endrec) self.fp.write(self._comment) if self.mode == "a": self.fp.truncate() self.fp.flush()
(self)
52,360
zipfile
extract
Extract a member from the archive to the current working directory, using its full name. Its file information is extracted as accurately as possible. `member' may be a filename or a ZipInfo object. You can specify a different directory using `path'.
def extract(self, member, path=None, pwd=None): """Extract a member from the archive to the current working directory, using its full name. Its file information is extracted as accurately as possible. `member' may be a filename or a ZipInfo object. You can specify a different directory using `path'. """ if path is None: path = os.getcwd() else: path = os.fspath(path) return self._extract_member(member, path, pwd)
(self, member, path=None, pwd=None)
52,361
zipfile
extractall
Extract all members from the archive to the current working directory. `path' specifies a different directory to extract to. `members' is optional and must be a subset of the list returned by namelist().
def extractall(self, path=None, members=None, pwd=None): """Extract all members from the archive to the current working directory. `path' specifies a different directory to extract to. `members' is optional and must be a subset of the list returned by namelist(). """ if members is None: members = self.namelist() if path is None: path = os.getcwd() else: path = os.fspath(path) for zipinfo in members: self._extract_member(zipinfo, path, pwd)
(self, path=None, members=None, pwd=None)
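A small sketch of extract/extractall as defined above (hypothetical paths; note that _extract_member normalizes member names and strips drive letters and '..' components before writing):

import zipfile

with zipfile.ZipFile("example.zip") as zf:
    # Extract one member into ./out, creating missing parent directories.
    target = zf.extract("hello.txt", path="out")
    print(target)  # platform-normalized path under out/

    # Extract everything; 'members' may restrict this to a subset of namelist().
    zf.extractall(path="out_all", members=zf.namelist())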
52,365
zipfile
open
Return file-like object for 'name'. name is a string for the file name within the ZIP file, or a ZipInfo object. mode should be 'r' to read a file already in the ZIP file, or 'w' to write to a file newly added to the archive. pwd is the password to decrypt files (only used for reading). When writing, if the file size is not known in advance but may exceed 2 GiB, pass force_zip64 to use the ZIP64 format, which can handle large files. If the size is known in advance, it is best to pass a ZipInfo instance for name, with zinfo.file_size set.
def open(self, name, mode="r", pwd=None, *, force_zip64=False): """Return file-like object for 'name'. name is a string for the file name within the ZIP file, or a ZipInfo object. mode should be 'r' to read a file already in the ZIP file, or 'w' to write to a file newly added to the archive. pwd is the password to decrypt files (only used for reading). When writing, if the file size is not known in advance but may exceed 2 GiB, pass force_zip64 to use the ZIP64 format, which can handle large files. If the size is known in advance, it is best to pass a ZipInfo instance for name, with zinfo.file_size set. """ if mode not in {"r", "w"}: raise ValueError('open() requires mode "r" or "w"') if pwd and not isinstance(pwd, bytes): raise TypeError("pwd: expected bytes, got %s" % type(pwd).__name__) if pwd and (mode == "w"): raise ValueError("pwd is only supported for reading files") if not self.fp: raise ValueError( "Attempt to use ZIP archive that was already closed") # Make sure we have an info object if isinstance(name, ZipInfo): # 'name' is already an info object zinfo = name elif mode == 'w': zinfo = ZipInfo(name) zinfo.compress_type = self.compression zinfo._compresslevel = self.compresslevel else: # Get info object for name zinfo = self.getinfo(name) if mode == 'w': return self._open_to_write(zinfo, force_zip64=force_zip64) if self._writing: raise ValueError("Can't read from the ZIP file while there " "is an open writing handle on it. " "Close the writing handle before trying to read.") # Open for reading: self._fileRefCnt += 1 zef_file = _SharedFile(self.fp, zinfo.header_offset, self._fpclose, self._lock, lambda: self._writing) try: # Skip the file header: fheader = zef_file.read(sizeFileHeader) if len(fheader) != sizeFileHeader: raise BadZipFile("Truncated file header") fheader = struct.unpack(structFileHeader, fheader) if fheader[_FH_SIGNATURE] != stringFileHeader: raise BadZipFile("Bad magic number for file header") fname = zef_file.read(fheader[_FH_FILENAME_LENGTH]) if fheader[_FH_EXTRA_FIELD_LENGTH]: zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH]) if zinfo.flag_bits & 0x20: # Zip 2.7: compressed patched data raise NotImplementedError("compressed patched data (flag bit 5)") if zinfo.flag_bits & 0x40: # strong encryption raise NotImplementedError("strong encryption (flag bit 6)") if fheader[_FH_GENERAL_PURPOSE_FLAG_BITS] & 0x800: # UTF-8 filename fname_str = fname.decode("utf-8") else: fname_str = fname.decode("cp437") if fname_str != zinfo.orig_filename: raise BadZipFile( 'File name in directory %r and header %r differ.' % (zinfo.orig_filename, fname)) if (zinfo._end_offset is not None and zef_file.tell() + zinfo.compress_size > zinfo._end_offset): raise BadZipFile(f"Overlapped entries: {zinfo.orig_filename!r} (possible zip bomb)") # check for encrypted flag & handle password is_encrypted = zinfo.flag_bits & 0x1 if is_encrypted: if not pwd: pwd = self.pwd if not pwd: raise RuntimeError("File %r is encrypted, password " "required for extraction" % name) else: pwd = None return ZipExtFile(zef_file, mode, zinfo, pwd, True) except: zef_file.close() raise
(self, name, mode='r', pwd=None, *, force_zip64=False)
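The open() method above supports streaming in both directions. A minimal sketch (hypothetical names; pass force_zip64=True when writing a member whose final size is unknown but may exceed 2 GiB):

import zipfile

with zipfile.ZipFile("stream.zip", mode="w") as zf:
    # mode='w' returns a writable file-like handle for a new member.
    with zf.open("log.txt", mode="w") as dst:
        dst.write(b"line 1\n")
        dst.write(b"line 2\n")

with zipfile.ZipFile("stream.zip", mode="r") as zf:
    # mode='r' streams the member; the handle supports line iteration.
    with zf.open("log.txt") as src:
        for line in src:
            print(line)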
52,367
zipfile
read
Return file bytes for name.
def read(self, name, pwd=None): """Return file bytes for name.""" with self.open(name, "r", pwd) as fp: return fp.read()
(self, name, pwd=None)
52,370
zipfile
write
Put the bytes from filename into the archive under the name arcname.
def write(self, filename, arcname=None, compress_type=None, compresslevel=None): """Put the bytes from filename into the archive under the name arcname.""" if not self.fp: raise ValueError( "Attempt to write to ZIP archive that was already closed") if self._writing: raise ValueError( "Can't write to ZIP archive while an open writing handle exists" ) zinfo = ZipInfo.from_file(filename, arcname, strict_timestamps=self._strict_timestamps) if zinfo.is_dir(): zinfo.compress_size = 0 zinfo.CRC = 0 else: if compress_type is not None: zinfo.compress_type = compress_type else: zinfo.compress_type = self.compression if compresslevel is not None: zinfo._compresslevel = compresslevel else: zinfo._compresslevel = self.compresslevel if zinfo.is_dir(): with self._lock: if self._seekable: self.fp.seek(self.start_dir) zinfo.header_offset = self.fp.tell() # Start of header bytes if zinfo.compress_type == ZIP_LZMA: # Compressed data includes an end-of-stream (EOS) marker zinfo.flag_bits |= 0x02 self._writecheck(zinfo) self._didModify = True self.filelist.append(zinfo) self.NameToInfo[zinfo.filename] = zinfo self.fp.write(zinfo.FileHeader(False)) self.start_dir = self.fp.tell() else: with open(filename, "rb") as src, self.open(zinfo, 'w') as dest: shutil.copyfileobj(src, dest, 1024*8)
(self, filename, arcname=None, compress_type=None, compresslevel=None)
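A sketch of write() adding an on-disk file under a different archive name (the source file is hypothetical; per-member compression settings override the archive-level defaults):

import zipfile

with zipfile.ZipFile("files.zip", mode="w") as zf:
    # 'report.csv' must already exist on disk; it is stored as data/report.csv.
    zf.write("report.csv", arcname="data/report.csv",
             compress_type=zipfile.ZIP_DEFLATED, compresslevel=9)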
52,371
zipfile
writestr
Write a file into the archive. The contents is 'data', which may be either a 'str' or a 'bytes' instance; if it is a 'str', it is encoded as UTF-8 first. 'zinfo_or_arcname' is either a ZipInfo instance or the name of the file in the archive.
def writestr(self, zinfo_or_arcname, data, compress_type=None, compresslevel=None): """Write a file into the archive. The contents is 'data', which may be either a 'str' or a 'bytes' instance; if it is a 'str', it is encoded as UTF-8 first. 'zinfo_or_arcname' is either a ZipInfo instance or the name of the file in the archive.""" if isinstance(data, str): data = data.encode("utf-8") if not isinstance(zinfo_or_arcname, ZipInfo): zinfo = ZipInfo(filename=zinfo_or_arcname, date_time=time.localtime(time.time())[:6]) zinfo.compress_type = self.compression zinfo._compresslevel = self.compresslevel if zinfo.filename[-1] == '/': zinfo.external_attr = 0o40775 << 16 # drwxrwxr-x zinfo.external_attr |= 0x10 # MS-DOS directory flag else: zinfo.external_attr = 0o600 << 16 # ?rw------- else: zinfo = zinfo_or_arcname if not self.fp: raise ValueError( "Attempt to write to ZIP archive that was already closed") if self._writing: raise ValueError( "Can't write to ZIP archive while an open writing handle exists." ) if compress_type is not None: zinfo.compress_type = compress_type if compresslevel is not None: zinfo._compresslevel = compresslevel zinfo.file_size = len(data) # Uncompressed size with self._lock: with self.open(zinfo, mode='w') as dest: dest.write(data)
(self, zinfo_or_arcname, data, compress_type=None, compresslevel=None)
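Passing a ZipInfo instead of a name lets writestr() control per-member metadata, as in this sketch (hypothetical member name; the permission bits mirror how writestr sets external_attr itself):

import time
import zipfile

zi = zipfile.ZipInfo("notes/readme.txt", date_time=time.localtime()[:6])
zi.compress_type = zipfile.ZIP_DEFLATED
zi.external_attr = 0o644 << 16  # -rw-r--r-- permissions on extraction

with zipfile.ZipFile("meta.zip", mode="w") as zf:
    zf.writestr(zi, "str payloads are encoded as UTF-8 first")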
52,372
pgeocode
_open_extract_cycle_url
Same as _open_extract_url, but cycle through URLs until one works. We start by opening the first URL in the list, and if it fails, move to the next, until one works or the end of the list is reached.
def _index_postal_codes(self) -> pd.DataFrame: """Create a dataframe with unique postal codes""" data_path_unique = self._data_path.replace(".txt", "-index.txt") if os.path.exists(data_path_unique): data_unique = pd.read_csv( data_path_unique, dtype={"postal_code": str}, na_values=NA_VALUES, keep_default_na=False, ) else: # group together places with the same postal code df_unique_cp_group = self._data.groupby("postal_code") data_unique = df_unique_cp_group[["latitude", "longitude"]].mean() valid_keys = set(DATA_FIELDS).difference( ["place_name", "latitude", "longitude", "postal_code"] ) data_unique["place_name"] = df_unique_cp_group["place_name"].apply( lambda x: ", ".join([str(el) for el in x]) ) for key in valid_keys: data_unique[key] = df_unique_cp_group[key].first() data_unique = data_unique.reset_index()[DATA_FIELDS] data_unique.to_csv(data_path_unique, index=None) return data_unique
(urls: list[str], country: str) -> Any
52,373
pgeocode
_open_extract_url
Download contents for a URL. If the file has a .zip extension, open it and extract the country. Returns the opened file object.
def _index_postal_codes(self) -> pd.DataFrame: """Create a dataframe with unique postal codes""" data_path_unique = self._data_path.replace(".txt", "-index.txt") if os.path.exists(data_path_unique): data_unique = pd.read_csv( data_path_unique, dtype={"postal_code": str}, na_values=NA_VALUES, keep_default_na=False, ) else: # group together places with the same postal code df_unique_cp_group = self._data.groupby("postal_code") data_unique = df_unique_cp_group[["latitude", "longitude"]].mean() valid_keys = set(DATA_FIELDS).difference( ["place_name", "latitude", "longitude", "postal_code"] ) data_unique["place_name"] = df_unique_cp_group["place_name"].apply( lambda x: ", ".join([str(el) for el in x]) ) for key in valid_keys: data_unique[key] = df_unique_cp_group[key].first() data_unique = data_unique.reset_index()[DATA_FIELDS] data_unique.to_csv(data_path_unique, index=None) return data_unique
(url: str, country: str) -> Any
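Both helpers above are internal to pgeocode's download path. For orientation, typical top-level use goes through the public Nominatim class, roughly as below (a sketch from the public API; 'fr' and '75013' are illustrative, and the first call downloads GeoNames data over the network):

import pgeocode

nomi = pgeocode.Nominatim("fr")        # fetches and caches the country dataset
rec = nomi.query_postal_code("75013")  # pandas Series with place/lat/lon fields
print(rec.place_name, rec.latitude, rec.longitude)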
52,375
pgeocode
haversine_distance
Haversine (great circle) distance Calculate the great circle distance between two points on the earth (specified in decimal degrees) Parameters ---------- x : array, shape=(n_samples, 2) the first list of coordinates (degrees) y : array, shape=(n_samples, 2) the second list of coordinates (degrees) Returns ------- d : array, shape=(n_samples,) the distance between coordinates (km) References ---------- https://en.wikipedia.org/wiki/Great-circle_distance
def haversine_distance(x, y): """Haversine (great circle) distance Calculate the great circle distance between two points on the earth (specified in decimal degrees) Parameters ---------- x : array, shape=(n_samples, 2) the first list of coordinates (degrees) y : array, shape=(n_samples, 2) the second list of coordinates (degrees) Returns ------- d : array, shape=(n_samples,) the distance between coordinates (km) References ---------- https://en.wikipedia.org/wiki/Great-circle_distance """ x_rad = np.radians(x) y_rad = np.radians(y) d = y_rad - x_rad dlat, dlon = d.T x_lat = x_rad[:, 0] y_lat = y_rad[:, 0] a = ( np.sin(dlat / 2.0) ** 2 + np.cos(x_lat) * np.cos(y_lat) * np.sin(dlon / 2.0) ** 2 ) c = 2 * np.arcsin(np.sqrt(a)) return EARTH_RADIUS * c
(x, y)
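A worked call for haversine_distance, assuming it is importable from the top-level pgeocode module and that EARTH_RADIUS is in kilometres, as the docstring states (coordinates are illustrative):

import numpy as np
from pgeocode import haversine_distance

x = np.array([[48.8566, 2.3522]])   # Paris  (lat, lon) in decimal degrees
y = np.array([[51.5074, -0.1278]])  # London (lat, lon) in decimal degrees
print(haversine_distance(x, y))     # roughly [344.] km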
52,381
zabbix_utils.exceptions
APINotSupported
Exception class when object/action is not supported by Zabbix API. Args: message (str): Not supported object/action message. version (str): Current version of Zabbix API.
class APINotSupported(ModuleBaseException): """Exception class when object/action is not supported by Zabbix API. Args: message (str): Not supported object/action message. version (str): Current version of Zabbix API. """ def __init__(self, message: str, version: str = None): if version: message = f"{message} is unsupported for Zabbix {version} version" super().__init__(message)
(message: str, version: str = None)
52,382
zabbix_utils.exceptions
__init__
null
def __init__(self, message: str, version: str = None): if version: message = f"{message} is unsupported for Zabbix {version} version" super().__init__(message)
(self, message: str, version: Optional[str] = None)
52,383
zabbix_utils.exceptions
APIRequestError
Exception class when Zabbix API returns error by request. Args: api_error (str|dict): Raw error message from Zabbix API.
class APIRequestError(ModuleBaseException): """Exception class when Zabbix API returns error by request. Args: api_error (str|dict): Raw error message from Zabbix API. """ def __init__(self, api_error: Union[str, dict]): if isinstance(api_error, dict): api_error['body'] = ModuleUtils.hide_private(api_error['body']) super().__init__("{message} {data}".format(**api_error)) for key, value in api_error.items(): setattr(self, key, value) else: super().__init__(api_error)
(api_error: Union[str, dict])
52,384
zabbix_utils.exceptions
__init__
null
def __init__(self, api_error: Union[str, dict]): if isinstance(api_error, dict): api_error['body'] = ModuleUtils.hide_private(api_error['body']) super().__init__("{message} {data}".format(**api_error)) for key, value in api_error.items(): setattr(self, key, value) else: super().__init__(api_error)
(self, api_error: Union[str, dict])
52,385
zabbix_utils.types
APIVersion
Zabbix API version object. Args: apiver (str): Raw version in string format.
class APIVersion(): """Zabbix API version object. Args: apiver (str): Raw version in string format. """ def __init__(self, apiver: str): self.__raw = apiver self.__first, self.__second, self.__third = self.__parse_version(self.__raw) def __getitem__(self, index: int) -> Any: # Get a symbol from the raw version string by index # For compatibility with using Zabbix version as a string return self.__raw[index] def is_lts(self) -> bool: """Check if the current version is LTS. Returns: bool: `True` if the current version is LTS. """ return self.__second == 0 @property def major(self) -> float: """Get major version number. Returns: float: A major version number. """ return float(f"{self.__first}.{self.__second}") @property def minor(self) -> int: """Get minor version number. Returns: int: A minor version number. """ return self.__third def __parse_version(self, ver: str) -> List[Any]: # Parse the version string into a list of integers. match = re.fullmatch(r'(\d+)\.(\d+)\.(\d+)', ver) if match is None: raise ValueError( f"Unable to parse version of Zabbix API: {ver}. " + f"Default '{__max_supported__}.0' format is expected." ) from None return list(map(int, match.groups())) def __str__(self) -> str: return self.__raw def __repr__(self) -> str: return self.__raw def __eq__(self, other: Union[float, str]) -> bool: if isinstance(other, float): return self.major == other if isinstance(other, str): return [self.__first, self.__second, self.__third] == self.__parse_version(other) raise TypeError( f"'==' not supported between instances of '{type(self).__name__}' and \ '{type(other).__name__}', only 'float' or 'str' is expected" ) def __gt__(self, other: Union[float, str]) -> bool: if isinstance(other, float): return self.major > other if isinstance(other, str): return [self.__first, self.__second, self.__third] > self.__parse_version(other) raise TypeError( f"'>' not supported between instances of '{type(self).__name__}' and \ '{type(other).__name__}', only 'float' or 'str' is expected" ) def __lt__(self, other: Union[float, str]) -> bool: if isinstance(other, float): return self.major < other if isinstance(other, str): return [self.__first, self.__second, self.__third] < self.__parse_version(other) raise TypeError( f"'<' not supported between instances of '{type(self).__name__}' and \ '{type(other).__name__}', only 'float' or 'str' is expected" ) def __ne__(self, other: Any) -> bool: return not self.__eq__(other) def __ge__(self, other: Any) -> bool: return not self.__lt__(other) def __le__(self, other: Any) -> bool: return not self.__gt__(other)
(apiver: str)
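A usage sketch for APIVersion, based solely on the class above:

ver = APIVersion("6.0.18")

print(ver.major)     # 6.0  (float built from the first two components)
print(ver.minor)     # 18
print(ver.is_lts())  # True: the second component is 0

# Comparisons accept a float (major only) or a full 'x.y.z' string.
assert ver == 6.0
assert ver > "6.0.7"
assert ver < "7.0.0"
assert ver[0] == "6"  # indexing falls through to the raw version string

Anything other than a float or an 'x.y.z' string raises TypeError from the comparison operators, or ValueError from __parse_version.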
52,386
zabbix_utils.types
__parse_version
null
def __parse_version(self, ver: str) -> List[Any]: # Parse the version string into a list of integers. match = re.fullmatch(r'(\d+)\.(\d+)\.(\d+)', ver) if match is None: raise ValueError( f"Unable to parse version of Zabbix API: {ver}. " + f"Default '{__max_supported__}.0' format is expected." ) from None return list(map(int, match.groups()))
(self, ver: str) -> List[Any]
52,387
zabbix_utils.types
__eq__
null
def __eq__(self, other: Union[float, str]) -> bool: if isinstance(other, float): return self.major == other if isinstance(other, str): return [self.__first, self.__second, self.__third] == self.__parse_version(other) raise TypeError( f"'==' not supported between instances of '{type(self).__name__}' and \ '{type(other).__name__}', only 'float' or 'str' is expected" )
(self, other: Union[float, str]) -> bool
52,388
zabbix_utils.types
__ge__
null
def __ge__(self, other: Any) -> bool: return not self.__lt__(other)
(self, other: Any) -> bool
52,389
zabbix_utils.types
__getitem__
null
def __getitem__(self, index: int) -> Any: # Get a symbol from the raw version string by index # For compatibility with using Zabbix version as a string return self.__raw[index]
(self, index: int) -> Any
52,390
zabbix_utils.types
__gt__
null
def __gt__(self, other: Union[float, str]) -> bool: if isinstance(other, float): return self.major > other if isinstance(other, str): return [self.__first, self.__second, self.__third] > self.__parse_version(other) raise TypeError( f"'>' not supported between instances of '{type(self).__name__}' and \ '{type(other).__name__}', only 'float' or 'str' is expected" )
(self, other: Union[float, str]) -> bool
52,391
zabbix_utils.types
__init__
null
def __init__(self, apiver: str): self.__raw = apiver self.__first, self.__second, self.__third = self.__parse_version(self.__raw)
(self, apiver: str)
52,392
zabbix_utils.types
__le__
null
def __le__(self, other: Any) -> bool: return not self.__gt__(other)
(self, other: Any) -> bool
52,393
zabbix_utils.types
__lt__
null
def __lt__(self, other: Union[float, str]) -> bool: if isinstance(other, float): return self.major < other if isinstance(other, str): return [self.__first, self.__second, self.__third] < self.__parse_version(other) raise TypeError( f"'<' not supported between instances of '{type(self).__name__}' and \ '{type(other).__name__}', only 'float' or 'str' is expected" )
(self, other: Union[float, str]) -> bool
52,394
zabbix_utils.types
__ne__
null
def __ne__(self, other: Any) -> bool: return not self.__eq__(other)
(self, other: Any) -> bool
52,395
zabbix_utils.types
__repr__
null
def __repr__(self) -> str: return self.__raw
(self) -> str
52,396
zabbix_utils.types
__str__
null
def __str__(self) -> str: return self.__raw
(self) -> str
52,397
zabbix_utils.types
is_lts
Check if the current version is LTS. Returns: bool: `True` if the current version is LTS.
def is_lts(self) -> bool: """Check if the current version is LTS. Returns: bool: `True` if the current version is LTS. """ return self.__second == 0
(self) -> bool
52,398
zabbix_utils.aiogetter
AsyncGetter
Zabbix get asynchronous implementation. Args: host (str, optional): Zabbix agent address. Defaults to `'127.0.0.1'`. port (int, optional): Zabbix agent port. Defaults to `10050`. timeout (int, optional): Connection timeout value. Defaults to `10`. source_ip (str, optional): IP from which to establish connection. Defaults to `None`. ssl_context (Callable, optional): Func(), returned prepared ssl.SSLContext. Defaults to `None`.
class AsyncGetter(): """Zabbix get asynchronous implementation. Args: host (str, optional): Zabbix agent address. Defaults to `'127.0.0.1'`. port (int, optional): Zabbix agent port. Defaults to `10050`. timeout (int, optional): Connection timeout value. Defaults to `10`. source_ip (str, optional): IP from which to establish connection. Defaults to `None`. ssl_context (Callable, optional): Func(), returned prepared ssl.SSLContext. \ Defaults to `None`. """ def __init__(self, host: str = '127.0.0.1', port: int = 10050, timeout: int = 10, source_ip: Optional[str] = None, ssl_context: Optional[Callable] = None): self.host = host self.port = port self.timeout = timeout self.source_ip = source_ip self.ssl_context = ssl_context if self.ssl_context: if not isinstance(self.ssl_context, Callable): raise TypeError('Value "ssl_context" should be a function.') async def __get_response(self, reader: asyncio.StreamReader) -> Optional[str]: result = await ZabbixProtocol.parse_async_packet(reader, log, ProcessingError) log.debug('Received data: %s', result) return result async def get(self, key: str) -> Optional[str]: """Gets item value from Zabbix agent by specified key. Args: key (str): Zabbix item key. Returns: str: Value from Zabbix agent for specified key. """ packet = ZabbixProtocol.create_packet(key, log) connection_params = { "host": self.host, "port": self.port } if self.source_ip: connection_params['local_addr'] = (self.source_ip, 0) if self.ssl_context: connection_params['ssl'] = self.ssl_context() if not isinstance(connection_params['ssl'], ssl.SSLContext): raise TypeError( 'Function "ssl_context" must return "ssl.SSLContext".') from None connection = asyncio.open_connection(**connection_params) try: reader, writer = await asyncio.wait_for(connection, timeout=self.timeout) writer.write(packet) await writer.drain() except asyncio.TimeoutError as err: log.error( 'The connection to %s timed out after %d seconds', f"{self.host}:{self.port}", self.timeout ) raise err except (ConnectionRefusedError, socket.gaierror) as err: log.error( 'An error occurred while trying to connect to %s: %s', f"{self.host}:{self.port}", getattr(err, 'msg', str(err)) ) raise err except (OSError, socket.error) as err: log.warning( 'An error occurred while trying to send to %s: %s', f"{self.host}:{self.port}", getattr(err, 'msg', str(err)) ) raise err try: response = await self.__get_response(reader) except (ConnectionResetError, asyncio.exceptions.IncompleteReadError) as err: log.debug('Get value error: %s', err) log.warning('Check access restrictions in Zabbix agent configuration.') raise err log.debug('Response from [%s:%s]: %s', self.host, self.port, response) writer.close() await writer.wait_closed() return AgentResponse(response)
(host: str = '127.0.0.1', port: int = 10050, timeout: int = 10, source_ip: Optional[str] = None, ssl_context: Optional[Callable] = None)
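A minimal driver for AsyncGetter, assuming a Zabbix agent reachable on 127.0.0.1:10050 ('agent.ping' is a standard agent item; the top-level import path is assumed, and AgentResponse wraps the raw answer):

import asyncio
from zabbix_utils import AsyncGetter  # import path assumed

async def main():
    agent = AsyncGetter(host="127.0.0.1", port=10050, timeout=5)
    resp = await agent.get("agent.ping")  # returns an AgentResponse
    print(resp)

asyncio.run(main())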
52,399
zabbix_utils.aiogetter
__get_response
null
async def __get_response(self, reader: asyncio.StreamReader) -> Optional[str]: result = await ZabbixProtocol.parse_async_packet(reader, log, ProcessingError) log.debug('Received data: %s', result) return result
(self, reader: asyncio.streams.StreamReader) -> Optional[str]
52,401
zabbix_utils.aiogetter
get
Gets item value from Zabbix agent by specified key. Args: key (str): Zabbix item key. Returns: str: Value from Zabbix agent for specified key.
async def get(self, key: str) -> Optional[str]: """Gets item value from Zabbix agent by specified key. Args: key (str): Zabbix item key. Returns: str: Value from Zabbix agent for specified key. """ packet = ZabbixProtocol.create_packet(key, log) connection_params = { "host": self.host, "port": self.port } if self.source_ip: connection_params['local_addr'] = (self.source_ip, 0) if self.ssl_context: connection_params['ssl'] = self.ssl_context() if not isinstance(connection_params['ssl'], ssl.SSLContext): raise TypeError( 'Function "ssl_context" must return "ssl.SSLContext".') from None connection = asyncio.open_connection(**connection_params) try: reader, writer = await asyncio.wait_for(connection, timeout=self.timeout) writer.write(packet) await writer.drain() except asyncio.TimeoutError as err: log.error( 'The connection to %s timed out after %d seconds', f"{self.host}:{self.port}", self.timeout ) raise err except (ConnectionRefusedError, socket.gaierror) as err: log.error( 'An error occurred while trying to connect to %s: %s', f"{self.host}:{self.port}", getattr(err, 'msg', str(err)) ) raise err except (OSError, socket.error) as err: log.warning( 'An error occurred while trying to send to %s: %s', f"{self.host}:{self.port}", getattr(err, 'msg', str(err)) ) raise err try: response = await self.__get_response(reader) except (ConnectionResetError, asyncio.exceptions.IncompleteReadError) as err: log.debug('Get value error: %s', err) log.warning('Check access restrictions in Zabbix agent configuration.') raise err log.debug('Response from [%s:%s]: %s', self.host, self.port, response) writer.close() await writer.wait_closed() return AgentResponse(response)
(self, key: str) -> Optional[str]
52,402
zabbix_utils.aiosender
AsyncSender
Zabbix sender asynchronous implementation. Args: server (str, optional): Zabbix server address. Defaults to `'127.0.0.1'`. port (int, optional): Zabbix server port. Defaults to `10051`. use_config (bool, optional): Specifying configuration use. Defaults to `False`. timeout (int, optional): Connection timeout value. Defaults to `10`. use_ipv6 (bool, optional): Specifying IPv6 use instead of IPv4. Defaults to `False`. source_ip (str, optional): IP from which to establish connection. Defaults to `None`. chunk_size (int, optional): Number of packets in one chunk. Defaults to `250`. clusters (tuple|list, optional): List of Zabbix clusters. Defaults to `None`. ssl_context (Callable, optional): Func(`tls`), returned prepared ssl.SSLContext. Defaults to `None`. compression (bool, optional): Specifying compression use. Defaults to `False`. config_path (str, optional): Path to Zabbix agent configuration file. Defaults to `/etc/zabbix/zabbix_agentd.conf`.
class AsyncSender(): """Zabbix sender asynchronous implementation. Args: server (str, optional): Zabbix server address. Defaults to `'127.0.0.1'`. port (int, optional): Zabbix server port. Defaults to `10051`. use_config (bool, optional): Specifying configuration use. Defaults to `False`. timeout (int, optional): Connection timeout value. Defaults to `10`. use_ipv6 (bool, optional): Specifying IPv6 use instead of IPv4. Defaults to `False`. source_ip (str, optional): IP from which to establish connection. Defaults to `None`. chunk_size (int, optional): Number of packets in one chunk. Defaults to `250`. clusters (tuple|list, optional): List of Zabbix clusters. Defaults to `None`. ssl_context (Callable, optional): Func(`tls`), returned prepared ssl.SSLContext. \ Defaults to `None`. compression (bool, optional): Specifying compression use. Defaults to `False`. config_path (str, optional): Path to Zabbix agent configuration file. Defaults to \ `/etc/zabbix/zabbix_agentd.conf`. """ def __init__(self, server: Optional[str] = None, port: int = 10051, use_config: bool = False, timeout: int = 10, use_ipv6: bool = False, source_ip: Optional[str] = None, chunk_size: int = 250, clusters: Union[tuple, list] = None, ssl_context: Optional[Callable] = None, compression: bool = False, config_path: Optional[str] = '/etc/zabbix/zabbix_agentd.conf'): self.timeout = timeout self.use_ipv6 = use_ipv6 self.tls = {} self.source_ip = None self.chunk_size = chunk_size self.compression = compression if ssl_context is not None: if not isinstance(ssl_context, Callable): raise TypeError('Value "ssl_context" should be a function.') from None self.ssl_context = ssl_context if source_ip is not None: self.source_ip = source_ip if use_config: self.clusters = [] self.__load_config(config_path) return if clusters is not None: if not (isinstance(clusters, tuple) or isinstance(clusters, list)): raise TypeError('Value "clusters" should be a tuple or a list.') from None clusters = clusters.copy() if server is not None: clusters.append([f"{server}:{port}"]) self.clusters = [Cluster(c) for c in clusters] else: self.clusters = [Cluster([f"{server or '127.0.0.1'}:{port}"])] def __read_config(self, config: configparser.SectionProxy) -> None: server_row = config.get('ServerActive') or config.get('Server') or '127.0.0.1:10051' for cluster in server_row.split(','): self.clusters.append(Cluster(cluster.strip().split(';'))) if 'SourceIP' in config: self.source_ip = config.get('SourceIP') for key in config: if key.startswith('tls'): self.tls[key] = config.get(key) def __load_config(self, filepath: str) -> None: config = configparser.ConfigParser(strict=False) with open(filepath, 'r', encoding='utf-8') as cfg: config.read_string('[root]\n' + cfg.read()) self.__read_config(config['root']) async def __get_response(self, reader: asyncio.StreamReader) -> Optional[str]: try: result = json.loads( await ZabbixProtocol.parse_async_packet(reader, log, ProcessingError) ) except json.decoder.JSONDecodeError as err: log.debug('Unexpected response was received from Zabbix.') raise err log.debug('Received data: %s', result) return result def __create_request(self, items: list) -> dict: return { "request": "sender data", "data": [i.to_json() for i in items] } async def __chunk_send(self, items: list) -> dict: responses = {} packet = ZabbixProtocol.create_packet(self.__create_request(items), log, self.compression) for cluster in self.clusters: active_node = None for i, node in enumerate(cluster.nodes): log.debug('Trying to send data to %s', node) 
connection_params = { "host": node.address, "port": node.port } if self.source_ip: connection_params['local_addr'] = (self.source_ip, 0) if self.ssl_context is not None: connection_params['ssl'] = self.ssl_context(self.tls) if not isinstance(connection_params['ssl'], ssl.SSLContext): raise TypeError( 'Function "ssl_context" must return "ssl.SSLContext".') from None connection = asyncio.open_connection(**connection_params) try: reader, writer = await asyncio.wait_for(connection, timeout=self.timeout) except asyncio.TimeoutError: log.debug( 'The connection to %s timed out after %d seconds', node, self.timeout ) except (ConnectionRefusedError, socket.gaierror) as err: log.debug( 'An error occurred while trying to connect to %s: %s', node, getattr(err, 'msg', str(err)) ) else: if i > 0: cluster.nodes[0], cluster.nodes[i] = cluster.nodes[i], cluster.nodes[0] active_node = node break if active_node is None: log.error( 'Couldn\'t connect to all of cluster nodes: %s', str(list(cluster.nodes)) ) raise ProcessingError( f"Couldn't connect to all of cluster nodes: {list(cluster.nodes)}" ) try: writer.write(packet) send_data = writer.drain() await asyncio.wait_for(send_data, timeout=self.timeout) except (asyncio.TimeoutError, socket.timeout) as err: log.error( 'The connection to %s timed out after %d seconds while trying to send', active_node, self.timeout ) writer.close() await writer.wait_closed() raise err except (OSError, socket.error) as err: log.warning( 'An error occurred while trying to send to %s: %s', active_node, getattr(err, 'msg', str(err)) ) writer.close() await writer.wait_closed() raise err try: response = await self.__get_response(reader) except (ConnectionResetError, asyncio.exceptions.IncompleteReadError) as err: log.debug('Get value error: %s', err) raise err log.debug('Response from %s: %s', active_node, response) if response and response.get('response') != 'success': raise ProcessingError(response) from None responses[active_node] = response writer.close() await writer.wait_closed() return responses async def send(self, items: list) -> TrapperResponse: """Sends packets and receives an answer from Zabbix. Args: items (list): List of ItemValue objects. Returns: TrapperResponse: Response from Zabbix server/proxy. """ # Split the list of items into chunks of size self.chunk_size. chunks = [items[i:i + self.chunk_size] for i in range(0, len(items), self.chunk_size)] # Merge responses into a single TrapperResponse object. result = TrapperResponse() # TrapperResponse details for each node and chunk. result.details = {} for i, chunk in enumerate(chunks): if not all(isinstance(item, ItemValue) for item in chunk): log.debug('Received unexpected item list. It must be a list of \ ItemValue objects: %s', json.dumps(chunk)) raise ProcessingError(f"Received unexpected item list. \ It must be a list of ItemValue objects: {json.dumps(chunk)}") resp_by_node = await self.__chunk_send(chunk) node_step = 1 for node, resp in resp_by_node.items(): try: result.add(resp, (i + 1) * node_step) except ProcessingError as err: log.debug(err) raise ProcessingError(err) from None node_step += 1 if node not in result.details: result.details[node] = [] result.details[node].append(TrapperResponse(i+1).add(resp)) return result async def send_value(self, host: str, key: str, value: str, clock: Optional[int] = None, ns: Optional[int] = None) -> TrapperResponse: """Sends one value and receives an answer from Zabbix. Args: host (str): Specify host name the item belongs to (as registered in Zabbix frontend). 
key (str): Specify item key to send value to. value (str): Specify item value. clock (int, optional): Specify time in Unix timestamp format. Defaults to `None`. ns (int, optional): Specify time expressed in nanoseconds. Defaults to `None`. Returns: TrapperResponse: Response from Zabbix server/proxy. """ return await self.send([ItemValue(host, key, value, clock, ns)])
(server: Optional[str] = None, port: int = 10051, use_config: bool = False, timeout: int = 10, use_ipv6: bool = False, source_ip: Optional[str] = None, chunk_size: int = 250, clusters: Union[tuple, list] = None, ssl_context: Optional[Callable] = None, compression: bool = False, config_path: Optional[str] = '/etc/zabbix/zabbix_agentd.conf')
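A minimal driver for AsyncSender, assuming a Zabbix server/proxy trapper reachable on 127.0.0.1:10051 ('host1' and 'item.key' are illustrative; ItemValue's field order follows send_value above, and the top-level import path is assumed):

import asyncio
from zabbix_utils import AsyncSender, ItemValue  # import path assumed

async def main():
    sender = AsyncSender(server="127.0.0.1", port=10051)

    # One-off value; clock/ns default to None.
    print(await sender.send_value("host1", "item.key", "10"))

    # Batched send; items are split into chunks of sender.chunk_size (250 by default).
    items = [ItemValue("host1", "item.key", str(i)) for i in range(3)]
    print(await sender.send(items))

asyncio.run(main())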
52,403
zabbix_utils.aiosender
__chunk_send
null
async def __chunk_send(self, items: list) -> dict: responses = {} packet = ZabbixProtocol.create_packet(self.__create_request(items), log, self.compression) for cluster in self.clusters: active_node = None for i, node in enumerate(cluster.nodes): log.debug('Trying to send data to %s', node) connection_params = { "host": node.address, "port": node.port } if self.source_ip: connection_params['local_addr'] = (self.source_ip, 0) if self.ssl_context is not None: connection_params['ssl'] = self.ssl_context(self.tls) if not isinstance(connection_params['ssl'], ssl.SSLContext): raise TypeError( 'Function "ssl_context" must return "ssl.SSLContext".') from None connection = asyncio.open_connection(**connection_params) try: reader, writer = await asyncio.wait_for(connection, timeout=self.timeout) except asyncio.TimeoutError: log.debug( 'The connection to %s timed out after %d seconds', node, self.timeout ) except (ConnectionRefusedError, socket.gaierror) as err: log.debug( 'An error occurred while trying to connect to %s: %s', node, getattr(err, 'msg', str(err)) ) else: if i > 0: cluster.nodes[0], cluster.nodes[i] = cluster.nodes[i], cluster.nodes[0] active_node = node break if active_node is None: log.error( 'Couldn\'t connect to all of cluster nodes: %s', str(list(cluster.nodes)) ) raise ProcessingError( f"Couldn't connect to all of cluster nodes: {list(cluster.nodes)}" ) try: writer.write(packet) send_data = writer.drain() await asyncio.wait_for(send_data, timeout=self.timeout) except (asyncio.TimeoutError, socket.timeout) as err: log.error( 'The connection to %s timed out after %d seconds while trying to send', active_node, self.timeout ) writer.close() await writer.wait_closed() raise err except (OSError, socket.error) as err: log.warning( 'An error occurred while trying to send to %s: %s', active_node, getattr(err, 'msg', str(err)) ) writer.close() await writer.wait_closed() raise err try: response = await self.__get_response(reader) except (ConnectionResetError, asyncio.exceptions.IncompleteReadError) as err: log.debug('Get value error: %s', err) raise err log.debug('Response from %s: %s', active_node, response) if response and response.get('response') != 'success': raise ProcessingError(response) from None responses[active_node] = response writer.close() await writer.wait_closed() return responses
(self, items: list) -> dict
52,405
zabbix_utils.aiosender
__get_response
null
async def __get_response(self, reader: asyncio.StreamReader) -> Optional[str]: try: result = json.loads( await ZabbixProtocol.parse_async_packet(reader, log, ProcessingError) ) except json.decoder.JSONDecodeError as err: log.debug('Unexpected response was received from Zabbix.') raise err log.debug('Received data: %s', result) return result
(self, reader: asyncio.streams.StreamReader) -> Optional[str]
52,407
zabbix_utils.aiosender
__read_config
null
def __read_config(self, config: configparser.SectionProxy) -> None: server_row = config.get('ServerActive') or config.get('Server') or '127.0.0.1:10051' for cluster in server_row.split(','): self.clusters.append(Cluster(cluster.strip().split(';'))) if 'SourceIP' in config: self.source_ip = config.get('SourceIP') for key in config: if key.startswith('tls'): self.tls[key] = config.get(key)
(self, config: configparser.SectionProxy) -> NoneType
52,408
zabbix_utils.aiosender
__init__
null
def __init__(self, server: Optional[str] = None, port: int = 10051, use_config: bool = False, timeout: int = 10, use_ipv6: bool = False, source_ip: Optional[str] = None, chunk_size: int = 250, clusters: Union[tuple, list] = None, ssl_context: Optional[Callable] = None, compression: bool = False, config_path: Optional[str] = '/etc/zabbix/zabbix_agentd.conf'): self.timeout = timeout self.use_ipv6 = use_ipv6 self.tls = {} self.source_ip = None self.chunk_size = chunk_size self.compression = compression if ssl_context is not None: if not isinstance(ssl_context, Callable): raise TypeError('Value "ssl_context" should be a function.') from None self.ssl_context = ssl_context if source_ip is not None: self.source_ip = source_ip if use_config: self.clusters = [] self.__load_config(config_path) return if clusters is not None: if not (isinstance(clusters, tuple) or isinstance(clusters, list)): raise TypeError('Value "clusters" should be a tuple or a list.') from None clusters = clusters.copy() if server is not None: clusters.append([f"{server}:{port}"]) self.clusters = [Cluster(c) for c in clusters] else: self.clusters = [Cluster([f"{server or '127.0.0.1'}:{port}"])]
(self, server: Optional[str] = None, port: int = 10051, use_config: bool = False, timeout: int = 10, use_ipv6: bool = False, source_ip: Optional[str] = None, chunk_size: int = 250, clusters: Union[tuple, list, NoneType] = None, ssl_context: Optional[Callable] = None, compression: bool = False, config_path: Optional[str] = '/etc/zabbix/zabbix_agentd.conf')
52,409
zabbix_utils.aiosender
send
Sends packets and receives an answer from Zabbix. Args: items (list): List of ItemValue objects. Returns: TrapperResponse: Response from Zabbix server/proxy.
async def send(self, items: list) -> TrapperResponse: """Sends packets and receives an answer from Zabbix. Args: items (list): List of ItemValue objects. Returns: TrapperResponse: Response from Zabbix server/proxy. """ # Split the list of items into chunks of size self.chunk_size. chunks = [items[i:i + self.chunk_size] for i in range(0, len(items), self.chunk_size)] # Merge responses into a single TrapperResponse object. result = TrapperResponse() # TrapperResponse details for each node and chunk. result.details = {} for i, chunk in enumerate(chunks): if not all(isinstance(item, ItemValue) for item in chunk): log.debug('Received unexpected item list. It must be a list of ItemValue objects: %s', json.dumps(chunk)) raise ProcessingError(f"Received unexpected item list. It must be a list of ItemValue objects: {json.dumps(chunk)}") resp_by_node = await self.__chunk_send(chunk) node_step = 1 for node, resp in resp_by_node.items(): try: result.add(resp, (i + 1) * node_step) except ProcessingError as err: log.debug(err) raise ProcessingError(err) from None node_step += 1 if node not in result.details: result.details[node] = [] result.details[node].append(TrapperResponse(i+1).add(resp)) return result
(self, items: list) -> zabbix_utils.types.TrapperResponse
52,410
zabbix_utils.aiosender
send_value
Sends one value and receives an answer from Zabbix. Args: host (str): Specify host name the item belongs to (as registered in Zabbix frontend). key (str): Specify item key to send value to. value (str): Specify item value. clock (int, optional): Specify time in Unix timestamp format. Defaults to `None`. ns (int, optional): Specify time expressed in nanoseconds. Defaults to `None`. Returns: TrapperResponse: Response from Zabbix server/proxy.
async def send_value(self, host: str, key: str, value: str, clock: Optional[int] = None, ns: Optional[int] = None) -> TrapperResponse: """Sends one value and receives an answer from Zabbix. Args: host (str): Specify host name the item belongs to (as registered in Zabbix frontend). key (str): Specify item key to send value to. value (str): Specify item value. clock (int, optional): Specify time in Unix timestamp format. Defaults to `None`. ns (int, optional): Specify time expressed in nanoseconds. Defaults to `None`. Returns: TrapperResponse: Response from Zabbix server/proxy. """ return await self.send([ItemValue(host, key, value, clock, ns)])
(self, host: str, key: str, value: str, clock: Optional[int] = None, ns: Optional[int] = None) -> zabbix_utils.types.TrapperResponse
52,411
zabbix_utils
AsyncZabbixAPI
null
class AsyncZabbixAPI():
    def __init__(self, *args, **kwargs):
        raise ModuleNotFoundError("No module named 'aiohttp'")
(*args, **kwargs)
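A stub like this usually guards an optional dependency at import time. A common pattern is sketched below; the .aioapi module name is hypothetical and not taken from the library.

try:
    import aiohttp  # optional dependency for the async API
    from .aioapi import AsyncZabbixAPI  # hypothetical module name
except ModuleNotFoundError:
    # Fall back to a placeholder that fails loudly only when used.
    class AsyncZabbixAPI():
        def __init__(self, *args, **kwargs):
            raise ModuleNotFoundError("No module named 'aiohttp'")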
52,412
zabbix_utils
__init__
null
def __init__(self, *args, **kwargs):
    raise ModuleNotFoundError("No module named 'aiohttp'")
(self, *args, **kwargs)
52,413
zabbix_utils.getter
Getter
Zabbix get synchronous implementation.

Args:
    host (str, optional): Zabbix agent address. Defaults to `'127.0.0.1'`.
    port (int, optional): Zabbix agent port. Defaults to `10050`.
    timeout (int, optional): Connection timeout value. Defaults to `10`.
    use_ipv6 (bool, optional): Specifying IPv6 use instead of IPv4. Defaults to `False`.
    source_ip (str, optional): IP from which to establish connection. Defaults to `None`.
    socket_wrapper (Callable, optional): Func(`conn`) to wrap socket. Defaults to `None`.
class Getter():
    """Zabbix get synchronous implementation.

    Args:
        host (str, optional): Zabbix agent address. Defaults to `'127.0.0.1'`.
        port (int, optional): Zabbix agent port. Defaults to `10050`.
        timeout (int, optional): Connection timeout value. Defaults to `10`.
        use_ipv6 (bool, optional): Specifying IPv6 use instead of IPv4. Defaults to `False`.
        source_ip (str, optional): IP from which to establish connection. Defaults to `None`.
        socket_wrapper (Callable, optional): Func(`conn`) to wrap socket. Defaults to `None`.
    """

    def __init__(self, host: str = '127.0.0.1', port: int = 10050, timeout: int = 10,
                 use_ipv6: bool = False, source_ip: Union[str, None] = None,
                 socket_wrapper: Union[Callable, None] = None):
        self.host = host
        self.port = port
        self.timeout = timeout
        self.use_ipv6 = use_ipv6
        self.source_ip = source_ip

        self.socket_wrapper = socket_wrapper
        if self.socket_wrapper:
            if not isinstance(self.socket_wrapper, Callable):
                raise TypeError('Value "socket_wrapper" should be a function.')

    def __get_response(self, conn: socket.socket) -> Union[str, None]:
        result = ZabbixProtocol.parse_sync_packet(conn, log, ProcessingError)
        log.debug('Received data: %s', result)
        return result

    def get(self, key: str) -> Union[AgentResponse, None]:
        """Gets item value from Zabbix agent by specified key.

        Args:
            key (str): Zabbix item key.

        Returns:
            AgentResponse: Response with the value from Zabbix agent for specified key.
        """
        packet = ZabbixProtocol.create_packet(key, log)

        try:
            if self.use_ipv6:
                connection = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
            else:
                connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        except socket.error:
            raise ProcessingError(
                f"Error creating socket for {self.host}:{self.port}") from None

        connection.settimeout(self.timeout)

        if self.source_ip:
            connection.bind((self.source_ip, 0,))

        try:
            connection.connect((self.host, self.port))
            if self.socket_wrapper is not None:
                connection = self.socket_wrapper(connection)
            connection.sendall(packet)
        except (TimeoutError, socket.timeout) as err:
            log.error(
                'The connection to %s timed out after %d seconds',
                f"{self.host}:{self.port}",
                self.timeout
            )
            connection.close()
            raise err
        except (ConnectionRefusedError, socket.gaierror) as err:
            log.error(
                'An error occurred while trying to connect to %s: %s',
                f"{self.host}:{self.port}",
                getattr(err, 'msg', str(err))
            )
            connection.close()
            raise err
        except (OSError, socket.error) as err:
            log.warning(
                'An error occurred while trying to send to %s: %s',
                f"{self.host}:{self.port}",
                getattr(err, 'msg', str(err))
            )
            connection.close()
            raise err

        try:
            response = self.__get_response(connection)
        except ConnectionResetError as err:
            log.debug('Get value error: %s', err)
            log.warning('Check access restrictions in Zabbix agent configuration.')
            raise err
        log.debug('Response from [%s:%s]: %s', self.host, self.port, response)

        try:
            connection.close()
        except socket.error:
            pass

        return AgentResponse(response)
(host: str = '127.0.0.1', port: int = 10050, timeout: int = 10, use_ipv6: bool = False, source_ip: Optional[str] = None, socket_wrapper: Optional[Callable] = None)
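A usage sketch for Getter, assuming a .value attribute on AgentResponse; the host and item key are placeholders.

from zabbix_utils import Getter

agent = Getter(host='192.0.2.30', port=10050, timeout=5)
resp = agent.get('system.uptime')  # AgentResponse
print(resp.value)  # assumes AgentResponse exposes the parsed value as .value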
52,414
zabbix_utils.getter
__get_response
null
def __get_response(self, conn: socket.socket) -> Union[str, None]:
    result = ZabbixProtocol.parse_sync_packet(conn, log, ProcessingError)
    log.debug('Received data: %s', result)
    return result
(self, conn: socket.socket) -> Optional[str]
52,415
zabbix_utils.getter
__init__
null
def __init__(self, host: str = '127.0.0.1', port: int = 10050, timeout: int = 10,
             use_ipv6: bool = False, source_ip: Union[str, None] = None,
             socket_wrapper: Union[Callable, None] = None):
    self.host = host
    self.port = port
    self.timeout = timeout
    self.use_ipv6 = use_ipv6
    self.source_ip = source_ip

    self.socket_wrapper = socket_wrapper
    if self.socket_wrapper:
        if not isinstance(self.socket_wrapper, Callable):
            raise TypeError('Value "socket_wrapper" should be a function.')
(self, host: str = '127.0.0.1', port: int = 10050, timeout: int = 10, use_ipv6: bool = False, source_ip: Optional[str] = None, socket_wrapper: Optional[Callable] = None)
52,416
zabbix_utils.getter
get
Gets item value from Zabbix agent by specified key.

Args:
    key (str): Zabbix item key.

Returns:
    AgentResponse: Response with the value from Zabbix agent for specified key.
def get(self, key: str) -> Union[AgentResponse, None]:
    """Gets item value from Zabbix agent by specified key.

    Args:
        key (str): Zabbix item key.

    Returns:
        AgentResponse: Response with the value from Zabbix agent for specified key.
    """
    packet = ZabbixProtocol.create_packet(key, log)

    try:
        if self.use_ipv6:
            connection = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        else:
            connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    except socket.error:
        raise ProcessingError(
            f"Error creating socket for {self.host}:{self.port}") from None

    connection.settimeout(self.timeout)

    if self.source_ip:
        connection.bind((self.source_ip, 0,))

    try:
        connection.connect((self.host, self.port))
        if self.socket_wrapper is not None:
            connection = self.socket_wrapper(connection)
        connection.sendall(packet)
    except (TimeoutError, socket.timeout) as err:
        log.error(
            'The connection to %s timed out after %d seconds',
            f"{self.host}:{self.port}",
            self.timeout
        )
        connection.close()
        raise err
    except (ConnectionRefusedError, socket.gaierror) as err:
        log.error(
            'An error occurred while trying to connect to %s: %s',
            f"{self.host}:{self.port}",
            getattr(err, 'msg', str(err))
        )
        connection.close()
        raise err
    except (OSError, socket.error) as err:
        log.warning(
            'An error occurred while trying to send to %s: %s',
            f"{self.host}:{self.port}",
            getattr(err, 'msg', str(err))
        )
        connection.close()
        raise err

    try:
        response = self.__get_response(connection)
    except ConnectionResetError as err:
        log.debug('Get value error: %s', err)
        log.warning('Check access restrictions in Zabbix agent configuration.')
        raise err
    log.debug('Response from [%s:%s]: %s', self.host, self.port, response)

    try:
        connection.close()
    except socket.error:
        pass

    return AgentResponse(response)
(self, key: str) -> Optional[AgentResponse]
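socket_wrapper receives the connected socket and must return the (possibly wrapped) socket that get continues to use. Below is a TLS sketch built on the standard library; the certificate handling shown is illustrative only, not a production setup.

import socket
import ssl

from zabbix_utils import Getter

def wrap_tls(conn: socket.socket) -> ssl.SSLSocket:
    # Illustrative only: disables verification for brevity.
    ctx = ssl.create_default_context()
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE
    return ctx.wrap_socket(conn)

agent = Getter(host='192.0.2.30', port=10050, socket_wrapper=wrap_tls)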
52,417
zabbix_utils.types
ItemValue
Contains data of a single item value.

Args:
    host (str): Specify host name the item belongs to (as registered in Zabbix frontend).
    key (str): Specify item key to send value to.
    value (str): Specify item value.
    clock (int, optional): Specify time in Unix timestamp format. Defaults to `None`.
    ns (int, optional): Specify time expressed in nanoseconds. Defaults to `None`.
class ItemValue():
    """Contains data of a single item value.

    Args:
        host (str): Specify host name the item belongs to (as registered in Zabbix frontend).
        key (str): Specify item key to send value to.
        value (str): Specify item value.
        clock (int, optional): Specify time in Unix timestamp format. Defaults to `None`.
        ns (int, optional): Specify time expressed in nanoseconds. Defaults to `None`.
    """

    def __init__(self, host: str, key: str, value: str,
                 clock: Union[int, None] = None, ns: Union[int, None] = None):
        self.host = str(host)
        self.key = str(key)
        self.value = str(value)
        self.clock = None
        self.ns = None

        if clock is not None:
            try:
                self.clock = int(clock)
            except ValueError:
                raise ValueError(
                    'The clock value must be expressed in the Unix Timestamp format') from None

        if ns is not None:
            try:
                self.ns = int(ns)
            except ValueError:
                raise ValueError(
                    'The ns value must be expressed in the integer value of nanoseconds') from None

    def __str__(self) -> str:
        return json.dumps(self.to_json(), ensure_ascii=False)

    def __repr__(self) -> str:
        return self.__str__()

    def to_json(self) -> dict:
        """Represents ItemValue object in dictionary for json.

        Returns:
            dict: Object attributes in dictionary.
        """
        return {k: v for k, v in self.__dict__.items() if v is not None}
(host: str, key: str, value: str, clock: Optional[int] = None, ns: Optional[int] = None)
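A short example of how ItemValue serializes via to_json; the values are placeholders.

from zabbix_utils import ItemValue

item = ItemValue('web01', 'system.cpu.load', '1.25', clock=1700000000)
print(item.to_json())
# {'host': 'web01', 'key': 'system.cpu.load', 'value': '1.25', 'clock': 1700000000}
# 'ns' is omitted because None-valued attributes are filtered out.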