index | package | name | docstring | code | signature
---|---|---|---|---|---|
41,993 |
torch.fx.interpreter
|
boxed_run
|
Run `module` via interpretation and return the result. This uses the "boxed"
calling convention, where you pass a list of arguments, which will be cleared
by the interpreter. This ensures that input tensors are promptly deallocated.
.. note::
Backwards-compatibility for this API is guaranteed.
|
@compatibility(is_backward_compatible=True)
def boxed_run(self, args_list):
"""
Run `module` via interpretation and return the result. This uses the "boxed"
calling convention, where you pass a list of arguments, which will be cleared
by the interpreter. This ensures that input tensors are promptly deallocated.
"""
args_iter = iter(args_list)
env = {}
for n in self.graph.nodes:
if n.op == "placeholder":
env[n] = next(args_iter)
args_list.clear()
return self.run(initial_env=env)
|
(self, args_list)
|
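A minimal usage sketch for ``boxed_run`` (the traced function below is ours, for illustration). The argument list is emptied by the call, which is what lets the interpreter drop input tensors early:

import torch
import torch.fx

def f(x):
    return x.relu() + 1

gm = torch.fx.symbolic_trace(f)
interp = torch.fx.Interpreter(gm)

args = [torch.randn(4)]        # "boxed" convention: arguments go in a list
out = interp.boxed_run(args)   # the interpreter clears the list itself
assert args == []              # inputs were released promptly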
41,994 |
torch.fx.interpreter
|
call_function
|
Execute a ``call_function`` node and return the result.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Return:
Any: The value returned by the function invocation
.. note::
Backwards-compatibility for this API is guaranteed.
|
@compatibility(is_backward_compatible=True)
def call_function(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute a ``call_function`` node and return the result.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Return:
Any: The value returned by the function invocation
"""
assert not isinstance(target, str)
# Execute the function and return the result
return target(*args, **kwargs)
|
(self, target: Union[Callable[..., Any], str], args: Tuple[Union[Tuple[Any, ...], List[Any], Dict[str, Any], slice, range, torch.fx.node.Node, str, int, float, bool, complex, torch.dtype, torch.Tensor, torch.device, torch.memory_format, torch.layout, torch._ops.OpOverload, NoneType], ...], kwargs: Dict[str, Any]) -> Any
|
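The ``call_*`` methods (``call_function``, ``call_method``, ``call_module``, ``get_attr``, ``output``, ``placeholder``) are the standard override points when subclassing ``Interpreter``. A sketch (the subclass is hypothetical) that logs each function call while delegating to the parent implementation:

import torch
import torch.fx

class LoggingInterpreter(torch.fx.Interpreter):
    def call_function(self, target, args, kwargs):
        result = super().call_function(target, args, kwargs)
        print(f"{getattr(target, '__name__', target)} -> {type(result).__name__}")
        return result

gm = torch.fx.symbolic_trace(lambda x: torch.relu(x) + 1)
LoggingInterpreter(gm).run(torch.randn(3))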
41,995 |
torch.fx.interpreter
|
call_method
|
Execute a ``call_method`` node and return the result.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Return:
Any: The value returned by the method invocation
.. note::
Backwards-compatibility for this API is guaranteed.
|
@compatibility(is_backward_compatible=True)
def call_method(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute a ``call_method`` node and return the result.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Return:
Any: The value returned by the method invocation
"""
# args[0] is the `self` object for this method call
self_obj, *args_tail = args
# Execute the method and return the result
assert isinstance(target, str)
return getattr(self_obj, target)(*args_tail, **kwargs)
|
(self, target: Union[Callable[..., Any], str], args: Tuple[Union[Tuple[Any, ...], List[Any], Dict[str, Any], slice, range, torch.fx.node.Node, str, int, float, bool, complex, torch.dtype, torch.Tensor, torch.device, torch.memory_format, torch.layout, torch._ops.OpOverload, NoneType], ...], kwargs: Dict[str, Any]) -> Any
|
41,996 |
torch.fx.interpreter
|
call_module
|
Execute a ``call_module`` node and return the result.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Return:
Any: The value returned by the module invocation
.. note::
Backwards-compatibility for this API is guaranteed.
|
@compatibility(is_backward_compatible=True)
def call_module(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute a ``call_module`` node and return the result.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Return:
Any: The value returned by the module invocation
"""
# Retrieve executed args and kwargs values from the environment
# Execute the method and return the result
assert isinstance(target, str)
submod = self.fetch_attr(target)
return submod(*args, **kwargs)
|
(self, target: Union[Callable[..., Any], str], args: Tuple[Union[Tuple[Any, ...], List[Any], Dict[str, Any], slice, range, torch.fx.node.Node, str, int, float, bool, complex, torch.dtype, torch.Tensor, torch.device, torch.memory_format, torch.layout, torch._ops.OpOverload, NoneType], ...], kwargs: Dict[str, Any]) -> Any
|
41,997 |
torch.fx.interpreter
|
fetch_args_kwargs_from_env
|
Fetch the concrete values of ``args`` and ``kwargs`` of node ``n``
from the current execution environment.
Args:
n (Node): The node for which ``args`` and ``kwargs`` should be fetched.
Return:
Tuple[Tuple, Dict]: ``args`` and ``kwargs`` with concrete values for ``n``.
.. note::
Backwards-compatibility for this API is guaranteed.
|
@compatibility(is_backward_compatible=True)
def fetch_args_kwargs_from_env(self, n : Node) -> Tuple[Tuple, Dict]:
"""
Fetch the concrete values of ``args`` and ``kwargs`` of node ``n``
from the current execution environment.
Args:
n (Node): The node for which ``args`` and ``kwargs`` should be fetched.
Return:
Tuple[Tuple, Dict]: ``args`` and ``kwargs`` with concrete values for ``n``.
"""
args = self.map_nodes_to_values(n.args, n)
assert isinstance(args, tuple)
kwargs = self.map_nodes_to_values(n.kwargs, n)
assert isinstance(kwargs, dict)
return args, kwargs
|
(self, n: torch.fx.node.Node) -> Tuple[Tuple, Dict]
|
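``fetch_args_kwargs_from_env`` is the piece you need when overriding ``run_node``; the stock implementation dispatches via ``getattr(self, n.op)``, so a sketch of a per-node timer (the timing logic is ours) looks like:

import time
import torch.fx

class TimingInterpreter(torch.fx.Interpreter):
    def run_node(self, n):
        args, kwargs = self.fetch_args_kwargs_from_env(n)
        t0 = time.perf_counter()
        result = getattr(self, n.op)(n.target, args, kwargs)  # same dispatch as the base class
        print(f"{n.name}: {(time.perf_counter() - t0) * 1e6:.1f} us")
        return result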
41,998 |
torch.fx.interpreter
|
fetch_attr
|
Fetch an attribute from the ``Module`` hierarchy of ``self.module``.
Args:
target (str): The fully-qualified name of the attribute to fetch
Return:
Any: The value of the attribute.
.. note::
Backwards-compatibility for this API is guaranteed.
|
@compatibility(is_backward_compatible=True)
def fetch_attr(self, target : str):
"""
Fetch an attribute from the ``Module`` hierarchy of ``self.module``.
Args:
target (str): The fully-qualified name of the attribute to fetch
Return:
Any: The value of the attribute.
"""
target_atoms = target.split('.')
attr_itr = self.module
for i, atom in enumerate(target_atoms):
if not hasattr(attr_itr, atom):
raise RuntimeError(f"Node referenced nonexistent target {'.'.join(target_atoms[:i])}")
attr_itr = getattr(attr_itr, atom)
return attr_itr
|
(self, target: str)
|
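``fetch_attr`` resolves dotted paths one attribute at a time, so targets like ``"block.0"`` produced for ``call_module``/``get_attr`` nodes work directly. A sketch with a hypothetical module:

import torch
import torch.fx

class Net(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.block = torch.nn.Sequential(torch.nn.Linear(4, 4))

    def forward(self, x):
        return self.block(x)

interp = torch.fx.Interpreter(torch.fx.symbolic_trace(Net()))
lin = interp.fetch_attr("block.0")   # walks .block, then .0
assert isinstance(lin, torch.nn.Linear)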
41,999 |
torch.fx.interpreter
|
get_attr
|
Execute a ``get_attr`` node. Will retrieve an attribute
value from the ``Module`` hierarchy of ``self.module``.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Return:
Any: The value of the attribute that was retrieved
.. note::
Backwards-compatibility for this API is guaranteed.
|
@compatibility(is_backward_compatible=True)
def get_attr(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute a ``get_attr`` node. Will retrieve an attribute
value from the ``Module`` hierarchy of ``self.module``.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Return:
Any: The value of the attribute that was retrieved
"""
assert isinstance(target, str)
return self.fetch_attr(target)
|
(self, target: Union[Callable[..., Any], str], args: Tuple[Union[Tuple[Any, ...], List[Any], Dict[str, Any], slice, range, torch.fx.node.Node, str, int, float, bool, complex, torch.dtype, torch.Tensor, torch.device, torch.memory_format, torch.layout, torch._ops.OpOverload, NoneType], ...], kwargs: Dict[str, Any]) -> Any
|
42,000 |
torch.fx.interpreter
|
map_nodes_to_values
|
Recursively descend through ``args`` and look up the concrete value
for each ``Node`` in the current execution environment.
Args:
args (Argument): Data structure within which to look up concrete values
n (Node): Node to which ``args`` belongs. This is only used for error reporting.
.. note::
Backwards-compatibility for this API is guaranteed.
|
@compatibility(is_backward_compatible=True)
def map_nodes_to_values(self, args : Argument, n : Node) -> Argument:
"""
Recursively descend through ``args`` and look up the concrete value
for each ``Node`` in the current execution environment.
Args:
args (Argument): Data structure within which to look up concrete values
n (Node): Node to which ``args`` belongs. This is only used for error reporting.
"""
def load_arg(n_arg : Node) -> Any:
if n_arg not in self.env:
raise RuntimeError(f'Node {n} referenced nonexistent value {n_arg}! Run Graph.lint() '
f'to diagnose such issues')
return self.env[n_arg]
return map_arg(args, load_arg)
|
(self, args: Union[Tuple[Any, ...], List[Any], Dict[str, Any], slice, range, torch.fx.node.Node, str, int, float, bool, complex, torch.dtype, torch.Tensor, torch.device, torch.memory_format, torch.layout, torch._ops.OpOverload, NoneType], n: torch.fx.node.Node) -> Union[Tuple[Any, ...], List[Any], Dict[str, Any], slice, range, torch.fx.node.Node, str, int, float, bool, complex, torch.dtype, torch.Tensor, torch.device, torch.memory_format, torch.layout, torch._ops.OpOverload, NoneType]
|
42,001 |
torch.fx.interpreter
|
output
|
Execute an ``output`` node. This really just retrieves
the value referenced by the ``output`` node and returns it.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Return:
Any: The return value referenced by the output node
.. note::
Backwards-compatibility for this API is guaranteed.
|
@compatibility(is_backward_compatible=True)
def output(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute an ``output`` node. This really just retrieves
the value referenced by the ``output`` node and returns it.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Return:
Any: The return value referenced by the output node
"""
return args[0]
|
(self, target: Union[Callable[..., Any], str], args: Tuple[Union[Tuple[Any, ...], List[Any], Dict[str, Any], slice, range, torch.fx.node.Node, str, int, float, bool, complex, torch.dtype, torch.Tensor, torch.device, torch.memory_format, torch.layout, torch._ops.OpOverload, NoneType], ...], kwargs: Dict[str, Any]) -> Any
|
42,002 |
torch.fx.interpreter
|
placeholder
|
Execute a ``placeholder`` node. Note that this is stateful:
``Interpreter`` maintains an internal iterator over
arguments passed to ``run`` and this method returns
next() on that iterator.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Returns:
Any: The argument value that was retrieved.
.. note::
Backwards-compatibility for this API is guaranteed.
|
@compatibility(is_backward_compatible=True)
def placeholder(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute a ``placeholder`` node. Note that this is stateful:
``Interpreter`` maintains an internal iterator over
arguments passed to ``run`` and this method returns
next() on that iterator.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Returns:
Any: The argument value that was retrieved.
"""
assert isinstance(target, str)
if target.startswith('*'):
# For a starred parameter e.g. `*args`, retrieve all
# remaining values from the args list.
return list(self.args_iter)
else:
try:
return next(self.args_iter)
except StopIteration as si:
if len(args) > 0:
return args[0]
else:
raise RuntimeError(f'Expected positional argument for parameter {target}, but one was not passed in!') from si
|
(self, target: Union[Callable[..., Any], str], args: Tuple[Union[Tuple[Any, ...], List[Any], Dict[str, Any], slice, range, torch.fx.node.Node, str, int, float, bool, complex, torch.dtype, torch.Tensor, torch.device, torch.memory_format, torch.layout, torch._ops.OpOverload, NoneType], ...], kwargs: Dict[str, Any]) -> Any
|
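The ``StopIteration`` branch above is what makes traced default values work: ``symbolic_trace`` stores a parameter's default in the placeholder node's ``args``, so ``run`` can be called with fewer positional arguments. A sketch:

import torch
import torch.fx

def f(x, scale=2.0):
    return x * scale

interp = torch.fx.Interpreter(torch.fx.symbolic_trace(f))
y = interp.run(torch.ones(2))   # no `scale` passed; placeholder falls back to args[0] == 2.0
assert torch.equal(y, torch.full((2,), 2.0))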
42,003 |
opt_einsum_fx._efficient_shape_prop
|
propagate
| null |
def propagate(self, *args):
return super().run(*args)
|
(self, *args)
|
42,004 |
torch.fx.interpreter
|
run
|
Run `module` via interpretation and return the result.
Args:
*args: The arguments to the Module to run, in positional order
initial_env (Optional[Dict[Node, Any]]): An optional starting environment for execution.
This is a dict mapping `Node` to any value. This can be used, for example, to
pre-populate results for certain `Nodes` so as to do only partial evaluation within
the interpreter.
enable_io_processing (bool): If True, the inputs and outputs are first processed with the graph's
process_inputs and process_outputs functions before use.
Returns:
Any: The value returned from executing the Module
.. note::
Backwards-compatibility for this API is guaranteed.
|
@compatibility(is_backward_compatible=True)
def run(self, *args, initial_env : Optional[Dict[Node, Any]] = None, enable_io_processing : bool = True) -> Any:
"""
Run `module` via interpretation and return the result.
Args:
*args: The arguments to the Module to run, in positional order
initial_env (Optional[Dict[Node, Any]]): An optional starting environment for execution.
This is a dict mapping `Node` to any value. This can be used, for example, to
pre-populate results for certain `Nodes` so as to do only partial evaluation within
the interpreter.
enable_io_processing (bool): If True, the inputs and outputs are first processed with the graph's
process_inputs and process_outputs functions before use.
Returns:
Any: The value returned from executing the Module
"""
self.env = initial_env if initial_env is not None else {}
# Positional function args are consumed left-to-right by
# `placeholder` nodes. Use an iterator to keep track of
# position and extract those values.
if enable_io_processing:
args = self.graph.process_inputs(*args)
self.args_iter : Iterator[Any] = iter(args)
pbar = tqdm(total=len(self.graph.nodes),
desc=f"{self.name}: {str(list(self.graph.nodes)) if config.verbose_progress else ''}",
initial=0, position=0, leave=True, disable=config.disable_progress, delay=0)
for node in self.graph.nodes:
pbar.update(1)
if node in self.env:
# Short circuit if we have this value. This could
# be used, for example, for partial evaluation
# where the caller has pre-populated `env` with
# values for a subset of the program.
continue
try:
self.env[node] = self.run_node(node)
except Exception as e:
if self.extra_traceback:
msg = f"While executing {node.format_node()}"
msg = f'{e.args[0]}\n\n{msg}' if e.args else str(msg)
msg += f"\nOriginal traceback:\n{node.stack_trace}"
e.args = (msg,) + e.args[1:]
if isinstance(e, KeyError):
raise RuntimeError(*e.args) from e
raise
if self.garbage_collect_values:
for to_delete in self.user_to_last_uses.get(node, []):
del self.env[to_delete]
if node.op == 'output':
output_val = self.env[node]
return self.graph.process_outputs(output_val) if enable_io_processing else output_val
|
(self, *args, initial_env: Optional[Dict[torch.fx.node.Node, Any]] = None, enable_io_processing: bool = True) -> Any
|
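``initial_env`` is the hook for partial evaluation: any node pre-seeded in the environment is skipped by the ``node in self.env`` short-circuit above. A sketch (the graph is ours):

import torch
import torch.fx

def f(x):
    y = x + 1
    return y * 2

gm = torch.fx.symbolic_trace(f)
add_node = next(n for n in gm.graph.nodes if n.op == "call_function")  # the `x + 1` node

# Pretend `x + 1` was computed elsewhere: seed its result and the
# interpreter will not re-execute that node.
out = torch.fx.Interpreter(gm).run(
    torch.zeros(3), initial_env={add_node: torch.full((3,), 10.0)}
)
assert torch.equal(out, torch.full((3,), 20.0))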
42,005 |
opt_einsum_fx._efficient_shape_prop
|
run_node
| null |
def run_node(self, n: Node) -> Any:
if n.op == "call_function" and n.target in _EINSUM_FUNCS:
args, kwargs = self.fetch_args_kwargs_from_env(n)
equation, *operands = args
shapes = [op.shape for op in operands]
assert len({op.dtype for op in operands}) == 1
meta = SimpleMeta(einsum_shape(equation, *shapes), operands[0].dtype)
result = torch.zeros((1,) * len(meta.shape), dtype=meta.dtype, device=operands[0].device).expand(meta.shape)
elif n.op == "call_function" and n.target == torch.tensordot:
args, kwargs = self.fetch_args_kwargs_from_env(n)
shape_a = [dim for i, dim in enumerate(args[0].shape) if i not in kwargs['dims'][0]]
shape_b = [dim for i, dim in enumerate(args[1].shape) if i not in kwargs['dims'][1]]
assert len({op.dtype for op in args}) == 1
meta = SimpleMeta(shape_a + shape_b, args[0].dtype)
result = torch.zeros((1,) * len(meta.shape), dtype=meta.dtype, device=args[0].device).expand(meta.shape)
else:
result = super().run_node(n)
if isinstance(result, torch.Tensor):
meta = SimpleMeta(result.shape, result.dtype)
else:
meta = None
n.meta = dict()
n.meta['tensor_meta'] = meta
n.meta['type'] = type(result)
return result
|
(self, n: torch.fx.node.Node) -> Any
|
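The ``torch.zeros((1,) * ndim, ...).expand(shape)`` trick above is what makes this shape propagation cheap: the expanded view has the right shape, dtype, and device but zero strides, so only one element is ever allocated. A standalone illustration:

import torch

shape = (128, 64, 32)
meta = torch.zeros((1,) * len(shape), dtype=torch.float32).expand(shape)

assert meta.shape == shape
assert meta.stride() == (0, 0, 0)   # a view over a single element; no 128*64*32 buffer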
42,010 |
opt_einsum_fx._fuse
|
fuse_einsums
|
Fuse einsums when possible.
When the output of one einsum is only used as an operand in another einsum, the two einsums can be fused into one.
Example:
.. code-block:: python
def fusable(x, y):
z = torch.einsum("ij,jk->ik", x, y)
return torch.einsum("ik,ij->i", z, x)
g = torch.fx.symbolic_trace(fusable)
print(fuse_einsums(g.graph).python_code(""))
gives::
import torch
def forward(self, x, y):
einsum_2 = torch.functional.einsum('ib,bk,ij->i', x, y, x); x = y = None
return einsum_2
Args:
graph: the graph to process.
in_place (bool, optional): whether to process ``graph`` in place.
Returns:
The graph with fused einsums.
|
def fuse_einsums(graph: fx.Graph, in_place: bool = False) -> fx.Graph:
"""Fuse einsums when possible.
When the output of one einsum is only used as an operand in another einsum, the two einsums can be fused into one.
Example:
.. code-block:: python
def fusable(x, y):
z = torch.einsum("ij,jk->ik", x, y)
return torch.einsum("ik,ij->i", z, x)
g = torch.fx.symbolic_trace(fusable)
print(fuse_einsums(g.graph).python_code(""))
gives::
import torch
def forward(self, x, y):
einsum_2 = torch.functional.einsum('ib,bk,ij->i', x, y, x); x = y = None
return einsum_2
Args:
graph: the graph to process.
in_place (bool, optional): whether to process ``graph`` in place.
Returns:
The graph with fused einsums.
"""
if not in_place:
graph = copy.deepcopy(graph)
for node in graph.nodes:
if node.op == "call_function" and node.target in _EINSUM_FUNCS:
our_inp_einstrs, our_out_einstr = _get_einstrs(node.args[0])
assert len(our_inp_einstrs) == len(node.args) - 1
avail_letters = iter(
set(string.ascii_lowercase)
- set.union(*(set(e) for e in our_inp_einstrs))
)
new_our_einstrs = []
new_our_args = []
we_fused_nodes = []
# Iterate over operands
for inp_idex, inp in enumerate(node.args[1:]):
if (
inp.op == "call_function"
and inp.target in _EINSUM_FUNCS
and len(inp.users) == 1
):
# This operand is the output of another einsum, and is not used by any other operation
# As a result, we can fuse it
its_inp_einstrs, its_out_einstr = _get_einstrs(inp.args[0])
if len(its_out_einstr) != len(our_inp_einstrs[inp_idex]):
raise RuntimeError(
f"Inconsistent rank: einsum `{node}`'s input {inp_idex} is the result of einsum {inp}; the output of `{inp}` is labeled `{its_out_einstr}` (rank {len(its_out_einstr)}), but the corresponding input of `{node}` is labeled `{our_inp_einstrs[inp_idex]}` (rank {len(our_inp_einstrs[inp_idex])})"
)
# First, we need to figure out which of its output dimensions correspond to our dimensions:
its_dim_to_ours = dict(
zip(its_out_einstr, our_inp_einstrs[inp_idex])
)
# assign any labels that don't show up in the output of the previous einsum --- and thus don't have labels in the current einsum --- to new letters
its_remaining_labels = set.union(
*(set(e) for e in its_inp_einstrs)
) - set(its_dim_to_ours.keys())
try:
its_dim_to_ours.update(
dict((i, next(avail_letters)) for i in its_remaining_labels)
)
except StopIteration:
# We ran out of letters
raise NotImplementedError(
f"At einsum {node}, ran out of letters when trying to fuse parameter einsum {inp}. A fallback for this case is not yet implimented."
)
else:
# We had enough letters, finish adding the fuse
del its_remaining_labels
new_our_args.extend(inp.args[1:])
new_our_einstrs.extend(
"".join(its_dim_to_ours[d] for d in es)
for es in its_inp_einstrs
)
we_fused_nodes.append(inp)
else:
# This argument is not from an einsum, or is from an einsum that is used elsewhere as well
# Thus we just pass it through
new_our_einstrs.append(our_inp_einstrs[inp_idex])
new_our_args.append(inp)
# -- end iter over prev einsum inputs --
# Set the new values for the einstrs
node.args = (f"{','.join(new_our_einstrs)}->{our_out_einstr}",) + tuple(
new_our_args
)
# Remove fused inputs
for to_remove in we_fused_nodes:
graph.erase_node(to_remove)
# -- end case for einsum nodes --
# -- end iter over nodes --
return graph
|
(graph: torch.fx.graph.Graph, in_place: bool = False) -> torch.fx.graph.Graph
|
42,011 |
opt_einsum_fx._fuse
|
fuse_scalars
|
Use the multilinearity of einsum to unify and remove constant scalars around einsums.
Args:
graph: the graph to process.
in_place (bool, optional): whether to process ``graph`` in place.
Returns:
The graph with fused scalars.
|
def fuse_scalars(graph: fx.Graph, in_place: bool = False) -> fx.Graph:
"""Use the multilinearity of einsum to unify and remove constant scalars around einsums.
Args:
graph: the graph to process.
in_place (bool, optional): whether to process ``graph`` in place.
Returns:
The graph with fused scalars.
"""
if not in_place:
graph = copy.deepcopy(graph)
# Clear any previous state this graph has
for node in graph.nodes:
if hasattr(node, "in_lin_chain"):
delattr(node, "in_lin_chain")
# Find chains of multilinear ops
seen_nodes = set()
linear_chains = []
for node in graph.nodes:
if id(node) in seen_nodes:
continue
# Determine a linear chain
cur_linear_chain = []
while (
id(node) not in seen_nodes
and getattr(node, "target", None) in SCALAR_COMMUTE_OPS
):
seen_nodes.add(id(node))
node.in_lin_chain = len(linear_chains)
cur_linear_chain.append(node)
# Continue building the chain regardless, since the merger uses this
users = list(node.users.keys())
if len(users) > 0:
# Get the next node in the chain
node = users[0]
else:
# This isn't used in the graph at all, break the chain
node = None
if len(users) != 1:
# End this chain
break
# If the next user, which is now in node, was seen but is itself in a linear chain, this means we merge them
# TODO: thoroughly test this
if hasattr(node, "in_lin_chain") and len(cur_linear_chain) > 0:
# Merge
merge_into = node.in_lin_chain
for n in cur_linear_chain:
n.in_lin_chain = merge_into
linear_chains[merge_into].extend(cur_linear_chain)
else:
# This is a new chain
linear_chains.append(cur_linear_chain)
# Accumulate scalars in them
scalars = []
for lin_chain_i, lin_chain in enumerate(linear_chains):
if len(lin_chain) < 2:
# There's nothing to do here: either the chain is empty,
# or there's only one operation; even if it's a scalar multiplication,
# there's nothing for us to do with it
scalars.append(None)
continue
# Accumulate scalars
scalar_node_idexes = []
total_scalar = 1.0
for node_i, node in enumerate(lin_chain):
new_node, scalar = _get_node_and_scalar(node)
if scalar is not None:
total_scalar *= scalar
scalar_node_idexes.append(node_i)
is_all_scalars = len(scalar_node_idexes) == len(lin_chain)
# Remove scalar nodes
for node_i in scalar_node_idexes:
node = lin_chain[node_i]
new_node, scalar = _get_node_and_scalar(node)
assert scalar is not None
if is_all_scalars and node_i == len(lin_chain) - 1:
# If it's all scalars, we just put the total_scalar into the last operation
# and don't save a scalar for later
with graph.inserting_after(node):
new_node = graph.call_function(
operator.mul,
(total_scalar, new_node),
)
total_scalar = None
node.replace_all_uses_with(new_node)
graph.erase_node(node)
# Save the scalar for this chain
scalars.append(total_scalar)
# Remove all of the removed scalar operations from the lin chain
# See https://stackoverflow.com/a/11303234/1008938
for index in sorted(
(scalar_node_idexes[:-1] if is_all_scalars else scalar_node_idexes),
reverse=True,
):
del lin_chain[index]
del seen_nodes
# Make sure everything is still OK
graph.lint()
# Now we have chains without scalar operations; we can go through and add back in the scalars in the optimal place
for lin_chain_i, lin_chain in enumerate(linear_chains):
if (
len(lin_chain) == 0
or scalars[lin_chain_i] == 1.0
or scalars[lin_chain_i] is None
):
# Nothing to do with an empty chain
# No reason to add back a scalar that does nothing
# None signals don't process from above
continue
# Find the smallest argument or the output
smallest_node_i = None
smallest_arg_i = None
smallest_size = float("inf")
for node_i, node in enumerate(lin_chain):
for arg_i, arg in enumerate(node.args):
if not isinstance(arg, fx.Node):
continue
shape = get_shape(arg)
if shape is not None and prod(shape) < smallest_size:
smallest_node_i = node_i
smallest_arg_i = arg_i
smallest_size = prod(shape)
# Put the accumulated scalar on a node
if (smallest_node_i is None) or (
get_shape(lin_chain[-1]) is not None
and prod(get_shape(lin_chain[-1])) < smallest_size
):
# The output is the smallest, put it there
# OR there was no smallest argument, put it on the end of the chain
with graph.inserting_after(lin_chain[-1]):
new_node = graph.call_function(operator.mul, tuple()) # placeholder
lin_chain[-1].replace_all_uses_with(new_node)
new_node.args = (lin_chain[-1], scalars[lin_chain_i])
else:
# The smallest was someone's arg, so we replace that with a scalar multiplication:
with graph.inserting_before(lin_chain[smallest_node_i]):
new_arg = graph.call_function(
operator.mul,
(
lin_chain[smallest_node_i].args[smallest_arg_i],
scalars[lin_chain_i],
),
)
new_args = list(lin_chain[smallest_node_i].args)
new_args[smallest_arg_i] = new_arg
lin_chain[smallest_node_i].args = tuple(new_args)
graph.lint()
return graph
|
(graph: torch.fx.graph.Graph, in_place: bool = False) -> torch.fx.graph.Graph
|
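A sketch of ``fuse_scalars`` in action (assuming the function is importable from the package root): two constant factors around an einsum accumulate into a single multiply.

import torch
import torch.fx
from opt_einsum_fx import fuse_scalars

def f(x, y):
    return 3.0 * torch.einsum("ij,jk->ik", 2.0 * x, y)

gm = torch.fx.symbolic_trace(f)
gm.graph = fuse_scalars(gm.graph)
gm.recompile()
print(gm.code)  # without shape info, a single 6.0 multiply should land at the end of the chain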
42,013 |
opt_einsum_fx._script
|
jitable
|
Convert some torch calls into their TorchScript signatures.
In place. Currently deals with ``tensordot`` and ``permute``.
Args:
obj: the ``fx.Graph`` or ``fx.GraphModule`` to process.
Returns:
``obj``, modified in-place.
|
def jitable(obj: Union[fx.GraphModule, fx.Graph]) -> Union[fx.GraphModule, fx.Graph]:
"""Convert some torch calls into their TorchScript signatures.
In place. Currently deals with ``tensordot`` and ``permute``.
Args:
obj: the ``fx.Graph`` or ``fx.GraphModule`` to process.
Returns:
``obj``, modified in-place.
"""
if isinstance(obj, fx.GraphModule):
graph = obj.graph
else:
graph = obj
torch_is_ge_19: bool = version.parse(torch.__version__) >= version.parse("1.9.0")
for node in graph.nodes:
if node.op == "call_function":
if (
node.target == torch.tensordot
or node.target == torch.functional.tensordot
):
if "dims" in node.kwargs:
args = list(node.args)
kwargs = dict(node.kwargs)
dim_self, dim_other = kwargs.pop("dims")
assert len(args) == 2 # tensors 1 and 2
if torch_is_ge_19:
# In torch >= 1.9.0, they've corrected the torchscript interface
# to align with the python one:
args.append((list(dim_self), list(dim_other)))
else:
args.append(list(dim_self))
args.append(list(dim_other))
node.args = tuple(args)
node.kwargs = kwargs
elif node.op == "call_method":
if node.target == "permute":
self_arg, args = node.args[0], node.args[1:]
if not isinstance(args[0], list):
node.args = [self_arg, list(args)]
graph.lint()
if isinstance(obj, fx.GraphModule):
obj.recompile()
return obj
|
(obj: Union[torch.fx.graph_module.GraphModule, torch.fx.graph.Graph]) -> Union[torch.fx.graph_module.GraphModule, torch.fx.graph.Graph]
|
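``jitable`` exists because TorchScript's overloads for ``tensordot`` and ``permute`` differ from the eager Python signatures; run it on the traced module right before scripting. A sketch:

import torch
import torch.fx
from opt_einsum_fx import jitable

def f(a, b):
    return torch.tensordot(a, b, dims=([1], [0]))

gm = torch.fx.symbolic_trace(f)
scripted = torch.jit.script(jitable(gm))  # rewrites the `dims` kwarg into a scriptable form first
scripted(torch.randn(3, 4), torch.randn(4, 5))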
42,014 |
opt_einsum_fx._opt_ein
|
optimize_einsums
|
Optimize einsums in a ``torch.fx.Graph`` using ``opt_einsum``.
``graph`` must have shape information, such as that populated by ``torch.fx.passes.shape_prop.ShapeProp``. The shapes are used by ``opt_einsum``, and the resulting contraction is specific to the provided shapes; quoting the ``opt_einsum`` docs:
...while it will work for a set of arrays with the same ranks as the original shapes but differing sizes, it might no longer be optimal.
See the ``opt_einsum`` `documentation <https://optimized-einsum.readthedocs.io/en/stable/reusing_paths.html>`_ for more details.
Args:
graph (fx.Graph): the graph to optimize
contract_kwargs: extra keyword arguments for ``opt_einsum.contract_path``.
Returns:
An optimized ``fx.Graph``.
|
def optimize_einsums(graph: fx.Graph, contract_kwargs: dict = {}) -> fx.Graph:
"""Optimize einsums in a ``torch.fx.Graph`` using ``opt_einsum``.
``graph`` must have shape information, such as that populated by ``torch.fx.passes.shape_prop.ShapeProp``. The shapes are used by ``opt_einsum``, and the resulting contraction is specific to the provided shapes; quoting the ``opt_einsum`` docs:
...while it will work for a set of arrays with the same ranks as the original shapes but differing sizes, it might no longer be optimal.
See the ``opt_einsum`` `documentation <https://optimized-einsum.readthedocs.io/en/stable/reusing_paths.html>`_ for more details.
Args:
graph (fx.Graph): the graph to optimize
contract_kwargs: extra keyword arguments for ``opt_einsum.contract_path``.
Returns:
An optimized ``fx.Graph``.
"""
defaults = {
"optimize": "optimal",
}
defaults.update(contract_kwargs)
contract_kwargs = defaults
new_graph = fx.Graph()
tracer = fx.proxy.GraphAppendingTracer(new_graph)
# env keeps track of new injected nodes in addition to existing ones,
# making sure they get into new_graph
env = {}
node_processed: bool = False
for node in graph.nodes:
node_processed = False
if node.op == "call_function" and node.target in _EINSUM_FUNCS:
# Get shapes
shapes = [get_shape(a) for a in node.args[1:]]
if any(s is None for s in shapes):
warnings.warn(
f"einsum {repr(node)} lacked shape information; "
"not optimizing. "
"Did you forget to run ShapeProp on this graph?",
RuntimeWarning,
)
else:
# We have shapes, so:
# Determine the optimal contraction
path, path_info = opt_einsum.contract_path(
node.args[0], # the einstr
*shapes,
shapes=True,
**contract_kwargs,
)
# By wrapping the arguments with proxies,
# we can dispatch to opt_einsum and implicitly
# add it to the Graph by symbolically tracing it.
proxy_args = [
fx.Proxy(env[x.name], tracer=tracer) if isinstance(x, fx.Node) else x
for x in node.args
]
# Use _core_contract to avoid `len()` calls that
# fx can't deal with
output_proxy = _core_contract(
proxy_args[1:],
path_info.contraction_list,
backend="torch",
evaluate_constants=False,
)
# Operations on `Proxy` always yield new `Proxy`s, and the
# return value of our decomposition rule is no exception.
# We need to extract the underlying `Node` from the `Proxy`
# to use it in subsequent iterations of this transform.
new_node = output_proxy.node
env[node.name] = new_node
node_processed = True
if not node_processed:
# Default case: just copy the node over into the new graph.
new_node = new_graph.node_copy(node, lambda x: env[x.name])
env[node.name] = new_node
new_graph.lint()
return new_graph
|
(graph: torch.fx.graph.Graph, contract_kwargs: dict = {}) -> torch.fx.graph.Graph
|
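The intended flow, per the docstring: propagate shapes first, then optimize (example shapes are ours):

import torch
import torch.fx
from torch.fx.passes.shape_prop import ShapeProp
from opt_einsum_fx import optimize_einsums

def f(x, y, z):
    return torch.einsum("ij,jk,kl->il", x, y, z)

gm = torch.fx.symbolic_trace(f)
ShapeProp(gm).propagate(torch.randn(8, 64), torch.randn(64, 64), torch.randn(64, 2))
gm.graph = optimize_einsums(gm.graph)   # contraction order chosen for these shapes
gm.recompile()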
42,015 |
opt_einsum_fx._opt_ein
|
optimize_einsums_full
|
Optimize einsums in ``model`` for ``example_inputs``.
All of the restrictions of ``torch.fx`` symbolic tracing apply.
Applies, in order, four optimizations:
1. Scalar accumulation --- use the multilinearity of einsum to collect all constant coefficients and divisors of operands and outputs
2. Fusing einsums --- gives greater flexibility to (3)
3. Optimized contraction with ``opt_einsum``.
4. Moving constant scalar coefficients through operations they commute with in order to place them on the smallest possible intermediate results
Args:
model (torch.nn.Module or callable or fx.Graph): the model, function, or ``fx.Graph`` to optimize.
example_inputs (tuple): arguments to ``model`` whose shapes will determine the einsum optimizations.
tracer_class (type, optional): the tracer class to use to turn ``model`` into an ``fx.Graph`` if it isn't already an ``fx.GraphModule`` or ``fx.Graph``.
Returns:
An optimized ``fx.GraphModule``, or if ``model`` is an ``fx.Graph``, an optimized ``fx.Graph``.
|
def optimize_einsums_full(
model: Union[torch.nn.Module, Callable, fx.Graph],
example_inputs: tuple,
contract_kwargs: dict = {},
tracer_class: type = fx.Tracer,
) -> Union[fx.GraphModule, fx.Graph]:
"""Optimize einsums in ``model`` for ``example_inputs``.
All of the restrictions of ``torch.fx`` symbolic tracing apply.
Applies, in order, four optimizations:
1. Scalar accumulation --- use the multilinearity of einsum to collect all constant coefficients and divisors of operands and outputs
2. Fusing einsums --- gives greater flexibility to (3)
3. Optimized contraction with ``opt_einsum``.
4. Moving constant scalar coefficients through operations they commute with in order to place them on the smallest possible intermediate results
Args:
model (torch.nn.Module or callable or fx.Graph): the model, function, or ``fx.Graph`` to optimize.
example_inputs (tuple): arguments to ``model`` whose shapes will determine the einsum optimizations.
tracer_class (type, optional): the tracer class to use to turn ``model`` into an ``fx.Graph`` if it isn't already an ``fx.GraphModule`` or ``fx.Graph``.
Returns:
An optimized ``fx.GraphModule``, or if ``model`` is an ``fx.Graph``, an optimized ``fx.Graph``.
"""
output_graph = False
if isinstance(model, fx.GraphModule):
graph: fx.Graph = model.graph
elif isinstance(model, fx.Graph):
graph: fx.Graph = model
model = torch.nn.Module()
output_graph = True
else:
tracer: fx.Tracer = tracer_class()
graph: fx.Graph = tracer.trace(model)
model = tracer.root
# 1. Scalar accumulation
# without shape information, this just accumulates scalars and moves them to the end of chains of linear operations
graph = fuse_scalars(graph)
# 2. Fuse any einsums we can
# This gives opt_einsum the most freedom possible to rearrange things
# Since we already moved scalars to the end of chains of linear operations, any scalars between linear operations should already have been moved
graph = fuse_einsums(graph, in_place=True)
out_mod = fx.GraphModule(model, graph)
# 3. Shape propagation
sp = ShapeProp(out_mod)
sp.run(*example_inputs)
# 4. Optimize einsums
out_mod.graph = optimize_einsums(out_mod.graph, contract_kwargs)
out_mod.recompile()
# 5. Shape prop (again)
# We need shapes to put the scalars in the best place
sp = ShapeProp(out_mod)
sp.run(*example_inputs)
# 6. Final scalar fusion to move scalars
out_mod.graph = fuse_scalars(out_mod.graph, in_place=True)
if output_graph:
return out_mod.graph
else:
out_mod.recompile()
return out_mod
|
(model: Union[torch.nn.modules.module.Module, Callable, torch.fx.graph.Graph], example_inputs: tuple, contract_kwargs: dict = {}, tracer_class: type = <class 'torch.fx._symbolic_trace.Tracer'>) -> Union[torch.fx.graph_module.GraphModule, torch.fx.graph.Graph]
|
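An end-to-end sketch of ``optimize_einsums_full`` (shapes are illustrative): the nested einsums get fused, ``opt_einsum`` picks a contraction path for the example shapes, and the scalar is placed on the smallest intermediate.

import torch
from opt_einsum_fx import optimize_einsums_full

def f(x, y, z):
    return 0.5 * torch.einsum("ij,jk->ik", torch.einsum("ab,bj->aj", x, y), z)

example_inputs = (torch.randn(8, 16), torch.randn(16, 32), torch.randn(32, 4))
gm = optimize_einsums_full(f, example_inputs)
assert torch.allclose(f(*example_inputs), gm(*example_inputs))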
42,016 |
patch_ng
|
Hunk
|
Parsed hunk data container (hunk starts with @@ -R +R @@)
|
class Hunk(object):
""" Parsed hunk data container (hunk starts with @@ -R +R @@) """
def __init__(self):
self.startsrc=None #: line count starts with 1
self.linessrc=None
self.starttgt=None
self.linestgt=None
self.invalid=False
self.desc=''
self.text=[]
|
()
|
42,017 |
patch_ng
|
__init__
| null |
def __init__(self):
self.startsrc=None #: line count starts with 1
self.linessrc=None
self.starttgt=None
self.linestgt=None
self.invalid=False
self.desc=''
self.text=[]
|
(self)
|
42,018 |
patch_ng
|
NullHandler
|
Copied from Python 2.7 to avoid getting
`No handlers could be found for logger "patch"`
http://bugs.python.org/issue16539
|
class NullHandler(logging.Handler):
""" Copied from Python 2.7 to avoid getting
`No handlers could be found for logger "patch"`
http://bugs.python.org/issue16539
"""
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
|
(level=0)
|
42,025 |
patch_ng
|
createLock
| null |
def createLock(self):
self.lock = None
|
(self)
|
42,026 |
patch_ng
|
emit
| null |
def emit(self, record):
pass
|
(self, record)
|
42,031 |
patch_ng
|
handle
| null |
def handle(self, record):
pass
|
(self, record)
|
42,038 |
patch_ng
|
Patch
|
Patch for a single file.
If used as an iterable, returns hunks.
|
class Patch(object):
""" Patch for a single file.
If used as an iterable, returns hunks.
"""
def __init__(self):
self.source = None
self.target = None
self.hunks = []
self.hunkends = []
self.header = []
self.type = None
def __iter__(self):
for h in self.hunks:
yield h
|
()
|
42,039 |
patch_ng
|
__init__
| null |
def __init__(self):
self.source = None
self.target = None
self.hunks = []
self.hunkends = []
self.header = []
self.type = None
|
(self)
|
42,040 |
patch_ng
|
__iter__
| null |
def __iter__(self):
for h in self.hunks:
yield h
|
(self)
|
42,041 |
patch_ng
|
PatchSet
|
PatchSet is a patch parser and container.
When used as an iterable, returns patches.
|
class PatchSet(object):
""" PatchSet is a patch parser and container.
When used as an iterable, returns patches.
"""
def __init__(self, stream=None):
# --- API accessible fields ---
# name of the PatchSet (filename or ...)
self.name = None
# patch set type - one of constants
self.type = None
# list of Patch objects
self.items = []
self.errors = 0 # fatal parsing errors
self.warnings = 0 # non-critical warnings
# --- /API ---
if stream:
self.parse(stream)
def __len__(self):
return len(self.items)
def __iter__(self):
for i in self.items:
yield i
def parse(self, stream):
""" parse unified diff
return True on success
"""
lineends = dict(lf=0, crlf=0, cr=0)
nexthunkno = 0 #: even if the index starts with 0, user messages number hunks from 1
p = None
hunk = None
# hunkactual variable is used to calculate hunk lines for comparison
hunkactual = dict(linessrc=None, linestgt=None)
class wrapumerate(enumerate):
"""Enumerate wrapper that uses boolean end of stream status instead of
StopIteration exception, and properties to access line information.
"""
def __init__(self, *args, **kwargs):
# we don't call parent, it is magically created by __new__ method
self._exhausted = False
self._lineno = False # after end of stream equal to the num of lines
self._line = False # will be reset to False after end of stream
def next(self):
"""Try to read the next line and return True if it is available,
False if end of stream is reached."""
if self._exhausted:
return False
try:
self._lineno, self._line = compat_next(super(wrapumerate, self))
except StopIteration:
self._exhausted = True
self._line = False
return False
return True
@property
def is_empty(self):
return self._exhausted
@property
def line(self):
return self._line
@property
def lineno(self):
return self._lineno
# define states (possible file regions) that direct parse flow
headscan = True # start with scanning header
filenames = False # lines starting with --- and +++
hunkhead = False # @@ -R +R @@ sequence
hunkbody = False #
hunkskip = False # skipping invalid hunk mode
hunkparsed = False # state after successfully parsed hunk
# regexp to match start of hunk, used groups - 1,3,4,6
re_hunk_start = re.compile(rb"^@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@")
self.errors = 0
# temp buffers for header and filenames info
header = []
srcname = None
tgtname = None
# start of main cycle
# each parsing block already has line available in fe.line
fe = wrapumerate(stream)
while fe.next():
# -- deciders: these only switch state to decide who should process
# -- line fetched at the start of this cycle
if hunkparsed:
hunkparsed = False
if re_hunk_start.match(fe.line):
hunkhead = True
elif fe.line.startswith(b"--- "):
filenames = True
else:
headscan = True
# -- ------------------------------------
# read out header
if headscan:
while not fe.is_empty and not fe.line.startswith(b"--- "):
header.append(fe.line)
fe.next()
if fe.is_empty:
if p is None:
debug("no patch data found") # error is shown later
self.errors += 1
else:
info("%d unparsed bytes left at the end of stream" % len(b''.join(header)))
self.warnings += 1
# TODO check for \No new line at the end..
# TODO test for unparsed bytes
# otherwise error += 1
# this is actually a loop exit
continue
headscan = False
# switch to filenames state
filenames = True
line = fe.line
lineno = fe.lineno
# hunkskip and hunkbody code skipped until definition of hunkhead is parsed
if hunkbody:
# [x] treat empty lines inside hunks as containing single space
# (this happens when diff is saved by copy/pasting to editor
# that strips trailing whitespace)
if line.strip(b"\r\n") == b"":
debug("expanding empty line in a middle of hunk body")
self.warnings += 1
line = b' ' + line
# process line first
if re.match(b"^[- \\+\\\\]", line):
# gather stats about line endings
if line.endswith(b"\r\n"):
p.hunkends["crlf"] += 1
elif line.endswith(b"\n"):
p.hunkends["lf"] += 1
elif line.endswith(b"\r"):
p.hunkends["cr"] += 1
if line.startswith(b"-"):
hunkactual["linessrc"] += 1
elif line.startswith(b"+"):
hunkactual["linestgt"] += 1
elif not line.startswith(b"\\"):
hunkactual["linessrc"] += 1
hunkactual["linestgt"] += 1
hunk.text.append(line)
# todo: handle \ No newline cases
else:
warning("invalid hunk no.%d at %d for target file %s" % (nexthunkno, lineno+1, p.target))
# add hunk status node
hunk.invalid = True
p.hunks.append(hunk)
self.errors += 1
# switch to hunkskip state
hunkbody = False
hunkskip = True
# check exit conditions
if hunkactual["linessrc"] > hunk.linessrc or hunkactual["linestgt"] > hunk.linestgt:
warning("extra lines for hunk no.%d at %d for target %s" % (nexthunkno, lineno+1, p.target))
# add hunk status node
hunk.invalid = True
p.hunks.append(hunk)
self.errors += 1
# switch to hunkskip state
hunkbody = False
hunkskip = True
elif hunk.linessrc == hunkactual["linessrc"] and hunk.linestgt == hunkactual["linestgt"]:
# hunk parsed successfully
p.hunks.append(hunk)
# switch to hunkparsed state
hunkbody = False
hunkparsed = True
# detect mixed window/unix line ends
ends = p.hunkends
if ((ends["cr"]!=0) + (ends["crlf"]!=0) + (ends["lf"]!=0)) > 1:
warning("inconsistent line ends in patch hunks for %s" % p.source)
self.warnings += 1
if debugmode:
debuglines = dict(ends)
debuglines.update(file=p.target, hunk=nexthunkno)
debug("crlf: %(crlf)d lf: %(lf)d cr: %(cr)d\t - file: %(file)s hunk: %(hunk)d" % debuglines)
# fetch next line
continue
if hunkskip:
if re_hunk_start.match(line):
# switch to hunkhead state
hunkskip = False
hunkhead = True
elif line.startswith(b"--- "):
# switch to filenames state
hunkskip = False
filenames = True
if debugmode and len(self.items) > 0:
debug("- %2d hunks for %s" % (len(p.hunks), p.source))
if filenames:
if line.startswith(b"--- "):
if srcname != None:
# XXX testcase
warning("skipping false patch for %s" % srcname)
srcname = None
# XXX header += srcname
# double source filename line is encountered
# attempt to restart from this second line
# Files dated at Unix epoch don't exist, e.g.:
# '1970-01-01 01:00:00.000000000 +0100'
# They include timezone offsets.
# .. which can be parsed (if we remove the nanoseconds)
# .. by strptime() with:
# '%Y-%m-%d %H:%M:%S %z'
# .. but unfortunately this relies on the OSes libc
# strptime function and %z support is patchy, so we drop
# everything from the . onwards and group the year and time
# separately.
re_filename_date_time = b"^--- ([^\t]+)(?:\s([0-9-]+)\s([0-9:]+)|.*)"
match = re.match(re_filename_date_time, line)
# todo: support spaces in filenames
if match:
srcname = match.group(1).strip()
date = match.group(2)
time = match.group(3)
if (date == b'1970-01-01' or date == b'1969-12-31') and time.split(b':',1)[1] == b'00:00':
srcname = b'/dev/null'
else:
warning("skipping invalid filename at line %d" % (lineno+1))
self.errors += 1
# XXX p.header += line
# switch back to headscan state
filenames = False
headscan = True
elif not line.startswith(b"+++ "):
if srcname != None:
warning("skipping invalid patch with no target for %s" % srcname)
self.errors += 1
srcname = None
# XXX header += srcname
# XXX header += line
else:
# this should be unreachable
warning("skipping invalid target patch")
filenames = False
headscan = True
else:
if tgtname != None:
# XXX seems to be a dead branch
warning("skipping invalid patch - double target at line %d" % (lineno+1))
self.errors += 1
srcname = None
tgtname = None
# XXX header += srcname
# XXX header += tgtname
# XXX header += line
# double target filename line is encountered
# switch back to headscan state
filenames = False
headscan = True
else:
re_filename_date_time = b"^\+\+\+ ([^\t]+)(?:\s([0-9-]+)\s([0-9:]+)|.*)"
match = re.match(re_filename_date_time, line)
if not match:
warning("skipping invalid patch - no target filename at line %d" % (lineno+1))
self.errors += 1
srcname = None
# switch back to headscan state
filenames = False
headscan = True
else:
tgtname = match.group(1).strip()
date = match.group(2)
time = match.group(3)
if (date == b'1970-01-01' or date == b'1969-12-31') and time.split(b':',1)[1] == b'00:00':
tgtname = b'/dev/null'
if p: # for the first run p is None
self.items.append(p)
p = Patch()
p.source = srcname
srcname = None
p.target = tgtname
tgtname = None
p.header = header
header = []
# switch to hunkhead state
filenames = False
hunkhead = True
nexthunkno = 0
p.hunkends = lineends.copy()
continue
if hunkhead:
match = re.match(b"^@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@(.*)", line)
if not match:
if not p.hunks:
warning("skipping invalid patch with no hunks for file %s" % p.source)
self.errors += 1
# XXX review switch
# switch to headscan state
hunkhead = False
headscan = True
continue
else:
# TODO review condition case
# switch to headscan state
hunkhead = False
headscan = True
else:
hunk = Hunk()
hunk.startsrc = int(match.group(1))
hunk.linessrc = 1
if match.group(3): hunk.linessrc = int(match.group(3))
hunk.starttgt = int(match.group(4))
hunk.linestgt = 1
if match.group(6): hunk.linestgt = int(match.group(6))
hunk.invalid = False
hunk.desc = match.group(7)[1:].rstrip()
hunk.text = []
hunkactual["linessrc"] = hunkactual["linestgt"] = 0
# switch to hunkbody state
hunkhead = False
hunkbody = True
nexthunkno += 1
continue
# /while fe.next()
if p:
self.items.append(p)
if not hunkparsed:
if hunkskip:
warning("warning: finished with errors, some hunks may be invalid")
elif headscan:
if len(self.items) == 0:
warning("error: no patch data found!")
return False
else: # extra data at the end of file
pass
else:
warning("error: patch stream is incomplete!")
self.errors += 1
if len(self.items) == 0:
return False
if debugmode and len(self.items) > 0:
debug("- %2d hunks for %s" % (len(p.hunks), p.source))
# XXX fix total hunks calculation
debug("total files: %d total hunks: %d" % (len(self.items),
sum(len(p.hunks) for p in self.items)))
# ---- detect patch and patchset types ----
for idx, p in enumerate(self.items):
self.items[idx].type = self._detect_type(p)
types = set([p.type for p in self.items])
if len(types) > 1:
self.type = MIXED
else:
self.type = types.pop()
# --------
self._normalize_filenames()
return (self.errors == 0)
def _detect_type(self, p):
""" detect and return type for the specified Patch object
analyzes header and filenames info
NOTE: must be run before filenames are normalized
"""
# check for SVN
# - header starts with Index:
# - next line is ===... delimiter
# - filename is followed by revision number
# TODO add SVN revision
if (len(p.header) > 1 and p.header[-2].startswith(b"Index: ")
and p.header[-1].startswith(b"="*67)):
return SVN
# common checks for both HG and GIT
DVCS = ((p.source.startswith(b'a/') or p.source == b'/dev/null')
and (p.target.startswith(b'b/') or p.target == b'/dev/null'))
# GIT type check
# - header[-2] is like "diff --git a/oldname b/newname"
# - header[-1] is like "index <hash>..<hash> <mode>"
# TODO add git rename diffs and add/remove diffs
# add git diff with spaced filename
# TODO http://www.kernel.org/pub/software/scm/git/docs/git-diff.html
# Git patch header len is 2 min
if len(p.header) > 1:
# detect the start of diff header - there might be some comments before
for idx in reversed(range(len(p.header))):
if p.header[idx].startswith(b"diff --git"):
break
if p.header[idx].startswith(b'diff --git a/'):
if (idx+1 < len(p.header)
and re.match(b'(?:index \\w{7}..\\w{7} \\d{6}|new file mode \\d*)', p.header[idx+1])):
if DVCS:
return GIT
# HG check
#
# - for plain HG format header is like "diff -r b2d9961ff1f5 filename"
# - for Git-style HG patches it is "diff --git a/oldname b/newname"
# - filename starts with a/, b/ or is equal to /dev/null
# - exported changesets also contain the header
# # HG changeset patch
# # User [email protected]
# ...
# TODO add MQ
# TODO add revision info
if len(p.header) > 0:
if DVCS and re.match(b'diff -r \\w{12} .*', p.header[-1]):
return HG
if DVCS and p.header[-1].startswith(b'diff --git a/'):
if len(p.header) == 1: # native Git patch header len is 2
return HG
elif p.header[0].startswith(b'# HG changeset patch'):
return HG
return PLAIN
def _normalize_filenames(self):
""" sanitize filenames, normalizing paths, i.e.:
1. strip a/ and b/ prefixes from GIT and HG style patches
2. remove all references to parent directories (with warning)
3. translate any absolute paths to relative (with warning)
[x] always use forward slashes to be cross-platform
(diff/patch were born as unix utilities after all)
return None
"""
if debugmode:
debug("normalize filenames")
for i,p in enumerate(self.items):
if debugmode:
debug(" patch type = %s" % p.type)
debug(" source = %s" % p.source)
debug(" target = %s" % p.target)
if p.type in (HG, GIT):
debug("stripping a/ and b/ prefixes")
if p.source != b'/dev/null':
if not p.source.startswith(b"a/"):
warning("invalid source filename")
else:
p.source = p.source[2:]
if p.target != b'/dev/null':
if not p.target.startswith(b"b/"):
warning("invalid target filename")
else:
p.target = p.target[2:]
p.source = xnormpath(p.source)
p.target = xnormpath(p.target)
sep = b'/' # sep value can be hardcoded, but it looks nice this way
# references to parent are not allowed
if p.source.startswith(b".." + sep):
warning("error: stripping parent path for source file patch no.%d" % (i+1))
self.warnings += 1
while p.source.startswith(b".." + sep):
p.source = p.source.partition(sep)[2]
if p.target.startswith(b".." + sep):
warning("error: stripping parent path for target file patch no.%d" % (i+1))
self.warnings += 1
while p.target.startswith(b".." + sep):
p.target = p.target.partition(sep)[2]
# absolute paths are not allowed
if (xisabs(p.source) and p.source != b'/dev/null') or \
(xisabs(p.target) and p.target != b'/dev/null'):
warning("error: absolute paths are not allowed - file no.%d" % (i+1))
self.warnings += 1
if xisabs(p.source) and p.source != b'/dev/null':
warning("stripping absolute path from source name '%s'" % p.source)
p.source = xstrip(p.source)
if xisabs(p.target) and p.target != b'/dev/null':
warning("stripping absolute path from target name '%s'" % p.target)
p.target = xstrip(p.target)
self.items[i].source = p.source
self.items[i].target = p.target
def diffstat(self):
""" calculate diffstat and return as a string
Notes:
- original diffstat outputs target filename
- single + or - shouldn't escape histogram
"""
names = []
insert = []
delete = []
delta = 0 # size change in bytes
namelen = 0
maxdiff = 0 # max number of changes for single file
# (for histogram width calculation)
for patch in self.items:
i,d = 0,0
for hunk in patch.hunks:
for line in hunk.text:
if line.startswith(b'+'):
i += 1
delta += len(line)-1
elif line.startswith(b'-'):
d += 1
delta -= len(line)-1
names.append(patch.target)
insert.append(i)
delete.append(d)
namelen = max(namelen, len(patch.target))
maxdiff = max(maxdiff, i+d)
output = ''
statlen = len(str(maxdiff)) # stats column width
for i,n in enumerate(names):
# %-19s | %-4d %s
format = " %-" + str(namelen) + "s | %" + str(statlen) + "s %s\n"
hist = ''
# -- calculating histogram --
width = len(format % ('', '', ''))
histwidth = max(2, 80 - width)
if maxdiff < histwidth:
hist = "+"*insert[i] + "-"*delete[i]
else:
iratio = (float(insert[i]) / maxdiff) * histwidth
dratio = (float(delete[i]) / maxdiff) * histwidth
# make sure every entry gets at least one + or -
iwidth = 1 if 0 < iratio < 1 else int(iratio)
dwidth = 1 if 0 < dratio < 1 else int(dratio)
#print(iratio, dratio, iwidth, dwidth, histwidth)
hist = "+"*int(iwidth) + "-"*int(dwidth)
# -- /calculating +- histogram --
output += (format % (tostr(names[i]), str(insert[i] + delete[i]), hist))
output += (" %d files changed, %d insertions(+), %d deletions(-), %+d bytes"
% (len(names), sum(insert), sum(delete), delta))
return output
def findfiles(self, old, new):
""" return tuple of source file, target file """
if old == b'/dev/null':
handle, abspath = tempfile.mkstemp(suffix='pypatch')
abspath = abspath.encode()
# The source file must contain a line for the hunk matching to succeed.
os.write(handle, b' ')
os.close(handle)
if not exists(new):
handle = open(new, 'wb')
handle.close()
return abspath, new
elif exists(old):
return old, old
elif exists(new):
return new, new
elif new == b'/dev/null':
return None, None
else:
# [w] Google Code generates broken patches with its online editor
debug("broken patch from Google Code, stripping prefixes..")
if old.startswith(b'a/') and new.startswith(b'b/'):
old, new = old[2:], new[2:]
debug(" %s" % old)
debug(" %s" % new)
if exists(old):
return old, old
elif exists(new):
return new, new
return None, None
def _strip_prefix(self, filename):
if filename.startswith(b'a/') or filename.startswith(b'b/'):
return filename[2:]
return filename
def decode_clean(self, path, prefix):
path = path.decode("utf-8").replace("\\", "/")
if path.startswith(prefix):
path = path[2:]
return path
def strip_path(self, path, base_path, strip=0):
tokens = path.split("/")
if len(tokens) > 1:
tokens = tokens[strip:]
path = "/".join(tokens)
if base_path:
path = os.path.join(base_path, path)
return path
# account for new and deleted files, upstream dep won't fix them
def apply(self, strip=0, root=None, fuzz=False):
""" Apply parsed patch, optionally stripping leading components
from file paths. `root` parameter specifies working dir.
:param strip: Strip patch path
:param root: Folder to apply the patch
:param fuzz: Accept fuzzy patches
return True on success
"""
items = []
for item in self.items:
source = self.decode_clean(item.source, "a/")
target = self.decode_clean(item.target, "b/")
if "dev/null" in source:
target = self.strip_path(target, root, strip)
hunks = [s.decode("utf-8") for s in item.hunks[0].text]
new_file = "".join(hunk[1:] for hunk in hunks)
save(target, new_file)
elif "dev/null" in target:
source = self.strip_path(source, root, strip)
safe_unlink(source)
else:
items.append(item)
self.items = items
if root:
prevdir = os.getcwd()
os.chdir(root)
total = len(self.items)
errors = 0
if strip:
# [ ] test strip level exceeds nesting level
# [ ] test the same only for selected files
# [ ] test if files end up being on the same level
try:
strip = int(strip)
except ValueError:
errors += 1
warning("error: strip parameter '%s' must be an integer" % strip)
strip = 0
#for fileno, filename in enumerate(self.source):
for i,p in enumerate(self.items):
if strip:
debug("stripping %s leading component(s) from:" % strip)
debug(" %s" % p.source)
debug(" %s" % p.target)
old = p.source if p.source == b'/dev/null' else pathstrip(p.source, strip)
new = p.target if p.target == b'/dev/null' else pathstrip(p.target, strip)
else:
old, new = p.source, p.target
filenameo, filenamen = self.findfiles(old, new)
if not filenameo or not filenamen:
error("source/target file does not exist:\n --- %s\n +++ %s" % (old, new))
errors += 1
continue
if not isfile(filenameo):
error("not a file - %s" % filenameo)
errors += 1
continue
# [ ] check absolute paths security here
debug("processing %d/%d:\t %s" % (i+1, total, filenamen))
# validate before patching
f2fp = open(filenameo, 'rb')
hunkno = 0
hunk = p.hunks[hunkno]
hunkfind = []
hunkreplace = []
validhunks = 0
canpatch = False
for lineno, line in enumerate(f2fp):
if lineno+1 < hunk.startsrc:
continue
elif lineno+1 == hunk.startsrc:
hunkfind = [x[1:].rstrip(b"\r\n") for x in hunk.text if x[0] in b" -"]
hunkreplace = [x[1:].rstrip(b"\r\n") for x in hunk.text if x[0] in b" +"]
#pprint(hunkreplace)
hunklineno = 0
# todo \ No newline at end of file
# check hunks in source file
if lineno+1 < hunk.startsrc+len(hunkfind):
if line.rstrip(b"\r\n") == hunkfind[hunklineno]:
hunklineno += 1
else:
warning("file %d/%d:\t %s" % (i+1, total, filenamen))
warning(" hunk no.%d doesn't match source file at line %d" % (hunkno+1, lineno+1))
warning(" expected: %s" % hunkfind[hunklineno])
warning(" actual : %s" % line.rstrip(b"\r\n"))
if fuzz:
hunklineno += 1
else:
# not counting this as an error, because the file may already be patched.
# the check whether the file is already patched is done after the
# number of invalid hunks is known
# TODO: check hunks against source/target file in one pass
# API - check(stream, srchunks, tgthunks)
# return tuple (srcerrs, tgterrs)
# continue to check other hunks for completeness
hunkno += 1
if hunkno < len(p.hunks):
hunk = p.hunks[hunkno]
continue
else:
break
# check if processed line is the last line
if len(hunkfind) == 0 or lineno+1 == hunk.startsrc+len(hunkfind)-1:
debug(" hunk no.%d for file %s -- is ready to be patched" % (hunkno+1, filenamen))
hunkno+=1
validhunks+=1
if hunkno < len(p.hunks):
hunk = p.hunks[hunkno]
else:
if validhunks == len(p.hunks):
# patch file
canpatch = True
break
else:
if hunkno < len(p.hunks):
error("premature end of source file %s at hunk %d" % (filenameo, hunkno+1))
errors += 1
f2fp.close()
if validhunks < len(p.hunks):
if self._match_file_hunks(filenameo, p.hunks):
warning("already patched %s" % filenameo)
else:
if fuzz:
warning("source file is different - %s" % filenameo)
else:
error("source file is different - %s" % filenameo)
errors += 1
if canpatch:
backupname = filenamen+b".orig"
if exists(backupname):
warning("can't backup original file to %s - aborting" % backupname)
errors += 1
else:
shutil.move(filenamen, backupname)
if self.write_hunks(backupname if filenameo == filenamen else filenameo, filenamen, p.hunks):
info("successfully patched %d/%d:\t %s" % (i+1, total, filenamen))
safe_unlink(backupname)
if new == b'/dev/null':
# check that filename is of size 0 and delete it.
if os.path.getsize(filenamen) > 0:
warning("expected patched file to be empty as it's marked as deletion:\t %s" % filenamen)
safe_unlink(filenamen)
else:
errors += 1
warning("error patching file %s" % filenamen)
shutil.copy(filenamen, filenamen+b".invalid")
warning("invalid version is saved to %s" % (filenamen+b".invalid"))
# todo: proper rejects
shutil.move(backupname, filenamen)
if root:
os.chdir(prevdir)
# todo: check for premature eof
return (errors == 0)
def _reverse(self):
""" reverse patch direction (this doesn't touch filenames) """
for p in self.items:
for h in p.hunks:
h.startsrc, h.starttgt = h.starttgt, h.startsrc
h.linessrc, h.linestgt = h.linestgt, h.linessrc
for i,line in enumerate(h.text):
# need to use line[0:1] here, because line[0]
# returns int instead of bytes on Python 3
if line[0:1] == b'+':
h.text[i] = b'-' + line[1:]
elif line[0:1] == b'-':
h.text[i] = b'+' +line[1:]
def revert(self, strip=0, root=None):
""" apply patch in reverse order """
reverted = copy.deepcopy(self)
reverted._reverse()
return reverted.apply(strip, root)
def can_patch(self, filename):
""" Check if specified filename can be patched. Returns None if file can
not be found among source filenames. False if patch can not be applied
clearly. True otherwise.
:returns: True, False or None
"""
filename = abspath(filename)
for p in self.items:
if filename == abspath(p.source):
return self._match_file_hunks(filename, p.hunks)
return None
def _match_file_hunks(self, filepath, hunks):
matched = True
fp = open(abspath(filepath), 'rb')
class NoMatch(Exception):
pass
lineno = 1
line = fp.readline()
try:
for hno, h in enumerate(hunks):
# skip to first line of the hunk
while lineno < h.starttgt:
if not len(line): # eof
debug("check failed - premature eof before hunk: %d" % (hno+1))
raise NoMatch
line = fp.readline()
lineno += 1
for hline in h.text:
if hline.startswith(b"-"):
continue
if not len(line):
debug("check failed - premature eof on hunk: %d" % (hno+1))
# todo: \ No newline at the end of file
raise NoMatch
if line.rstrip(b"\r\n") != hline[1:].rstrip(b"\r\n"):
debug("file is not patched - failed hunk: %d" % (hno+1))
raise NoMatch
line = fp.readline()
lineno += 1
except NoMatch:
matched = False
# todo: display failed hunk, i.e. expected/found
fp.close()
return matched
def patch_stream(self, instream, hunks):
""" Generator that yields stream patched with hunks iterable
Converts lineends in hunk lines to the best suitable format
autodetected from input
"""
# todo: At the moment substituted lineends may not be the same
# at the start and at the end of patching. Also issue a
# warning/throw about mixed lineends (is it really needed?)
hunks = iter(hunks)
srclineno = 1
lineends = {b'\n':0, b'\r\n':0, b'\r':0}
def get_line():
"""
local utility function - return line from source stream
collecting line end statistics on the way
"""
line = instream.readline()
# 'U' mode works only with text files
if line.endswith(b"\r\n"):
lineends[b"\r\n"] += 1
elif line.endswith(b"\n"):
lineends[b"\n"] += 1
elif line.endswith(b"\r"):
lineends[b"\r"] += 1
return line
for hno, h in enumerate(hunks):
debug("hunk %d" % (hno+1))
# skip to line just before hunk starts
while srclineno < h.startsrc:
yield get_line()
srclineno += 1
for hline in h.text:
# todo: check \ No newline at the end of file
if hline.startswith(b"-") or hline.startswith(b"\\"):
get_line()
srclineno += 1
continue
else:
if not hline.startswith(b"+"):
yield get_line()
srclineno += 1
continue
line2write = hline[1:]
# detect if line ends are consistent in source file
if sum([bool(lineends[x]) for x in lineends]) == 1:
newline = [x for x in lineends if lineends[x] != 0][0]
yield line2write.rstrip(b"\r\n")+newline
else: # newlines are mixed
yield line2write
for line in instream:
yield line
def write_hunks(self, srcname, tgtname, hunks):
src = open(srcname, "rb")
tgt = open(tgtname, "wb")
debug("processing target file %s" % tgtname)
tgt.writelines(self.patch_stream(src, hunks))
tgt.close()
src.close()
# [ ] TODO: add test for permission copy
shutil.copymode(srcname, tgtname)
return True
def dump(self):
    for p in self.items:
        for headline in p.header:
            # header, filename and hunk lines are stored as bytes; decode via tostr() for printing
            print(tostr(headline.rstrip(b'\n')))
        print('--- ' + tostr(p.source))
        print('+++ ' + tostr(p.target))
        for h in p.hunks:
            print('@@ -%s,%s +%s,%s @@' % (h.startsrc, h.linessrc, h.starttgt, h.linestgt))
            for line in h.text:
                print(tostr(line.rstrip(b'\n')))
|
(stream=None)
|
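A minimal usage sketch for the PatchSet class above, assuming the module is importable as patch_ng and that "fix.patch" and the "src" directory exist (both names are hypothetical):

import patch_ng

# parsing happens in __init__ when a binary stream is passed
with open("fix.patch", "rb") as fp:
    ps = patch_ng.PatchSet(fp)
if ps.errors == 0:
    ps.apply(strip=1, root="src")  # roughly equivalent to `patch -p1 -d src`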
42,042 |
patch_ng
|
__init__
| null |
def __init__(self, stream=None):
# --- API accessible fields ---
# name of the PatchSet (filename or ...)
self.name = None
# patch set type - one of constants
self.type = None
# list of Patch objects
self.items = []
self.errors = 0 # fatal parsing errors
self.warnings = 0 # non-critical warnings
# --- /API ---
if stream:
self.parse(stream)
|
(self, stream=None)
|
42,043 |
patch_ng
|
__iter__
| null |
def __iter__(self):
for i in self.items:
yield i
|
(self)
|
42,044 |
patch_ng
|
__len__
| null |
def __len__(self):
return len(self.items)
|
(self)
|
42,045 |
patch_ng
|
_detect_type
|
detect and return type for the specified Patch object
analyzes header and filenames info
NOTE: must be run before filenames are normalized
|
def _detect_type(self, p):
""" detect and return type for the specified Patch object
analyzes header and filenames info
NOTE: must be run before filenames are normalized
"""
# check for SVN
# - header starts with Index:
# - next line is ===... delimiter
# - filename is followed by revision number
# TODO add SVN revision
if (len(p.header) > 1 and p.header[-2].startswith(b"Index: ")
and p.header[-1].startswith(b"="*67)):
return SVN
# common checks for both HG and GIT
DVCS = ((p.source.startswith(b'a/') or p.source == b'/dev/null')
and (p.target.startswith(b'b/') or p.target == b'/dev/null'))
# GIT type check
# - header[-2] is like "diff --git a/oldname b/newname"
# - header[-1] is like "index <hash>..<hash> <mode>"
# TODO add git rename diffs and add/remove diffs
# add git diff with spaced filename
# TODO http://www.kernel.org/pub/software/scm/git/docs/git-diff.html
# Git patch header len is 2 min
if len(p.header) > 1:
# detect the start of diff header - there might be some comments before
for idx in reversed(range(len(p.header))):
if p.header[idx].startswith(b"diff --git"):
break
if p.header[idx].startswith(b'diff --git a/'):
if (idx+1 < len(p.header)
and re.match(b'(?:index \\w{7}..\\w{7} \\d{6}|new file mode \\d*)', p.header[idx+1])):
if DVCS:
return GIT
# HG check
#
# - for plain HG format header is like "diff -r b2d9961ff1f5 filename"
# - for Git-style HG patches it is "diff --git a/oldname b/newname"
# - filename starts with a/, b/ or is equal to /dev/null
# - exported changesets also contain the header
# # HG changeset patch
# # User [email protected]
# ...
# TODO add MQ
# TODO add revision info
if len(p.header) > 0:
if DVCS and re.match(b'diff -r \\w{12} .*', p.header[-1]):
return HG
if DVCS and p.header[-1].startswith(b'diff --git a/'):
if len(p.header) == 1: # native Git patch header len is 2
return HG
elif p.header[0].startswith(b'# HG changeset patch'):
return HG
return PLAIN
|
(self, p)
|
42,046 |
patch_ng
|
_match_file_hunks
| null |
def _match_file_hunks(self, filepath, hunks):
matched = True
fp = open(abspath(filepath), 'rb')
class NoMatch(Exception):
pass
lineno = 1
line = fp.readline()
try:
for hno, h in enumerate(hunks):
# skip to first line of the hunk
while lineno < h.starttgt:
if not len(line): # eof
debug("check failed - premature eof before hunk: %d" % (hno+1))
raise NoMatch
line = fp.readline()
lineno += 1
for hline in h.text:
if hline.startswith(b"-"):
continue
if not len(line):
debug("check failed - premature eof on hunk: %d" % (hno+1))
# todo: \ No newline at the end of file
raise NoMatch
if line.rstrip(b"\r\n") != hline[1:].rstrip(b"\r\n"):
debug("file is not patched - failed hunk: %d" % (hno+1))
raise NoMatch
line = fp.readline()
lineno += 1
except NoMatch:
matched = False
# todo: display failed hunk, i.e. expected/found
fp.close()
return matched
|
(self, filepath, hunks)
|
42,047 |
patch_ng
|
_normalize_filenames
|
sanitize filenames, normalizing paths, i.e.:
1. strip a/ and b/ prefixes from GIT and HG style patches
2. remove all references to parent directories (with warning)
3. translate any absolute paths to relative (with warning)
[x] always use forward slashes to be cross-platform
(diff/patch were born as a unix utility after all)
return None
|
def _normalize_filenames(self):
""" sanitize filenames, normalizing paths, i.e.:
1. strip a/ and b/ prefixes from GIT and HG style patches
2. remove all references to parent directories (with warning)
3. translate any absolute paths to relative (with warning)
[x] always use forward slashes to be cross-platform
(diff/patch were born as a unix utility after all)
return None
"""
if debugmode:
debug("normalize filenames")
for i,p in enumerate(self.items):
if debugmode:
debug(" patch type = %s" % p.type)
debug(" source = %s" % p.source)
debug(" target = %s" % p.target)
if p.type in (HG, GIT):
debug("stripping a/ and b/ prefixes")
if p.source != b'/dev/null':
if not p.source.startswith(b"a/"):
warning("invalid source filename")
else:
p.source = p.source[2:]
if p.target != b'/dev/null':
if not p.target.startswith(b"b/"):
warning("invalid target filename")
else:
p.target = p.target[2:]
p.source = xnormpath(p.source)
p.target = xnormpath(p.target)
sep = b'/' # sep value can be hardcoded, but it looks nice this way
# references to parent are not allowed
if p.source.startswith(b".." + sep):
warning("error: stripping parent path for source file patch no.%d" % (i+1))
self.warnings += 1
while p.source.startswith(b".." + sep):
p.source = p.source.partition(sep)[2]
if p.target.startswith(b".." + sep):
warning("error: stripping parent path for target file patch no.%d" % (i+1))
self.warnings += 1
while p.target.startswith(b".." + sep):
p.target = p.target.partition(sep)[2]
# absolute paths are not allowed
if (xisabs(p.source) and p.source != b'/dev/null') or \
(xisabs(p.target) and p.target != b'/dev/null'):
warning("error: absolute paths are not allowed - file no.%d" % (i+1))
self.warnings += 1
if xisabs(p.source) and p.source != b'/dev/null':
warning("stripping absolute path from source name '%s'" % p.source)
p.source = xstrip(p.source)
if xisabs(p.target) and p.target != b'/dev/null':
warning("stripping absolute path from target name '%s'" % p.target)
p.target = xstrip(p.target)
self.items[i].source = p.source
self.items[i].target = p.target
|
(self)
|
42,048 |
patch_ng
|
_reverse
|
reverse patch direction (this doesn't touch filenames)
|
def _reverse(self):
""" reverse patch direction (this doesn't touch filenames) """
for p in self.items:
for h in p.hunks:
h.startsrc, h.starttgt = h.starttgt, h.startsrc
h.linessrc, h.linestgt = h.linestgt, h.linessrc
for i,line in enumerate(h.text):
# need to use line[0:1] here, because line[0]
# returns int instead of bytes on Python 3
if line[0:1] == b'+':
h.text[i] = b'-' + line[1:]
elif line[0:1] == b'-':
h.text[i] = b'+' +line[1:]
|
(self)
|
42,049 |
patch_ng
|
_strip_prefix
| null |
def _strip_prefix(self, filename):
if filename.startswith(b'a/') or filename.startswith(b'b/'):
return filename[2:]
return filename
|
(self, filename)
|
42,050 |
patch_ng
|
apply
|
Apply parsed patch, optionally stripping leading components
from file paths. `root` parameter specifies working dir.
:param strip: number of leading path components to strip from file paths
:param root: directory in which to apply the patch
:param fuzz: Accept fuzzy patches
return True on success
|
def apply(self, strip=0, root=None, fuzz=False):
""" Apply parsed patch, optionally stripping leading components
from file paths. `root` parameter specifies working dir.
:param strip: number of leading path components to strip from file paths
:param root: directory in which to apply the patch
:param fuzz: Accept fuzzy patches
return True on success
"""
items = []
for item in self.items:
source = self.decode_clean(item.source, "a/")
target = self.decode_clean(item.target, "b/")
if "dev/null" in source:
target = self.strip_path(target, root, strip)
hunks = [s.decode("utf-8") for s in item.hunks[0].text]
new_file = "".join(hunk[1:] for hunk in hunks)
save(target, new_file)
elif "dev/null" in target:
source = self.strip_path(source, root, strip)
safe_unlink(source)
else:
items.append(item)
self.items = items
if root:
prevdir = os.getcwd()
os.chdir(root)
total = len(self.items)
errors = 0
if strip:
# [ ] test strip level exceeds nesting level
# [ ] test the same only for selected files
# [ ] test if files end up being on the same level
try:
strip = int(strip)
except ValueError:
errors += 1
warning("error: strip parameter '%s' must be an integer" % strip)
strip = 0
#for fileno, filename in enumerate(self.source):
for i,p in enumerate(self.items):
if strip:
debug("stripping %s leading component(s) from:" % strip)
debug(" %s" % p.source)
debug(" %s" % p.target)
old = p.source if p.source == b'/dev/null' else pathstrip(p.source, strip)
new = p.target if p.target == b'/dev/null' else pathstrip(p.target, strip)
else:
old, new = p.source, p.target
filenameo, filenamen = self.findfiles(old, new)
if not filenameo or not filenamen:
error("source/target file does not exist:\n --- %s\n +++ %s" % (old, new))
errors += 1
continue
if not isfile(filenameo):
error("not a file - %s" % filenameo)
errors += 1
continue
# [ ] check absolute paths security here
debug("processing %d/%d:\t %s" % (i+1, total, filenamen))
# validate before patching
f2fp = open(filenameo, 'rb')
hunkno = 0
hunk = p.hunks[hunkno]
hunkfind = []
hunkreplace = []
validhunks = 0
canpatch = False
for lineno, line in enumerate(f2fp):
if lineno+1 < hunk.startsrc:
continue
elif lineno+1 == hunk.startsrc:
hunkfind = [x[1:].rstrip(b"\r\n") for x in hunk.text if x[0] in b" -"]
hunkreplace = [x[1:].rstrip(b"\r\n") for x in hunk.text if x[0] in b" +"]
#pprint(hunkreplace)
hunklineno = 0
# todo \ No newline at end of file
# check hunks in source file
if lineno+1 < hunk.startsrc+len(hunkfind):
if line.rstrip(b"\r\n") == hunkfind[hunklineno]:
hunklineno += 1
else:
warning("file %d/%d:\t %s" % (i+1, total, filenamen))
warning(" hunk no.%d doesn't match source file at line %d" % (hunkno+1, lineno+1))
warning(" expected: %s" % hunkfind[hunklineno])
warning(" actual : %s" % line.rstrip(b"\r\n"))
if fuzz:
hunklineno += 1
else:
# not counting this as an error, because the file may already be patched.
# the check whether the file is already patched is done after the
# number of invalid hunks is known
# TODO: check hunks against source/target file in one pass
# API - check(stream, srchunks, tgthunks)
# return tuple (srcerrs, tgterrs)
# continue to check other hunks for completeness
hunkno += 1
if hunkno < len(p.hunks):
hunk = p.hunks[hunkno]
continue
else:
break
# check if processed line is the last line
if len(hunkfind) == 0 or lineno+1 == hunk.startsrc+len(hunkfind)-1:
debug(" hunk no.%d for file %s -- is ready to be patched" % (hunkno+1, filenamen))
hunkno+=1
validhunks+=1
if hunkno < len(p.hunks):
hunk = p.hunks[hunkno]
else:
if validhunks == len(p.hunks):
# patch file
canpatch = True
break
else:
if hunkno < len(p.hunks):
error("premature end of source file %s at hunk %d" % (filenameo, hunkno+1))
errors += 1
f2fp.close()
if validhunks < len(p.hunks):
if self._match_file_hunks(filenameo, p.hunks):
warning("already patched %s" % filenameo)
else:
if fuzz:
warning("source file is different - %s" % filenameo)
else:
error("source file is different - %s" % filenameo)
errors += 1
if canpatch:
backupname = filenamen+b".orig"
if exists(backupname):
warning("can't backup original file to %s - aborting" % backupname)
errors += 1
else:
shutil.move(filenamen, backupname)
if self.write_hunks(backupname if filenameo == filenamen else filenameo, filenamen, p.hunks):
info("successfully patched %d/%d:\t %s" % (i+1, total, filenamen))
safe_unlink(backupname)
if new == b'/dev/null':
# check that filename is of size 0 and delete it.
if os.path.getsize(filenamen) > 0:
warning("expected patched file to be empty as it's marked as deletion:\t %s" % filenamen)
safe_unlink(filenamen)
else:
errors += 1
warning("error patching file %s" % filenamen)
shutil.copy(filenamen, filenamen+b".invalid")
warning("invalid version is saved to %s" % (filenamen+b".invalid"))
# todo: proper rejects
shutil.move(backupname, filenamen)
if root:
os.chdir(prevdir)
# todo: check for premature eof
return (errors == 0)
|
(self, strip=0, root=None, fuzz=False)
|
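Hedged example of apply(): parse a patch file and apply it with one leading path component stripped ("fix.patch" and "project" are hypothetical names):

ps = patch_ng.fromfile("fix.patch")
if ps and ps.apply(strip=1, root="project", fuzz=False):
    print("patch applied cleanly")
else:
    print("patch failed - see warnings above")

apply() returns True only when every file patched without errors; files that fail validation are restored from their .orig backups.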
42,051 |
patch_ng
|
can_patch
|
Check if specified filename can be patched. Returns None if the file
cannot be found among source filenames, False if the patch cannot be
applied cleanly, and True otherwise.
:returns: True, False or None
|
def can_patch(self, filename):
""" Check if specified filename can be patched. Returns None if file can
not be found among source filenames. False if patch can not be applied
clearly. True otherwise.
:returns: True, False or None
"""
filename = abspath(filename)
for p in self.items:
if filename == abspath(p.source):
return self._match_file_hunks(filename, p.hunks)
return None
|
(self, filename)
|
42,052 |
patch_ng
|
decode_clean
| null |
def decode_clean(self, path, prefix):
    path = path.decode("utf-8").replace("\\", "/")
    if path.startswith(prefix):
        # strip the expected prefix ("a/" or "b/") rather than a hard-coded length
        path = path[len(prefix):]
    return path
|
(self, path, prefix)
|
42,053 |
patch_ng
|
diffstat
|
calculate diffstat and return as a string
Notes:
- original diffstat outputs target filename
- single + or - shouldn't escape histogram
|
def diffstat(self):
""" calculate diffstat and return as a string
Notes:
- original diffstat outputs target filename
- single + or - shouldn't escape histogram
"""
names = []
insert = []
delete = []
delta = 0 # size change in bytes
namelen = 0
maxdiff = 0 # max number of changes for single file
# (for histogram width calculation)
for patch in self.items:
i,d = 0,0
for hunk in patch.hunks:
for line in hunk.text:
if line.startswith(b'+'):
i += 1
delta += len(line)-1
elif line.startswith(b'-'):
d += 1
delta -= len(line)-1
names.append(patch.target)
insert.append(i)
delete.append(d)
namelen = max(namelen, len(patch.target))
maxdiff = max(maxdiff, i+d)
output = ''
statlen = len(str(maxdiff)) # stats column width
for i,n in enumerate(names):
# %-19s | %-4d %s
format = " %-" + str(namelen) + "s | %" + str(statlen) + "s %s\n"
hist = ''
# -- calculating histogram --
width = len(format % ('', '', ''))
histwidth = max(2, 80 - width)
if maxdiff < histwidth:
hist = "+"*insert[i] + "-"*delete[i]
else:
iratio = (float(insert[i]) / maxdiff) * histwidth
dratio = (float(delete[i]) / maxdiff) * histwidth
# make sure every entry gets at least one + or -
iwidth = 1 if 0 < iratio < 1 else int(iratio)
dwidth = 1 if 0 < dratio < 1 else int(dratio)
#print(iratio, dratio, iwidth, dwidth, histwidth)
hist = "+"*int(iwidth) + "-"*int(dwidth)
# -- /calculating +- histogram --
output += (format % (tostr(names[i]), str(insert[i] + delete[i]), hist))
output += (" %d files changed, %d insertions(+), %d deletions(-), %+d bytes"
% (len(names), sum(insert), sum(delete), delta))
return output
|
(self)
|
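A short, hedged sketch of diffstat(); the histogram width adapts to the longest filename, and the sample output below is illustrative:

ps = patch_ng.fromfile("fix.patch")  # "fix.patch" is hypothetical
if ps:
    print(ps.diffstat())
# e.g.
#  src/main.c | 10 +++++-----
#  1 files changed, 5 insertions(+), 5 deletions(-), +12 bytes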
42,054 |
patch_ng
|
dump
| null |
def dump(self):
    for p in self.items:
        for headline in p.header:
            # header, filename and hunk lines are stored as bytes; decode via tostr() for printing
            print(tostr(headline.rstrip(b'\n')))
        print('--- ' + tostr(p.source))
        print('+++ ' + tostr(p.target))
        for h in p.hunks:
            print('@@ -%s,%s +%s,%s @@' % (h.startsrc, h.linessrc, h.starttgt, h.linestgt))
            for line in h.text:
                print(tostr(line.rstrip(b'\n')))
|
(self)
|
42,055 |
patch_ng
|
findfiles
|
return tuple of source file, target file
|
def findfiles(self, old, new):
""" return tuple of source file, target file """
if old == b'/dev/null':
handle, abspath = tempfile.mkstemp(suffix='pypatch')
abspath = abspath.encode()
# The source file must contain a line for the hunk matching to succeed.
os.write(handle, b' ')
os.close(handle)
if not exists(new):
handle = open(new, 'wb')
handle.close()
return abspath, new
elif exists(old):
return old, old
elif exists(new):
return new, new
elif new == b'/dev/null':
return None, None
else:
# [w] Google Code generates broken patches with its online editor
debug("broken patch from Google Code, stripping prefixes..")
if old.startswith(b'a/') and new.startswith(b'b/'):
old, new = old[2:], new[2:]
debug(" %s" % old)
debug(" %s" % new)
if exists(old):
return old, old
elif exists(new):
return new, new
return None, None
|
(self, old, new)
|
42,056 |
patch_ng
|
parse
|
parse unified diff
return True on success
|
def parse(self, stream):
""" parse unified diff
return True on success
"""
lineends = dict(lf=0, crlf=0, cr=0)
nexthunkno = 0  # even though the index starts at 0, user-facing messages number hunks from 1
p = None
hunk = None
# hunkactual variable is used to calculate hunk lines for comparison
hunkactual = dict(linessrc=None, linestgt=None)
class wrapumerate(enumerate):
"""Enumerate wrapper that uses boolean end of stream status instead of
StopIteration exception, and properties to access line information.
"""
def __init__(self, *args, **kwargs):
# we don't call parent, it is magically created by __new__ method
self._exhausted = False
self._lineno = False # after end of stream equal to the num of lines
self._line = False # will be reset to False after end of stream
def next(self):
"""Try to read the next line and return True if it is available,
False if end of stream is reached."""
if self._exhausted:
return False
try:
self._lineno, self._line = compat_next(super(wrapumerate, self))
except StopIteration:
self._exhausted = True
self._line = False
return False
return True
@property
def is_empty(self):
return self._exhausted
@property
def line(self):
return self._line
@property
def lineno(self):
return self._lineno
# define states (possible file regions) that direct parse flow
headscan = True # start with scanning header
filenames = False # lines starting with --- and +++
hunkhead = False # @@ -R +R @@ sequence
hunkbody = False #
hunkskip = False # skipping invalid hunk mode
hunkparsed = False # state after successfully parsed hunk
# regexp to match start of hunk, used groups - 1,3,4,6
re_hunk_start = re.compile(rb"^@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@")  # raw bytes avoid invalid-escape warnings
self.errors = 0
# temp buffers for header and filenames info
header = []
srcname = None
tgtname = None
# start of main cycle
# each parsing block already has line available in fe.line
fe = wrapumerate(stream)
while fe.next():
# -- deciders: these only switch state to decide who should process
# -- line fetched at the start of this cycle
if hunkparsed:
hunkparsed = False
if re_hunk_start.match(fe.line):
hunkhead = True
elif fe.line.startswith(b"--- "):
filenames = True
else:
headscan = True
# -- ------------------------------------
# read out header
if headscan:
while not fe.is_empty and not fe.line.startswith(b"--- "):
header.append(fe.line)
fe.next()
if fe.is_empty:
if p is None:
debug("no patch data found") # error is shown later
self.errors += 1
else:
info("%d unparsed bytes left at the end of stream" % len(b''.join(header)))
self.warnings += 1
# TODO check for \No new line at the end..
# TODO test for unparsed bytes
# otherwise error += 1
# this is actually a loop exit
continue
headscan = False
# switch to filenames state
filenames = True
line = fe.line
lineno = fe.lineno
# hunkskip and hunkbody code skipped until definition of hunkhead is parsed
if hunkbody:
# [x] treat empty lines inside hunks as containing single space
# (this happens when diff is saved by copy/pasting to editor
# that strips trailing whitespace)
if line.strip(b"\r\n") == b"":
debug("expanding empty line in a middle of hunk body")
self.warnings += 1
line = b' ' + line
# process line first
if re.match(b"^[- \\+\\\\]", line):
# gather stats about line endings
if line.endswith(b"\r\n"):
p.hunkends["crlf"] += 1
elif line.endswith(b"\n"):
p.hunkends["lf"] += 1
elif line.endswith(b"\r"):
p.hunkends["cr"] += 1
if line.startswith(b"-"):
hunkactual["linessrc"] += 1
elif line.startswith(b"+"):
hunkactual["linestgt"] += 1
elif not line.startswith(b"\\"):
hunkactual["linessrc"] += 1
hunkactual["linestgt"] += 1
hunk.text.append(line)
# todo: handle \ No newline cases
else:
warning("invalid hunk no.%d at %d for target file %s" % (nexthunkno, lineno+1, p.target))
# add hunk status node
hunk.invalid = True
p.hunks.append(hunk)
self.errors += 1
# switch to hunkskip state
hunkbody = False
hunkskip = True
# check exit conditions
if hunkactual["linessrc"] > hunk.linessrc or hunkactual["linestgt"] > hunk.linestgt:
warning("extra lines for hunk no.%d at %d for target %s" % (nexthunkno, lineno+1, p.target))
# add hunk status node
hunk.invalid = True
p.hunks.append(hunk)
self.errors += 1
# switch to hunkskip state
hunkbody = False
hunkskip = True
elif hunk.linessrc == hunkactual["linessrc"] and hunk.linestgt == hunkactual["linestgt"]:
# hunk parsed successfully
p.hunks.append(hunk)
# switch to hunkparsed state
hunkbody = False
hunkparsed = True
# detect mixed window/unix line ends
ends = p.hunkends
if ((ends["cr"]!=0) + (ends["crlf"]!=0) + (ends["lf"]!=0)) > 1:
warning("inconsistent line ends in patch hunks for %s" % p.source)
self.warnings += 1
if debugmode:
debuglines = dict(ends)
debuglines.update(file=p.target, hunk=nexthunkno)
debug("crlf: %(crlf)d lf: %(lf)d cr: %(cr)d\t - file: %(file)s hunk: %(hunk)d" % debuglines)
# fetch next line
continue
if hunkskip:
if re_hunk_start.match(line):
# switch to hunkhead state
hunkskip = False
hunkhead = True
elif line.startswith(b"--- "):
# switch to filenames state
hunkskip = False
filenames = True
if debugmode and len(self.items) > 0:
debug("- %2d hunks for %s" % (len(p.hunks), p.source))
if filenames:
if line.startswith(b"--- "):
if srcname != None:
# XXX testcase
warning("skipping false patch for %s" % srcname)
srcname = None
# XXX header += srcname
# double source filename line is encountered
# attempt to restart from this second line
# Files dated at Unix epoch don't exist, e.g.:
# '1970-01-01 01:00:00.000000000 +0100'
# They include timezone offsets.
# .. which can be parsed (if we remove the nanoseconds)
# .. by strptime() with:
# '%Y-%m-%d %H:%M:%S %z'
# .. but unfortunately this relies on the OSes libc
# strptime function and %z support is patchy, so we drop
# everything from the . onwards and group the year and time
# separately.
re_filename_date_time = rb"^--- ([^\t]+)(?:\s([0-9-]+)\s([0-9:]+)|.*)"
match = re.match(re_filename_date_time, line)
# todo: support spaces in filenames
if match:
srcname = match.group(1).strip()
date = match.group(2)
time = match.group(3)
if (date == b'1970-01-01' or date == b'1969-12-31') and time.split(b':',1)[1] == b'00:00':
srcname = b'/dev/null'
else:
warning("skipping invalid filename at line %d" % (lineno+1))
self.errors += 1
# XXX p.header += line
# switch back to headscan state
filenames = False
headscan = True
elif not line.startswith(b"+++ "):
if srcname != None:
warning("skipping invalid patch with no target for %s" % srcname)
self.errors += 1
srcname = None
# XXX header += srcname
# XXX header += line
else:
# this should be unreachable
warning("skipping invalid target patch")
filenames = False
headscan = True
else:
if tgtname != None:
# XXX seems to be a dead branch
warning("skipping invalid patch - double target at line %d" % (lineno+1))
self.errors += 1
srcname = None
tgtname = None
# XXX header += srcname
# XXX header += tgtname
# XXX header += line
# double target filename line is encountered
# switch back to headscan state
filenames = False
headscan = True
else:
re_filename_date_time = rb"^\+\+\+ ([^\t]+)(?:\s([0-9-]+)\s([0-9:]+)|.*)"
match = re.match(re_filename_date_time, line)
if not match:
warning("skipping invalid patch - no target filename at line %d" % (lineno+1))
self.errors += 1
srcname = None
# switch back to headscan state
filenames = False
headscan = True
else:
tgtname = match.group(1).strip()
date = match.group(2)
time = match.group(3)
if (date == b'1970-01-01' or date == b'1969-12-31') and time.split(b':',1)[1] == b'00:00':
tgtname = b'/dev/null'
if p: # for the first run p is None
self.items.append(p)
p = Patch()
p.source = srcname
srcname = None
p.target = tgtname
tgtname = None
p.header = header
header = []
# switch to hunkhead state
filenames = False
hunkhead = True
nexthunkno = 0
p.hunkends = lineends.copy()
continue
if hunkhead:
match = re.match(rb"^@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@(.*)", line)
if not match:
if not p.hunks:
warning("skipping invalid patch with no hunks for file %s" % p.source)
self.errors += 1
# XXX review switch
# switch to headscan state
hunkhead = False
headscan = True
continue
else:
# TODO review condition case
# switch to headscan state
hunkhead = False
headscan = True
else:
hunk = Hunk()
hunk.startsrc = int(match.group(1))
hunk.linessrc = 1
if match.group(3): hunk.linessrc = int(match.group(3))
hunk.starttgt = int(match.group(4))
hunk.linestgt = 1
if match.group(6): hunk.linestgt = int(match.group(6))
hunk.invalid = False
hunk.desc = match.group(7)[1:].rstrip()
hunk.text = []
hunkactual["linessrc"] = hunkactual["linestgt"] = 0
# switch to hunkbody state
hunkhead = False
hunkbody = True
nexthunkno += 1
continue
# /while fe.next()
if p:
self.items.append(p)
if not hunkparsed:
if hunkskip:
warning("warning: finished with errors, some hunks may be invalid")
elif headscan:
if len(self.items) == 0:
warning("error: no patch data found!")
return False
else: # extra data at the end of file
pass
else:
warning("error: patch stream is incomplete!")
self.errors += 1
if len(self.items) == 0:
return False
if debugmode and len(self.items) > 0:
debug("- %2d hunks for %s" % (len(p.hunks), p.source))
# XXX fix total hunks calculation
debug("total files: %d total hunks: %d" % (len(self.items),
sum(len(p.hunks) for p in self.items)))
# ---- detect patch and patchset types ----
for idx, p in enumerate(self.items):
self.items[idx].type = self._detect_type(p)
types = set([p.type for p in self.items])
if len(types) > 1:
self.type = MIXED
else:
self.type = types.pop()
# --------
self._normalize_filenames()
return (self.errors == 0)
|
(self, stream)
|
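parse() consumes a binary stream; a minimal sketch with an in-memory unified diff (the diff content is illustrative):

from io import BytesIO

diff = (b"--- a/hello.txt\n"
        b"+++ b/hello.txt\n"
        b"@@ -1 +1 @@\n"
        b"-hello\n"
        b"+hello world\n")
ps = patch_ng.PatchSet()
ok = ps.parse(BytesIO(diff))  # True when ps.errors == 0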
42,057 |
patch_ng
|
patch_stream
|
Generator that yields stream patched with hunks iterable
Converts lineends in hunk lines to the best suitable format
autodetected from input
|
def patch_stream(self, instream, hunks):
""" Generator that yields stream patched with hunks iterable
Converts lineends in hunk lines to the best suitable format
autodetected from input
"""
# todo: At the moment substituted lineends may not be the same
# at the start and at the end of patching. Also issue a
# warning/throw about mixed lineends (is it really needed?)
hunks = iter(hunks)
srclineno = 1
lineends = {b'\n':0, b'\r\n':0, b'\r':0}
def get_line():
"""
local utility function - return line from source stream
collecting line end statistics on the way
"""
line = instream.readline()
# 'U' mode works only with text files
if line.endswith(b"\r\n"):
lineends[b"\r\n"] += 1
elif line.endswith(b"\n"):
lineends[b"\n"] += 1
elif line.endswith(b"\r"):
lineends[b"\r"] += 1
return line
for hno, h in enumerate(hunks):
debug("hunk %d" % (hno+1))
# skip to line just before hunk starts
while srclineno < h.startsrc:
yield get_line()
srclineno += 1
for hline in h.text:
# todo: check \ No newline at the end of file
if hline.startswith(b"-") or hline.startswith(b"\\"):
get_line()
srclineno += 1
continue
else:
if not hline.startswith(b"+"):
yield get_line()
srclineno += 1
continue
line2write = hline[1:]
# detect if line ends are consistent in source file
if sum([bool(lineends[x]) for x in lineends]) == 1:
newline = [x for x in lineends if lineends[x] != 0][0]
yield line2write.rstrip(b"\r\n")+newline
else: # newlines are mixed
yield line2write
for line in instream:
yield line
|
(self, instream, hunks)
|
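patch_stream() is the low-level generator that write_hunks() drives; a hedged sketch of patching one file in memory (assumes the patch parsed and the source file exists):

ps = patch_ng.fromfile("fix.patch")
item = ps.items[0]
with open(item.source, "rb") as src:  # item.source is bytes after parsing
    patched = b"".join(ps.patch_stream(src, item.hunks))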
42,058 |
patch_ng
|
revert
|
apply patch in reverse order
|
def revert(self, strip=0, root=None):
""" apply patch in reverse order """
reverted = copy.deepcopy(self)
reverted._reverse()
return reverted.apply(strip, root)
|
(self, strip=0, root=None)
|
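revert() deep-copies the patch set, flips every hunk via _reverse(), and applies the result; a hedged one-liner to undo a prior apply made with the same arguments:

ps = patch_ng.fromfile("fix.patch")
ps.revert(strip=1, root="project")  # undoes a previous apply(strip=1, root="project")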
42,059 |
patch_ng
|
strip_path
| null |
def strip_path(self, path, base_path, strip=0):
tokens = path.split("/")
if len(tokens) > 1:
tokens = tokens[strip:]
path = "/".join(tokens)
if base_path:
path = os.path.join(base_path, path)
return path
# account for new and deleted files, upstream dep won't fix them
|
(self, path, base_path, strip=0)
|
42,060 |
patch_ng
|
write_hunks
| null |
def write_hunks(self, srcname, tgtname, hunks):
src = open(srcname, "rb")
tgt = open(tgtname, "wb")
debug("processing target file %s" % tgtname)
tgt.writelines(self.patch_stream(src, hunks))
tgt.close()
src.close()
# [ ] TODO: add test for permission copy
shutil.copymode(srcname, tgtname)
return True
|
(self, srcname, tgtname, hunks)
|
42,062 |
posixpath
|
abspath
|
Return an absolute path.
|
def abspath(path):
"""Return an absolute path."""
path = os.fspath(path)
if not isabs(path):
if isinstance(path, bytes):
cwd = os.getcwdb()
else:
cwd = os.getcwd()
path = join(cwd, path)
return normpath(path)
|
(path)
|
42,064 |
patch_ng
|
<lambda>
| null |
compat_next = lambda gen: gen.__next__()
|
(gen)
|
42,066 |
patch_ng
|
decode_text
| null |
def decode_text(text):
encodings = {codecs.BOM_UTF8: "utf_8_sig",
codecs.BOM_UTF16_BE: "utf_16_be",
codecs.BOM_UTF16_LE: "utf_16_le",
codecs.BOM_UTF32_BE: "utf_32_be",
codecs.BOM_UTF32_LE: "utf_32_le",
b'\x2b\x2f\x76\x38': "utf_7",
b'\x2b\x2f\x76\x39': "utf_7",
b'\x2b\x2f\x76\x2b': "utf_7",
b'\x2b\x2f\x76\x2f': "utf_7",
b'\x2b\x2f\x76\x38\x2d': "utf_7"}
for bom in sorted(encodings, key=len, reverse=True):
if text.startswith(bom):
try:
return text[len(bom):].decode(encodings[bom])
except UnicodeDecodeError:
continue
decoders = ["utf-8", "Windows-1252"]
for decoder in decoders:
try:
return text.decode(decoder)
except UnicodeDecodeError:
continue
logger.warning("can't decode %s" % str(text))
return text.decode("utf-8", "ignore") # Ignore not compatible characters
|
(text)
|
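decode_text() tries BOM-tagged codecs first, then UTF-8, then Windows-1252; two illustrative calls:

import codecs

decode_text(codecs.BOM_UTF8 + "héllo".encode("utf-8"))  # -> "héllo" (BOM stripped)
decode_text(b"caf\xe9")  # not valid UTF-8, falls back to Windows-1252 -> "café"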
42,067 |
genericpath
|
exists
|
Test whether a path exists. Returns False for broken symbolic links
|
def exists(path):
"""Test whether a path exists. Returns False for broken symbolic links"""
try:
os.stat(path)
except (OSError, ValueError):
return False
return True
|
(path)
|
42,068 |
patch_ng
|
fromfile
|
Parse patch file. If successful, returns
PatchSet() object. Otherwise returns False.
|
def fromfile(filename):
""" Parse patch file. If successful, returns
PatchSet() object. Otherwise returns False.
"""
patchset = PatchSet()
debug("reading %s" % filename)
fp = open(filename, "rb")
res = patchset.parse(fp)
fp.close()
if res == True:
return patchset
return False
|
(filename)
|
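fromfile() returns a PatchSet on success and False otherwise, so callers should check the result ("fix.patch" is hypothetical):

ps = patch_ng.fromfile("fix.patch")
if ps is False:
    raise SystemExit("could not parse patch")
print("%d file(s) in patch set" % len(ps))  # __len__ counts parsed items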
42,069 |
patch_ng
|
fromstring
|
Parse text string and return PatchSet()
object (or False if parsing fails)
|
def fromstring(s):
""" Parse text string and return PatchSet()
object (or False if parsing fails)
"""
ps = PatchSet( StringIO(s) )
if ps.errors == 0:
return ps
return False
|
(s)
|
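fromstring() feeds the argument to StringIO, which in this module is assumed to be an in-memory byte stream, so bytes input is expected; a minimal sketch:

diff = b"--- a/hello.txt\n+++ b/hello.txt\n@@ -1 +1 @@\n-hello\n+hello world\n"
ps = patch_ng.fromstring(diff)  # PatchSet on success, False on parse errors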
42,070 |
patch_ng
|
fromurl
|
Parse patch from a URL, return False
if an error occurred. Note that this also
can throw urlopen() exceptions.
|
def fromurl(url):
""" Parse patch from an URL, return False
if an error occured. Note that this also
can throw urlopen() exceptions.
"""
ps = PatchSet( urllib_request.urlopen(url) )
if ps.errors == 0:
return ps
return False
|
(url)
|
42,071 |
genericpath
|
isfile
|
Test whether a path is a regular file
|
def isfile(path):
"""Test whether a path is a regular file"""
try:
st = os.stat(path)
except (OSError, ValueError):
return False
return stat.S_ISREG(st.st_mode)
|
(path)
|
42,072 |
patch_ng
|
load
|
Loads a file content
|
def load(path, binary=False):
""" Loads a file content """
with open(path, 'rb') as handle:
tmp = handle.read()
return tmp if binary else decode_text(tmp)
|
(path, binary=False)
|
42,074 |
patch_ng
|
main
| null |
def main():
from optparse import OptionParser
from os.path import exists
import sys
opt = OptionParser(usage="1. %prog [options] unified.diff\n"
" 2. %prog [options] http://host/patch\n"
" 3. %prog [options] -- < unified.diff",
version="python-patch %s" % __version__)
opt.add_option("-q", "--quiet", action="store_const", dest="verbosity",
const=0, help="print only warnings and errors", default=1)
opt.add_option("-v", "--verbose", action="store_const", dest="verbosity",
const=2, help="be verbose")
opt.add_option("--debug", action="store_true", dest="debugmode", help="debug mode")
opt.add_option("--diffstat", action="store_true", dest="diffstat",
help="print diffstat and exit")
opt.add_option("-d", "--directory", metavar='DIR',
help="specify root directory for applying patch")
opt.add_option("-p", "--strip", type="int", metavar='N', default=0,
help="strip N path components from filenames")
opt.add_option("--revert", action="store_true",
help="apply patch in reverse order (unpatch)")
opt.add_option("-f", "--fuzz", action="store_true", dest="fuzz", help="Accept fuuzzy patches")
(options, args) = opt.parse_args()
if not args and sys.argv[-1:] != ['--']:
opt.print_version()
opt.print_help()
sys.exit()
readstdin = (sys.argv[-1:] == ['--'] and not args)
verbosity_levels = {0:logging.WARNING, 1:logging.INFO, 2:logging.DEBUG}
loglevel = verbosity_levels[options.verbosity]
logformat = "%(message)s"
logger.setLevel(loglevel)
streamhandler.setFormatter(logging.Formatter(logformat))
if options.debugmode:
setdebug() # this sets global debugmode variable
if readstdin:
patch = PatchSet(sys.stdin)
else:
patchfile = args[0]
urltest = patchfile.split(':')[0]
if (':' in patchfile and urltest.isalpha()
and len(urltest) > 1): # one char before : is a windows drive letter
patch = fromurl(patchfile)
else:
if not exists(patchfile) or not isfile(patchfile):
sys.exit("patch file does not exist - %s" % patchfile)
patch = fromfile(patchfile)
if options.diffstat:
print(patch.diffstat())
sys.exit(0)
if not patch:
error("Could not parse patch")
sys.exit(-1)
#pprint(patch)
if options.revert:
patch.revert(options.strip, root=options.directory) or sys.exit(-1)
else:
patch.apply(options.strip, root=options.directory, fuzz=options.fuzz) or sys.exit(-1)
# todo: document and test line ends handling logic - patch_ng.py detects proper line-endings
# for inserted hunks and issues a warning if patched file has inconsistent line ends
|
()
|
42,076 |
patch_ng
|
pathstrip
|
Strip n leading components from the given path
|
def pathstrip(path, n):
""" Strip n leading components from the given path """
pathlist = [path]
while os.path.dirname(pathlist[0]) != b'':
pathlist[0:1] = os.path.split(pathlist[0])
return b'/'.join(pathlist[n:])
|
(path, n)
|
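pathstrip() works on bytes paths; two illustrative calls:

pathstrip(b"a/b/c.txt", 1)  # -> b"b/c.txt"
pathstrip(b"a/b/c.txt", 2)  # -> b"c.txt"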
42,079 |
patch_ng
|
safe_unlink
| null |
def safe_unlink(filepath):
os.chmod(filepath, stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)
os.unlink(filepath)
|
(filepath)
|
42,080 |
patch_ng
|
save
|
Saves a file with given content
Params:
path: path to write file to
content: contents to save in the file
only_if_modified: file won't be modified if the content hasn't changed
|
def save(path, content, only_if_modified=False):
"""
Saves a file with given content
Params:
path: path to write file to
content: contents to save in the file
only_if_modified: file won't be modified if the content hasn't changed
"""
try:
os.makedirs(os.path.dirname(path))
except Exception:
pass
new_content = to_file_bytes(content)
if only_if_modified and os.path.exists(path):
old_content = load(path, binary=True)
if old_content == new_content:
return
with open(path, "wb") as handle:
handle.write(new_content)
|
(path, content, only_if_modified=False)
|
42,081 |
patch_ng
|
setdebug
| null |
def setdebug():
global debugmode, streamhandler
debugmode = True
loglevel = logging.DEBUG
logformat = "%(levelname)8s %(message)s"
logger.setLevel(loglevel)
if streamhandler not in logger.handlers:
# when used as a library, streamhandler is not added
# by default
logger.addHandler(streamhandler)
streamhandler.setFormatter(logging.Formatter(logformat))
|
()
|
42,086 |
patch_ng
|
to_file_bytes
| null |
def to_file_bytes(content):
if PY3K:
if not isinstance(content, bytes):
content = bytes(content, "utf-8")
elif isinstance(content, unicode):
content = content.encode("utf-8")
return content
|
(content)
|
42,087 |
patch_ng
|
tostr
|
Python 3 bytes encoder. Used to print filename in
diffstat output. Assumes that filenames are in utf-8.
|
def tostr(b):
""" Python 3 bytes encoder. Used to print filename in
diffstat output. Assumes that filenames are in utf-8.
"""
if not PY3K:
return b
# [ ] figure out how to print non-utf-8 filenames without
# information loss
return b.decode('utf-8')
|
(b)
|
42,089 |
patch_ng
|
xisabs
|
Cross-platform version of `os.path.isabs()`
Returns True if `filename` is absolute on
Linux, OS X or Windows.
|
def xisabs(filename):
""" Cross-platform version of `os.path.isabs()`
Returns True if `filename` is absolute on
Linux, OS X or Windows.
"""
if filename.startswith(b'/'): # Linux/Unix
return True
elif filename.startswith(b'\\'): # Windows
return True
elif re.match(b'\\w:[\\\\/]', filename): # Windows
return True
return False
|
(filename)
|
42,090 |
patch_ng
|
xnormpath
|
Cross-platform version of os.path.normpath
|
def xnormpath(path):
""" Cross-platform version of os.path.normpath """
# replace escapes and Windows slashes
normalized = posixpath.normpath(path).replace(b'\\', b'/')
# fold the result
return posixpath.normpath(normalized)
|
(path)
|
42,091 |
patch_ng
|
xstrip
|
Make relative path out of absolute by stripping
prefixes used on Linux, OS X and Windows.
This function is critical for security.
|
def xstrip(filename):
""" Make relative path out of absolute by stripping
prefixes used on Linux, OS X and Windows.
This function is critical for security.
"""
while xisabs(filename):
# strip windows drive with all slashes
if re.match(b'\\w:[\\\\/]', filename):
filename = re.sub(b'^\\w+:[\\\\/]+', b'', filename)
# strip all slashes
elif re.match(b'[\\\\/]', filename):
filename = re.sub(b'^[\\\\/]+', b'', filename)
return filename
|
(filename)
|
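xisabs() and xstrip() together neutralise absolute paths coming from hostile patches; illustrative calls:

xstrip(b"/etc/passwd")         # -> b"etc/passwd"
xstrip(b"C:\\temp\\file.txt")  # -> b"temp\\file.txt" (drive prefix removed)
xisabs(b"relative/path")       # -> False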
42,094 |
sparse_dot_topn.api
|
awesome_cossim_topn
|
This function will be removed and replaced with `sp_matmul_topn`.
NOTE this function calls `sp_matmul_topn` but the results may not be the same.
See the migration guide at 'https://github.com/ing-bank/sparse_dot_topn#migration' for details.
This function will return a matrix C in CSR format, where
C = [sorted top n results > lower_bound for each row of A * B].
If return_best_ntop=True then best_ntop
(the true maximum number of elements > lower_bound per row of A * B)
will also be returned in a tuple together with C as (C, best_ntop).
Args:
A: LHS of the multiplication, the number of columns of A determines the orientation of B.
`A` must have a {32, 64}-bit {int, float} dtype that is of the same kind as `B`.
Note the matrix is converted (copied) to CSR format if a CSC or COO matrix.
B: RHS of the multiplication, the number of rows of B must match the number of columns of A or the shape of B.T should match A.
`B` must have a {32, 64}-bit {int, float} dtype that is of the same kind as `A`.
Note the matrix is converted (copied) to CSR format if a CSC or COO matrix.
ntop: top n results
lower_bound: a threshold that the element of A*B must be greater than
use_threads: use multi-thread or not
n_jobs: number of threads, must be >= 1
return_best_ntop: (default: False) if True, will return best_ntop together
with C as a tuple: (C, best_ntop)
test_nnz_max: deprecated argument, cannot be used
Returns:
C: result matrix (returned alone, if return_best_ntop=False)
best_ntop: The true maximum number of elements > lower_bound per row of
A * B returned together with C as a tuple: (C, best_ntop). It is
returned only if return_best_ntop=True.
N.B. if A and B are not in CSR format, they will be converted to CSR
|
def awesome_cossim_topn(
A, B, ntop, lower_bound=0, use_threads=False, n_jobs=1, return_best_ntop=None, test_nnz_max=None
):
"""This function will be removed and replaced with `sp_matmul_topn`.
NOTE this function calls `sp_matmul_topn` but the results may not be the same.
See the migration guide at 'https://github.com/ing-bank/sparse_dot_topn#migration' for details.
This function will return a matrix C in CSR format, where
C = [sorted top n results > lower_bound for each row of A * B].
If return_best_ntop=True then best_ntop
(the true maximum number of elements > lower_bound per row of A * B)
will also be returned in a tuple together with C as (C, best_ntop).
Args:
A: LHS of the multiplication, the number of columns of A determines the orientation of B.
`A` must have a {32, 64}-bit {int, float} dtype that is of the same kind as `B`.
Note the matrix is converted (copied) to CSR format if a CSC or COO matrix.
B: RHS of the multiplication, the number of rows of B must match the number of columns of A or the shape of B.T should match A.
`B` must have a {32, 64}-bit {int, float} dtype that is of the same kind as `A`.
Note the matrix is converted (copied) to CSR format if a CSC or COO matrix.
ntop: top n results
lower_bound: a threshold that the element of A*B must be greater than
use_threads: use multi-thread or not
n_jobs: number of threads, must be >= 1
return_best_ntop: (default: False) if True, will return best_ntop together
with C as a tuple: (C, best_ntop)
test_nnz_max: deprecated argument, cannot be used
Returns:
C: result matrix (returned alone, if return_best_ntop=False)
best_ntop: The true maximum number of elements > lower_bound per row of
A * B returned together with C as a tuple: (C, best_ntop). It is
returned only if return_best_ntop=True.
N.B. if A and B are not in CSR format, they will be converted to CSR
"""
msg = (
"`awesome_cossim_topn` function will be removed and (partially) replaced with `sp_matmul_topn`."
" See the migration guide at 'https://github.com/ing-bank/sparse_dot_topn#readme'."
)
if test_nnz_max is not None:
raise DeprecationWarning(msg)
warnings.warn(msg, DeprecationWarning, stacklevel=2)
n_threads = n_jobs if use_threads is True else None
C = sp_matmul_topn(A=A, B=B, top_n=ntop, threshold=lower_bound, sort=True, n_threads=n_threads)
if return_best_ntop:
return C, np.diff(C.indptr).max()
return C
|
(A, B, ntop, lower_bound=0, use_threads=False, n_jobs=1, return_best_ntop=None, test_nnz_max=None)
|
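A hedged migration sketch from the deprecated wrapper to sp_matmul_topn, mirroring the forwarding the wrapper itself performs (A and B are assumed to be SciPy CSR matrices):

# old, deprecated call:
#   C = awesome_cossim_topn(A, B, ntop=10, lower_bound=0.8, use_threads=True, n_jobs=4)
# equivalent new call:
C = sp_matmul_topn(A, B, top_n=10, threshold=0.8, sort=True, n_threads=4)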
42,097 |
sparse_dot_topn.api
|
sp_matmul
|
Compute A * B.
This function allows large matrices to be multiplied with a limited memory footprint.
Args:
A: LHS of the multiplication, the number of columns of A determines the orientation of B.
`A` must have a {32, 64}-bit {int, float} dtype that is of the same kind as `B`.
Note the matrix is converted (copied) to CSR format if a CSC or COO matrix.
B: RHS of the multiplication, the number of rows of B must match the number of columns of A or the shape of B.T should match A.
`B` must have a {32, 64}-bit {int, float} dtype that is of the same kind as `A`.
Note the matrix is converted (copied) to CSR format if a CSC or COO matrix.
n_threads: number of threads to use, `None` implies sequential processing, -1 will use all but one of the available cores.
idx_dtype: dtype to use for the indices, defaults to 32bit integers
Throws:
TypeError: when A, B are not trivially convertible to a `CSR matrix`
Returns:
C: result matrix
|
def sp_matmul(
A: csr_matrix | csc_matrix | coo_matrix,
B: csr_matrix | csc_matrix | coo_matrix,
n_threads: int | None = None,
idx_dtype: DTypeLike | None = None,
) -> csr_matrix:
"""Compute A * B whilst only storing the `top_n` elements.
This functions allows large matrices to multiplied with a limited memory footprint.
Args:
A: LHS of the multiplication, the number of columns of A determines the orientation of B.
`A` must be have an {32, 64}bit {int, float} dtype that is of the same kind as `B`.
Note the matrix is converted (copied) to CSR format if a CSC or COO matrix.
B: RHS of the multiplication, the number of rows of B must match the number of columns of A or the shape of B.T should be match A.
`B` must be have an {32, 64}bit {int, float} dtype that is of the same kind as `A`.
Note the matrix is converted (copied) to CSR format if a CSC or COO matrix.
n_threads: number of threads to use, `None` implies sequential processing, -1 will use all but one of the available cores.
idx_dtype: dtype to use for the indices, defaults to 32bit integers
Throws:
TypeError: when A, B are not trivially convertable to a `CSR matrix`
Returns:
C: result matrix
"""
idx_dtype = assert_idx_dtype(idx_dtype)
n_threads: int = n_threads or 1
if n_threads < 0:
n_threads = _N_CORES
if isinstance(A, csc_matrix) and isinstance(B, csc_matrix) and A.shape[0] == B.shape[1]:
A = A.transpose()
B = B.transpose()
elif isinstance(A, (coo_matrix, csc_matrix)):
A = A.tocsr(False)
elif not isinstance(A, csr_matrix):
msg = f"type of `A` must be one of `csr_matrix`, `csc_matrix` or `csr_matrix`, got `{type(A)}`"
raise TypeError(msg)
if not isinstance(B, (csr_matrix, coo_matrix, csc_matrix)):
msg = f"type of `B` must be one of `csr_matrix`, `csc_matrix` or `csr_matrix`, got `{type(B)}`"
raise TypeError(msg)
A_nrows, A_ncols = A.shape
B_nrows, B_ncols = B.shape
if A_ncols == B_nrows:
if isinstance(B, (coo_matrix, csc_matrix)):
B = B.tocsr(False)
elif A_ncols == B_ncols:
B = B.transpose() if isinstance(B, csc_matrix) else B.transpose().tocsr(False)
B_nrows, B_ncols = B.shape
else:
msg = (
"Matrices `A` and `B` have incompatible shapes. `A.shape[1]` must be equal to `B.shape[0]` or `B.shape[1]`."
)
raise ValueError(msg)
assert_supported_dtype(A)
assert_supported_dtype(B)
ensure_compatible_dtype(A, B)
# basic check. if A or B are all zeros matrix, return all zero matrix directly
if A.indices.size == 0 or B.indices.size == 0:
C_indptr = np.zeros(A_nrows + 1, dtype=idx_dtype)
C_indices = np.zeros(1, dtype=idx_dtype)
C_data = np.zeros(1, dtype=A.dtype)
return csr_matrix((C_data, C_indices, C_indptr), shape=(A_nrows, B_ncols))
kwargs = {
"nrows": A_nrows,
"ncols": B_ncols,
"A_data": A.data,
"A_indptr": A.indptr if idx_dtype is None else A.indptr.astype(idx_dtype),
"A_indices": A.indices if idx_dtype is None else A.indices.astype(idx_dtype),
"B_data": B.data,
"B_indptr": B.indptr if idx_dtype is None else B.indptr.astype(idx_dtype),
"B_indices": B.indices if idx_dtype is None else B.indices.astype(idx_dtype),
}
func = _core.sp_matmul
if n_threads > 1:
if _core._has_openmp_support:
kwargs["n_threads"] = n_threads
func = _core.sp_matmul_mt
else:
msg = "sparse_dot_topn: extension was compiled without parallelisation (OpenMP) support, ignoring ``n_threads``"
warnings.warn(msg, stacklevel=1)
return csr_matrix(func(**kwargs), shape=(A_nrows, B_ncols))
|
(A: 'csr_matrix | csc_matrix | coo_matrix', B: 'csr_matrix | csc_matrix | coo_matrix', n_threads: 'int | None' = None, idx_dtype: 'DTypeLike | None' = None) -> 'csr_matrix'
|
42,098 |
sparse_dot_topn.api
|
sp_matmul_topn
|
Compute A * B whilst only storing the `top_n` elements.
This function allows large matrices to be multiplied with a limited memory footprint.
Args:
A: LHS of the multiplication, the number of columns of A determines the orientation of B.
`A` must have a {32, 64}-bit {int, float} dtype that is of the same kind as `B`.
Note the matrix is converted (copied) to CSR format if a CSC or COO matrix.
B: RHS of the multiplication, the number of rows of B must match the number of columns of A or the shape of B.T should match A.
`B` must have a {32, 64}-bit {int, float} dtype that is of the same kind as `A`.
Note the matrix is converted (copied) to CSR format if a CSC or COO matrix.
top_n: the number of results to retain
sort: return C in a format where the first non-zero element of each row is the largest value
threshold: only return values greater than the threshold
density: the expected density of the result considering `top_n`. The expected number of non-zero elements
in C should be <= (`density` * `top_n` * `A.shape[0]`), otherwise the memory has to be reallocated.
This value should only be set if you have a strong expectation, as being wrong incurs a reallocation penalty.
n_threads: number of threads to use, `None` implies sequential processing, -1 will use all but one of the available cores.
idx_dtype: dtype to use for the indices, defaults to 32bit integers
Throws:
TypeError: when A, B are not trivially convertible to a `CSR matrix`
Returns:
C: result matrix
|
def sp_matmul_topn(
A: csr_matrix | csc_matrix | coo_matrix,
B: csr_matrix | csc_matrix | coo_matrix,
top_n: int,
threshold: int | float | None = None,
sort: bool = False,
density: float | None = None,
n_threads: int | None = None,
idx_dtype: DTypeLike | None = None,
) -> csr_matrix:
"""Compute A * B whilst only storing the `top_n` elements.
This function allows large matrices to be multiplied with a limited memory footprint.
Args:
A: LHS of the multiplication, the number of columns of A determines the orientation of B.
`A` must have a {32, 64}-bit {int, float} dtype that is of the same kind as `B`.
Note the matrix is converted (copied) to CSR format if a CSC or COO matrix.
B: RHS of the multiplication, the number of rows of B must match the number of columns of A or the shape of B.T should match A.
`B` must have a {32, 64}-bit {int, float} dtype that is of the same kind as `A`.
Note the matrix is converted (copied) to CSR format if a CSC or COO matrix.
top_n: the number of results to retain
sort: return C in a format where the first non-zero element of each row is the largest value
threshold: only return values greater than the threshold
density: the expected density of the result considering `top_n`. The expected number of non-zero elements
in C should be <= (`density` * `top_n` * `A.shape[0]`), otherwise the memory has to be reallocated.
This value should only be set if you have a strong expectation, as being wrong incurs a reallocation penalty.
n_threads: number of threads to use, `None` implies sequential processing, -1 will use all but one of the available cores.
idx_dtype: dtype to use for the indices, defaults to 32bit integers
Throws:
TypeError: when A, B are not trivially convertible to a `CSR matrix`
Returns:
C: result matrix
"""
n_threads: int = n_threads or 1
if n_threads < 0:
n_threads = _N_CORES
density: float = density or 1.0
idx_dtype = assert_idx_dtype(idx_dtype)
if isinstance(A, csc_matrix) and isinstance(B, csc_matrix) and A.shape[0] == B.shape[1]:
A = A.transpose()
B = B.transpose()
elif isinstance(A, (coo_matrix, csc_matrix)):
A = A.tocsr(False)
elif not isinstance(A, csr_matrix):
msg = f"type of `A` must be one of `csr_matrix`, `csc_matrix` or `csr_matrix`, got `{type(A)}`"
raise TypeError(msg)
if not isinstance(B, (csr_matrix, coo_matrix, csc_matrix)):
msg = f"type of `B` must be one of `csr_matrix`, `csc_matrix` or `csr_matrix`, got `{type(B)}`"
raise TypeError(msg)
A_nrows, A_ncols = A.shape
B_nrows, B_ncols = B.shape
if A_ncols == B_nrows:
if isinstance(B, (coo_matrix, csc_matrix)):
B = B.tocsr(False)
elif A_ncols == B_ncols:
B = B.transpose() if isinstance(B, csc_matrix) else B.transpose().tocsr(False)
B_nrows, B_ncols = B.shape
else:
msg = (
"Matrices `A` and `B` have incompatible shapes. `A.shape[1]` must be equal to `B.shape[0]` or `B.shape[1]`."
)
raise ValueError(msg)
if B_ncols == top_n and (sort is False) and (threshold is None):
return sp_matmul(A, B, n_threads)
assert_supported_dtype(A)
assert_supported_dtype(B)
ensure_compatible_dtype(A, B)
# guard against top_n larger than number of cols
top_n = min(top_n, B_ncols)
# handle threshold
if threshold is not None:
threshold = int(np.rint(threshold)) if np.issubdtype(A.data.dtype, np.integer) else float(threshold)
# basic check. if A or B are all zeros matrix, return all zero matrix directly
if A.indices.size == 0 or B.indices.size == 0:
C_indptr = np.zeros(A_nrows + 1, dtype=idx_dtype)
C_indices = np.zeros(1, dtype=idx_dtype)
C_data = np.zeros(1, dtype=A.dtype)
return csr_matrix((C_data, C_indices, C_indptr), shape=(A_nrows, B_ncols))
kwargs = {
"top_n": top_n,
"nrows": A_nrows,
"ncols": B_ncols,
"threshold": threshold,
"density": density,
"A_data": A.data,
"A_indptr": A.indptr if idx_dtype is None else A.indptr.astype(idx_dtype),
"A_indices": A.indices if idx_dtype is None else A.indices.astype(idx_dtype),
"B_data": B.data,
"B_indptr": B.indptr if idx_dtype is None else B.indptr.astype(idx_dtype),
"B_indices": B.indices if idx_dtype is None else B.indices.astype(idx_dtype),
}
func = _core.sp_matmul_topn if not sort else _core.sp_matmul_topn_sorted
if n_threads > 1:
if _core._has_openmp_support:
kwargs["n_threads"] = n_threads
kwargs.pop("density")
func = _core.sp_matmul_topn_mt if not sort else _core.sp_matmul_topn_sorted_mt
else:
msg = "sparse_dot_topn: extension was compiled without parallelisation (OpenMP) support, ignoring ``n_threads``"
warnings.warn(msg, stacklevel=1)
return csr_matrix(func(**kwargs), shape=(A_nrows, B_ncols))
|
(A: 'csr_matrix | csc_matrix | coo_matrix', B: 'csr_matrix | csc_matrix | coo_matrix', top_n: 'int', threshold: 'int | float | None' = None, sort: 'bool' = False, density: 'float | None' = None, n_threads: 'int | None' = None, idx_dtype: 'DTypeLike | None' = None) -> 'csr_matrix'
|
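A minimal usage sketch for `sp_matmul_topn`, assuming it is importable from the `sparse_dot_topn` top level; the shapes, density, and threshold below are illustrative:

import numpy as np
from scipy.sparse import random as sparse_random
from sparse_dot_topn import sp_matmul_topn

rng = np.random.default_rng(42)
# A: (1000, 200) and B: (200, 800), both CSR with ~10% non-zeros
A = sparse_random(1000, 200, density=0.10, format="csr", dtype=np.float64, random_state=rng)
B = sparse_random(200, 800, density=0.10, format="csr", dtype=np.float64, random_state=rng)

# Keep only the 10 largest values per row of A @ B, sorted within each row.
C = sp_matmul_topn(A, B, top_n=10, threshold=0.5, sort=True, n_threads=2)
print(C.shape, C.nnz)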
42,100 |
sparse_dot_topn.api
|
zip_sp_matmul_topn
|
Compute zip-matrix C = zip_i C_i = zip_i A * B_i = A * B whilst only storing the `top_n` elements.
Combine the sub-matrices together and keep only the `top_n` elements per row.
Before calling this function, matrix B has been split row-wise into chunks B_i, and C_i = A * B_i have been calculated.
This function computes C = zip_i C_i, which is equivalent to A * B when only keeping the `top_n` elements.
It allows very large matrices to be split and multiplied with a limited memory footprint.
Args:
top_n: the number of results to retain; should be smaller than or equal to the top_n used to obtain C_mats.
C_mats: a list of the C_i sub-matrices, each in csr_matrix format.
Returns:
C: zipped result matrix
Raises:
TypeError: when not all elements of `C_mats` are csr_matrix or trivially convertible
ValueError: when not all elements of `C_mats` have the same number of rows
|
def zip_sp_matmul_topn(top_n: int, C_mats: list[csr_matrix]) -> csr_matrix:
"""Compute zip-matrix C = zip_i C_i = zip_i A * B_i = A * B whilst only storing the `top_n` elements.
Combine the sub-matrices together and keep only the `top_n` elements per row.
Pre-calling this function, matrix B has been split row-wise into chunks B_i, and C_i = A * B_i have been calculated.
This function computes C = zip_i C_i, which is equivalent to A * B when only keeping the `top_n` elements.
It allows very large matrices to be split and multiplied with a limited memory footprint.
Args:
top_n: the number of results to retain; should be smaller or equal to top_n used to obtain C_mats.
C_mats: a list with each C_i sub-matrix, with format csr_matrix.
Returns:
C: zipped result matrix
Raises:
TypeError: when not all elements of `C_mats` is a csr_matrix or trivially convertable
ValueError: when not all elements of `C_mats` has the same number of rows
"""
_nrows = []
ncols = []
data = []
indptr = []
indices = []
for C in C_mats:
# check correct type of each C
if isinstance(C, (coo_matrix, csc_matrix)):
C = C.tocsr(False)
elif not isinstance(C, csr_matrix):
msg = f"type of `C` must be one of `csr_matrix`, `csc_matrix` or `csr_matrix`, got `{type(C)}`"
raise TypeError(msg)
nrows, c_nc = C.shape
_nrows.append(nrows)
ncols.append(c_nc)
data.append(C.data)
indptr.append(C.indptr)
indices.append(C.indices)
ncols = np.asarray(ncols, int)
total_cols = ncols.sum()
if not np.all(np.diff(_nrows) == 0):
msg = "Each `C` in `C_mats` should have the same number of rows."
raise ValueError(msg)
return csr_matrix(
_core.zip_sp_matmul_topn(
top_n=top_n, Z_max_nnz=nrows * top_n, nrows=nrows, B_ncols=ncols, data=data, indptr=indptr, indices=indices
),
shape=(nrows, total_cols),
)
|
(top_n: int, C_mats: list[scipy.sparse._csr.csr_matrix]) -> scipy.sparse._csr.csr_matrix
|
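An illustrative sketch of the chunked workflow described above, assuming `sp_matmul_topn` and `zip_sp_matmul_topn` are importable from the package top level; B is split row-wise, each chunk shares A's column count (so it is transposed internally), and the partial results are zipped:

import numpy as np
from scipy.sparse import random as sparse_random
from sparse_dot_topn import sp_matmul_topn, zip_sp_matmul_topn

rng = np.random.default_rng(0)
A = sparse_random(1000, 64, density=0.10, format="csr", dtype=np.float64, random_state=rng)
B = sparse_random(4000, 64, density=0.10, format="csr", dtype=np.float64, random_state=rng)

# Each B_i has A's column count, so sp_matmul_topn computes A @ B_i.T -> (1000, 1000).
B_chunks = [B[i : i + 1000] for i in range(0, B.shape[0], 1000)]
C_mats = [sp_matmul_topn(A, B_i, top_n=10, sort=True) for B_i in B_chunks]

# Zip the partial results, keeping the overall top 10 per row; C has shape (1000, 4000).
C = zip_sp_matmul_topn(top_n=10, C_mats=C_mats)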
42,101 |
flask_restful
|
Api
|
The main entry point for the application.
You need to initialize it with a Flask Application: ::
>>> app = Flask(__name__)
>>> api = restful.Api(app)
Alternatively, you can use :meth:`init_app` to set the Flask application
after it has been constructed.
:param app: the Flask application object
:type app: flask.Flask or flask.Blueprint
:param prefix: Prefix all routes with a value, e.g. v1 or 2010-04-01
:type prefix: str
:param default_mediatype: The default media type to return
:type default_mediatype: str
:param decorators: Decorators to attach to every resource
:type decorators: list
:param catch_all_404s: Use :meth:`handle_error`
to handle 404 errors throughout your app
:type catch_all_404s: bool
:param serve_challenge_on_401: Whether to serve a challenge response to
clients on receiving 401. This usually leads to a username/password
popup in web browsers.
:param url_part_order: A string that controls the order that the pieces
of the url are concatenated when the full url is constructed. 'b'
is the blueprint (or blueprint registration) prefix, 'a' is the api
prefix, and 'e' is the path component the endpoint is added with
:type url_part_order: str
:param errors: A dictionary to define a custom response for each
exception or error raised during a request
:type errors: dict
|
class Api(object):
"""
The main entry point for the application.
You need to initialize it with a Flask Application: ::
>>> app = Flask(__name__)
>>> api = restful.Api(app)
Alternatively, you can use :meth:`init_app` to set the Flask application
after it has been constructed.
:param app: the Flask application object
:type app: flask.Flask or flask.Blueprint
:param prefix: Prefix all routes with a value, e.g. v1 or 2010-04-01
:type prefix: str
:param default_mediatype: The default media type to return
:type default_mediatype: str
:param decorators: Decorators to attach to every resource
:type decorators: list
:param catch_all_404s: Use :meth:`handle_error`
to handle 404 errors throughout your app
:type catch_all_404s: bool
:param serve_challenge_on_401: Whether to serve a challenge response to
clients on receiving 401. This usually leads to a username/password
popup in web browsers.
:param url_part_order: A string that controls the order that the pieces
of the url are concatenated when the full url is constructed. 'b'
is the blueprint (or blueprint registration) prefix, 'a' is the api
prefix, and 'e' is the path component the endpoint is added with
:type url_part_order: str
:param errors: A dictionary to define a custom response for each
exception or error raised during a request
:type errors: dict
"""
def __init__(self, app=None, prefix='',
default_mediatype='application/json', decorators=None,
catch_all_404s=False, serve_challenge_on_401=False,
url_part_order='bae', errors=None):
self.representations = OrderedDict(DEFAULT_REPRESENTATIONS)
self.urls = {}
self.prefix = prefix
self.default_mediatype = default_mediatype
self.decorators = decorators if decorators else []
self.catch_all_404s = catch_all_404s
self.serve_challenge_on_401 = serve_challenge_on_401
self.url_part_order = url_part_order
self.errors = errors or {}
self.blueprint_setup = None
self.endpoints = set()
self.resources = []
self.app = None
self.blueprint = None
if app is not None:
self.app = app
self.init_app(app)
def init_app(self, app):
"""Initialize this class with the given :class:`flask.Flask`
application or :class:`flask.Blueprint` object.
:param app: the Flask application or blueprint object
:type app: flask.Flask
:type app: flask.Blueprint
Examples::
api = Api()
api.add_resource(...)
api.init_app(app)
"""
# If app is a blueprint, defer the initialization
try:
app.record(self._deferred_blueprint_init)
# Flask.Blueprint has a 'record' attribute, Flask.Flask does not
except AttributeError:
self._init_app(app)
else:
self.blueprint = app
def _complete_url(self, url_part, registration_prefix):
"""This method is used to defer the construction of the final url in
the case that the Api is created with a Blueprint.
:param url_part: The part of the url the endpoint is registered with
:param registration_prefix: The part of the url contributed by the
blueprint. Generally speaking, BlueprintSetupState.url_prefix
"""
parts = {
'b': registration_prefix,
'a': self.prefix,
'e': url_part
}
return ''.join(parts[key] for key in self.url_part_order if parts[key])
@staticmethod
def _blueprint_setup_add_url_rule_patch(blueprint_setup, rule, endpoint=None, view_func=None, **options):
"""Method used to patch BlueprintSetupState.add_url_rule for setup
state instance corresponding to this Api instance. Exists primarily
to enable _complete_url's function.
:param blueprint_setup: The BlueprintSetupState instance (self)
:param rule: A string or callable that takes a string and returns a
string(_complete_url) that is the url rule for the endpoint
being registered
:param endpoint: See BlueprintSetupState.add_url_rule
:param view_func: See BlueprintSetupState.add_url_rule
:param **options: See BlueprintSetupState.add_url_rule
"""
if callable(rule):
rule = rule(blueprint_setup.url_prefix)
elif blueprint_setup.url_prefix:
rule = blueprint_setup.url_prefix + rule
options.setdefault('subdomain', blueprint_setup.subdomain)
if endpoint is None:
endpoint = view_func.__name__
defaults = blueprint_setup.url_defaults
if 'defaults' in options:
defaults = dict(defaults, **options.pop('defaults'))
blueprint_setup.app.add_url_rule(rule, '%s.%s' % (blueprint_setup.blueprint.name, endpoint),
view_func, defaults=defaults, **options)
def _deferred_blueprint_init(self, setup_state):
"""Synchronize prefix between blueprint/api and registration options, then
perform initialization with setup_state.app :class:`flask.Flask` object.
When a :class:`flask_restful.Api` object is initialized with a blueprint,
this method is recorded on the blueprint to be run when the blueprint is later
registered to a :class:`flask.Flask` object. This method also monkeypatches
BlueprintSetupState.add_url_rule with _blueprint_setup_add_url_rule_patch.
:param setup_state: The setup state object passed to deferred functions
during blueprint registration
:type setup_state: flask.blueprints.BlueprintSetupState
"""
self.blueprint_setup = setup_state
if setup_state.add_url_rule.__name__ != '_blueprint_setup_add_url_rule_patch':
setup_state._original_add_url_rule = setup_state.add_url_rule
setup_state.add_url_rule = MethodType(Api._blueprint_setup_add_url_rule_patch,
setup_state)
if not setup_state.first_registration:
raise ValueError('flask-restful blueprints can only be registered once.')
self._init_app(setup_state.app)
def _init_app(self, app):
"""Perform initialization actions with the given :class:`flask.Flask`
object.
:param app: The flask application object
:type app: flask.Flask
"""
app.handle_exception = partial(self.error_router, app.handle_exception)
app.handle_user_exception = partial(self.error_router, app.handle_user_exception)
if len(self.resources) > 0:
for resource, urls, kwargs in self.resources:
self._register_view(app, resource, *urls, **kwargs)
def owns_endpoint(self, endpoint):
"""Tests if an endpoint name (not path) belongs to this Api. Takes
into account the Blueprint name part of the endpoint name.
:param endpoint: The name of the endpoint being checked
:return: bool
"""
if self.blueprint:
if endpoint.startswith(self.blueprint.name):
endpoint = endpoint.split(self.blueprint.name + '.', 1)[-1]
else:
return False
return endpoint in self.endpoints
def _should_use_fr_error_handler(self):
""" Determine if error should be handled with FR or default Flask
The goal is to return Flask error handlers for non-FR-related routes,
and FR errors (with the correct media type) for FR endpoints. This
method currently handles 404 and 405 errors.
:return: bool
"""
adapter = current_app.create_url_adapter(request)
try:
adapter.match()
except MethodNotAllowed as e:
# Check if the other HTTP methods at this url would hit the Api
valid_route_method = e.valid_methods[0]
rule, _ = adapter.match(method=valid_route_method, return_rule=True)
return self.owns_endpoint(rule.endpoint)
except NotFound:
return self.catch_all_404s
except:
# Werkzeug throws other kinds of exceptions, such as Redirect
pass
def _has_fr_route(self):
"""Encapsulating the rules for whether the request was to a Flask endpoint"""
# 404's, 405's, which might not have a url_rule
if self._should_use_fr_error_handler():
return True
# for all other errors, just check if FR dispatched the route
if not request.url_rule:
return False
return self.owns_endpoint(request.url_rule.endpoint)
def error_router(self, original_handler, e):
"""This function decides whether the error occured in a flask-restful
endpoint or not. If it happened in a flask-restful endpoint, our
handler will be dispatched. If it happened in an unrelated view, the
app's original error handler will be dispatched.
In the event that the error occurred in a flask-restful endpoint but
the local handler can't resolve the situation, the router will fall
back onto the original_handler as last resort.
:param original_handler: the original Flask error handler for the app
:type original_handler: function
:param e: the exception raised while handling the request
:type e: Exception
"""
if self._has_fr_route():
try:
return self.handle_error(e)
except Exception:
pass # Fall through to original handler
return original_handler(e)
def handle_error(self, e):
"""Error handler for the API transforms a raised exception into a Flask
response, with the appropriate HTTP status code and body.
:param e: the raised Exception object
:type e: Exception
"""
got_request_exception.send(current_app._get_current_object(), exception=e)
_handle_flask_propagate_exceptions_config(current_app, e)
headers = Headers()
if isinstance(e, HTTPException):
if e.response is not None:
# If HTTPException is initialized with a response, then return e.get_response().
# This prevents specified error response from being overridden.
# e.g., HTTPException(response=Response("Hello World"))
resp = e.get_response()
return resp
code = e.code
default_data = {
'message': getattr(e, 'description', http_status_message(code))
}
headers = e.get_response().headers
else:
code = 500
default_data = {
'message': http_status_message(code),
}
# Werkzeug exceptions generate a content-length header which is added
# to the response in addition to the actual content-length header
# https://github.com/flask-restful/flask-restful/issues/534
remove_headers = ('Content-Length',)
for header in remove_headers:
headers.pop(header, None)
data = getattr(e, 'data', default_data)
if code and code >= 500:
exc_info = sys.exc_info()
if exc_info[1] is None:
exc_info = None
current_app.log_exception(exc_info)
error_cls_name = type(e).__name__
if error_cls_name in self.errors:
custom_data = self.errors.get(error_cls_name, {})
code = custom_data.get('status', 500)
data.update(custom_data)
if code == 406 and self.default_mediatype is None:
# if we are handling NotAcceptable (406), make sure that
# make_response uses a representation we support as the
# default mediatype (so that make_response doesn't throw
# another NotAcceptable error).
supported_mediatypes = list(self.representations.keys())
fallback_mediatype = supported_mediatypes[0] if supported_mediatypes else "text/plain"
resp = self.make_response(
data,
code,
headers,
fallback_mediatype = fallback_mediatype
)
else:
resp = self.make_response(data, code, headers)
if code == 401:
resp = self.unauthorized(resp)
return resp
def mediatypes_method(self):
"""Return a method that returns a list of mediatypes
"""
return lambda resource_cls: self.mediatypes() + [self.default_mediatype]
def add_resource(self, resource, *urls, **kwargs):
"""Adds a resource to the api.
:param resource: the class name of your resource
:type resource: :class:`Type[Resource]`
:param urls: one or more url routes to match for the resource, standard
flask routing rules apply. Any url variables will be
passed to the resource method as args.
:type urls: str
:param endpoint: endpoint name (defaults to :meth:`Resource.__name__.lower`)
Can be used to reference this route in :class:`fields.Url` fields
:type endpoint: str
:param resource_class_args: args to be forwarded to the constructor of
the resource.
:type resource_class_args: tuple
:param resource_class_kwargs: kwargs to be forwarded to the constructor
of the resource.
:type resource_class_kwargs: dict
Additional keyword arguments not specified above will be passed as-is
to :meth:`flask.Flask.add_url_rule`.
Examples::
api.add_resource(HelloWorld, '/', '/hello')
api.add_resource(Foo, '/foo', endpoint="foo")
api.add_resource(FooSpecial, '/special/foo', endpoint="foo")
"""
if self.app is not None:
self._register_view(self.app, resource, *urls, **kwargs)
else:
self.resources.append((resource, urls, kwargs))
def resource(self, *urls, **kwargs):
"""Wraps a :class:`~flask_restful.Resource` class, adding it to the
api. Parameters are the same as :meth:`~flask_restful.Api.add_resource`.
Example::
app = Flask(__name__)
api = restful.Api(app)
@api.resource('/foo')
class Foo(Resource):
def get(self):
return 'Hello, World!'
"""
def decorator(cls):
self.add_resource(cls, *urls, **kwargs)
return cls
return decorator
def _register_view(self, app, resource, *urls, **kwargs):
endpoint = kwargs.pop('endpoint', None) or resource.__name__.lower()
self.endpoints.add(endpoint)
resource_class_args = kwargs.pop('resource_class_args', ())
resource_class_kwargs = kwargs.pop('resource_class_kwargs', {})
# NOTE: 'view_functions' is cleaned up from Blueprint class in Flask 1.0
if endpoint in getattr(app, 'view_functions', {}):
previous_view_class = app.view_functions[endpoint].__dict__['view_class']
# if you override the endpoint with a different class, avoid the collision by raising an exception
if previous_view_class != resource:
raise ValueError('This endpoint (%s) is already set to the class %s.' % (endpoint, previous_view_class.__name__))
resource.mediatypes = self.mediatypes_method() # Hacky
resource.endpoint = endpoint
resource_func = self.output(resource.as_view(endpoint, *resource_class_args,
**resource_class_kwargs))
for decorator in self.decorators:
resource_func = decorator(resource_func)
for url in urls:
# If this Api has a blueprint
if self.blueprint:
# And this Api has been setup
if self.blueprint_setup:
# Set the rule to a string directly, as the blueprint is already
# set up.
self.blueprint_setup.add_url_rule(url, view_func=resource_func, **kwargs)
continue
else:
# Set the rule to a function that expects the blueprint prefix
# to construct the final url. Allows deferment of url finalization
# in the case that the associated Blueprint has not yet been
# registered to an application, so we can wait for the registration
# prefix
rule = partial(self._complete_url, url)
else:
# If we've got no Blueprint, just build a url with no prefix
rule = self._complete_url(url, '')
# Add the url to the application or blueprint
app.add_url_rule(rule, view_func=resource_func, **kwargs)
def output(self, resource):
"""Wraps a resource (as a flask view function), for cases where the
resource does not directly return a response object
:param resource: The resource as a flask view function
"""
@wraps(resource)
def wrapper(*args, **kwargs):
resp = resource(*args, **kwargs)
if isinstance(resp, ResponseBase): # There may be a better way to test
return resp
data, code, headers = unpack(resp)
return self.make_response(data, code, headers=headers)
return wrapper
def url_for(self, resource, **values):
"""Generates a URL to the given resource.
Works like :func:`flask.url_for`."""
endpoint = resource.endpoint
if self.blueprint:
endpoint = '{0}.{1}'.format(self.blueprint.name, endpoint)
return url_for(endpoint, **values)
def make_response(self, data, *args, **kwargs):
"""Looks up the representation transformer for the requested media
type, invoking the transformer to create a response object. This
defaults to default_mediatype if no transformer is found for the
requested mediatype. If default_mediatype is None, a 406 Not
Acceptable response will be sent as per RFC 2616 section 14.1
:param data: Python object containing response data to be transformed
"""
default_mediatype = kwargs.pop('fallback_mediatype', None) or self.default_mediatype
mediatype = request.accept_mimetypes.best_match(
self.representations,
default=default_mediatype,
)
if mediatype is None:
raise NotAcceptable()
if mediatype in self.representations:
resp = self.representations[mediatype](data, *args, **kwargs)
resp.headers['Content-Type'] = mediatype
return resp
elif mediatype == 'text/plain':
resp = original_flask_make_response(str(data), *args, **kwargs)
resp.headers['Content-Type'] = 'text/plain'
return resp
else:
raise InternalServerError()
def mediatypes(self):
"""Returns a list of requested mediatypes sent in the Accept header"""
return [h for h, q in sorted(request.accept_mimetypes,
key=operator.itemgetter(1), reverse=True)]
def representation(self, mediatype):
"""Allows additional representation transformers to be declared for the
api. Transformers are functions that must be decorated with this
method, passing the mediatype the transformer represents. Three
arguments are passed to the transformer:
* The data to be represented in the response body
* The http status code
* A dictionary of headers
The transformer should convert the data appropriately for the mediatype
and return a Flask response object.
Ex::
@api.representation('application/xml')
def xml(data, code, headers):
resp = make_response(convert_data_to_xml(data), code)
resp.headers.extend(headers)
return resp
"""
def wrapper(func):
self.representations[mediatype] = func
return func
return wrapper
def unauthorized(self, response):
""" Given a response, change it to ask for credentials """
if self.serve_challenge_on_401:
realm = current_app.config.get("HTTP_BASIC_AUTH_REALM", "flask-restful")
challenge = u"{0} realm=\"{1}\"".format("Basic", realm)
response.headers['WWW-Authenticate'] = challenge
return response
|
(app=None, prefix='', default_mediatype='application/json', decorators=None, catch_all_404s=False, serve_challenge_on_401=False, url_part_order='bae', errors=None)
|
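A minimal, self-contained sketch of the pattern described in the docstring above; the resource and routes are illustrative:

from flask import Flask
from flask_restful import Api, Resource

app = Flask(__name__)
api = Api(app, prefix="/v1", catch_all_404s=True)

class HelloWorld(Resource):
    def get(self):
        # plain dicts are serialized by the default JSON representation
        return {"hello": "world"}

api.add_resource(HelloWorld, "/", "/hello")

if __name__ == "__main__":
    app.run(debug=True)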
42,102 |
flask_restful
|
__init__
| null |
def __init__(self, app=None, prefix='',
default_mediatype='application/json', decorators=None,
catch_all_404s=False, serve_challenge_on_401=False,
url_part_order='bae', errors=None):
self.representations = OrderedDict(DEFAULT_REPRESENTATIONS)
self.urls = {}
self.prefix = prefix
self.default_mediatype = default_mediatype
self.decorators = decorators if decorators else []
self.catch_all_404s = catch_all_404s
self.serve_challenge_on_401 = serve_challenge_on_401
self.url_part_order = url_part_order
self.errors = errors or {}
self.blueprint_setup = None
self.endpoints = set()
self.resources = []
self.app = None
self.blueprint = None
if app is not None:
self.app = app
self.init_app(app)
|
(self, app=None, prefix='', default_mediatype='application/json', decorators=None, catch_all_404s=False, serve_challenge_on_401=False, url_part_order='bae', errors=None)
|
42,103 |
flask_restful
|
_blueprint_setup_add_url_rule_patch
|
Method used to patch BlueprintSetupState.add_url_rule for setup
state instance corresponding to this Api instance. Exists primarily
to enable _complete_url's function.
:param blueprint_setup: The BlueprintSetupState instance (self)
:param rule: A string or callable that takes a string and returns a
string(_complete_url) that is the url rule for the endpoint
being registered
:param endpoint: See BlueprintSetupState.add_url_rule
:param view_func: See BlueprintSetupState.add_url_rule
:param **options: See BlueprintSetupState.add_url_rule
|
@staticmethod
def _blueprint_setup_add_url_rule_patch(blueprint_setup, rule, endpoint=None, view_func=None, **options):
"""Method used to patch BlueprintSetupState.add_url_rule for setup
state instance corresponding to this Api instance. Exists primarily
to enable _complete_url's function.
:param blueprint_setup: The BlueprintSetupState instance (self)
:param rule: A string or callable that takes a string and returns a
string(_complete_url) that is the url rule for the endpoint
being registered
:param endpoint: See BlueprintSetupState.add_url_rule
:param view_func: See BlueprintSetupState.add_url_rule
:param **options: See BlueprintSetupState.add_url_rule
"""
if callable(rule):
rule = rule(blueprint_setup.url_prefix)
elif blueprint_setup.url_prefix:
rule = blueprint_setup.url_prefix + rule
options.setdefault('subdomain', blueprint_setup.subdomain)
if endpoint is None:
endpoint = view_func.__name__
defaults = blueprint_setup.url_defaults
if 'defaults' in options:
defaults = dict(defaults, **options.pop('defaults'))
blueprint_setup.app.add_url_rule(rule, '%s.%s' % (blueprint_setup.blueprint.name, endpoint),
view_func, defaults=defaults, **options)
|
(blueprint_setup, rule, endpoint=None, view_func=None, **options)
|
42,104 |
flask_restful
|
_complete_url
|
This method is used to defer the construction of the final url in
the case that the Api is created with a Blueprint.
:param url_part: The part of the url the endpoint is registered with
:param registration_prefix: The part of the url contributed by the
blueprint. Generally speaking, BlueprintSetupState.url_prefix
|
def _complete_url(self, url_part, registration_prefix):
"""This method is used to defer the construction of the final url in
the case that the Api is created with a Blueprint.
:param url_part: The part of the url the endpoint is registered with
:param registration_prefix: The part of the url contributed by the
blueprint. Generally speaking, BlueprintSetupState.url_prefix
"""
parts = {
'b': registration_prefix,
'a': self.prefix,
'e': url_part
}
return ''.join(parts[key] for key in self.url_part_order if parts[key])
|
(self, url_part, registration_prefix)
|
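A small sketch mirroring `_complete_url`'s concatenation logic for the default `url_part_order='bae'`; the prefix values are illustrative:

parts = {"b": "/blog", "a": "/api", "e": "/posts"}  # blueprint prefix, api prefix, endpoint part
url = "".join(parts[key] for key in "bae" if parts[key])
assert url == "/blog/api/posts"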
42,105 |
flask_restful
|
_deferred_blueprint_init
|
Synchronize prefix between blueprint/api and registration options, then
perform initialization with setup_state.app :class:`flask.Flask` object.
When a :class:`flask_restful.Api` object is initialized with a blueprint,
this method is recorded on the blueprint to be run when the blueprint is later
registered to a :class:`flask.Flask` object. This method also monkeypatches
BlueprintSetupState.add_url_rule with _blueprint_setup_add_url_rule_patch.
:param setup_state: The setup state object passed to deferred functions
during blueprint registration
:type setup_state: flask.blueprints.BlueprintSetupState
|
def _deferred_blueprint_init(self, setup_state):
"""Synchronize prefix between blueprint/api and registration options, then
perform initialization with setup_state.app :class:`flask.Flask` object.
When a :class:`flask_restful.Api` object is initialized with a blueprint,
this method is recorded on the blueprint to be run when the blueprint is later
registered to a :class:`flask.Flask` object. This method also monkeypatches
BlueprintSetupState.add_url_rule with _blueprint_setup_add_url_rule_patch.
:param setup_state: The setup state object passed to deferred functions
during blueprint registration
:type setup_state: flask.blueprints.BlueprintSetupState
"""
self.blueprint_setup = setup_state
if setup_state.add_url_rule.__name__ != '_blueprint_setup_add_url_rule_patch':
setup_state._original_add_url_rule = setup_state.add_url_rule
setup_state.add_url_rule = MethodType(Api._blueprint_setup_add_url_rule_patch,
setup_state)
if not setup_state.first_registration:
raise ValueError('flask-restful blueprints can only be registered once.')
self._init_app(setup_state.app)
|
(self, setup_state)
|
42,106 |
flask_restful
|
_has_fr_route
|
Encapsulating the rules for whether the request was to a flask-restful endpoint
|
def _has_fr_route(self):
"""Encapsulating the rules for whether the request was to a Flask endpoint"""
# 404's, 405's, which might not have a url_rule
if self._should_use_fr_error_handler():
return True
# for all other errors, just check if FR dispatched the route
if not request.url_rule:
return False
return self.owns_endpoint(request.url_rule.endpoint)
|
(self)
|
42,107 |
flask_restful
|
_init_app
|
Perform initialization actions with the given :class:`flask.Flask`
object.
:param app: The flask application object
:type app: flask.Flask
|
def _init_app(self, app):
"""Perform initialization actions with the given :class:`flask.Flask`
object.
:param app: The flask application object
:type app: flask.Flask
"""
app.handle_exception = partial(self.error_router, app.handle_exception)
app.handle_user_exception = partial(self.error_router, app.handle_user_exception)
if len(self.resources) > 0:
for resource, urls, kwargs in self.resources:
self._register_view(app, resource, *urls, **kwargs)
|
(self, app)
|
42,108 |
flask_restful
|
_register_view
| null |
def _register_view(self, app, resource, *urls, **kwargs):
endpoint = kwargs.pop('endpoint', None) or resource.__name__.lower()
self.endpoints.add(endpoint)
resource_class_args = kwargs.pop('resource_class_args', ())
resource_class_kwargs = kwargs.pop('resource_class_kwargs', {})
# NOTE: 'view_functions' is cleaned up from Blueprint class in Flask 1.0
if endpoint in getattr(app, 'view_functions', {}):
previous_view_class = app.view_functions[endpoint].__dict__['view_class']
# if you override the endpoint with a different class, avoid the collision by raising an exception
if previous_view_class != resource:
raise ValueError('This endpoint (%s) is already set to the class %s.' % (endpoint, previous_view_class.__name__))
resource.mediatypes = self.mediatypes_method() # Hacky
resource.endpoint = endpoint
resource_func = self.output(resource.as_view(endpoint, *resource_class_args,
**resource_class_kwargs))
for decorator in self.decorators:
resource_func = decorator(resource_func)
for url in urls:
# If this Api has a blueprint
if self.blueprint:
# And this Api has been setup
if self.blueprint_setup:
# Set the rule to a string directly, as the blueprint is already
# set up.
self.blueprint_setup.add_url_rule(url, view_func=resource_func, **kwargs)
continue
else:
# Set the rule to a function that expects the blueprint prefix
# to construct the final url. Allows deferment of url finalization
# in the case that the associated Blueprint has not yet been
# registered to an application, so we can wait for the registration
# prefix
rule = partial(self._complete_url, url)
else:
# If we've got no Blueprint, just build a url with no prefix
rule = self._complete_url(url, '')
# Add the url to the application or blueprint
app.add_url_rule(rule, view_func=resource_func, **kwargs)
|
(self, app, resource, *urls, **kwargs)
|
42,109 |
flask_restful
|
_should_use_fr_error_handler
|
Determine if error should be handled with FR or default Flask
The goal is to return Flask error handlers for non-FR-related routes,
and FR errors (with the correct media type) for FR endpoints. This
method currently handles 404 and 405 errors.
:return: bool
|
def _should_use_fr_error_handler(self):
""" Determine if error should be handled with FR or default Flask
The goal is to return Flask error handlers for non-FR-related routes,
and FR errors (with the correct media type) for FR endpoints. This
method currently handles 404 and 405 errors.
:return: bool
"""
adapter = current_app.create_url_adapter(request)
try:
adapter.match()
except MethodNotAllowed as e:
# Check if the other HTTP methods at this url would hit the Api
valid_route_method = e.valid_methods[0]
rule, _ = adapter.match(method=valid_route_method, return_rule=True)
return self.owns_endpoint(rule.endpoint)
except NotFound:
return self.catch_all_404s
except:
# Werkzeug throws other kinds of exceptions, such as Redirect
pass
|
(self)
|
42,110 |
flask_restful
|
add_resource
|
Adds a resource to the api.
:param resource: the class name of your resource
:type resource: :class:`Type[Resource]`
:param urls: one or more url routes to match for the resource, standard
flask routing rules apply. Any url variables will be
passed to the resource method as args.
:type urls: str
:param endpoint: endpoint name (defaults to :meth:`Resource.__name__.lower`)
Can be used to reference this route in :class:`fields.Url` fields
:type endpoint: str
:param resource_class_args: args to be forwarded to the constructor of
the resource.
:type resource_class_args: tuple
:param resource_class_kwargs: kwargs to be forwarded to the constructor
of the resource.
:type resource_class_kwargs: dict
Additional keyword arguments not specified above will be passed as-is
to :meth:`flask.Flask.add_url_rule`.
Examples::
api.add_resource(HelloWorld, '/', '/hello')
api.add_resource(Foo, '/foo', endpoint="foo")
api.add_resource(FooSpecial, '/special/foo', endpoint="foo")
|
def add_resource(self, resource, *urls, **kwargs):
"""Adds a resource to the api.
:param resource: the class name of your resource
:type resource: :class:`Type[Resource]`
:param urls: one or more url routes to match for the resource, standard
flask routing rules apply. Any url variables will be
passed to the resource method as args.
:type urls: str
:param endpoint: endpoint name (defaults to :meth:`Resource.__name__.lower`)
Can be used to reference this route in :class:`fields.Url` fields
:type endpoint: str
:param resource_class_args: args to be forwarded to the constructor of
the resource.
:type resource_class_args: tuple
:param resource_class_kwargs: kwargs to be forwarded to the constructor
of the resource.
:type resource_class_kwargs: dict
Additional keyword arguments not specified above will be passed as-is
to :meth:`flask.Flask.add_url_rule`.
Examples::
api.add_resource(HelloWorld, '/', '/hello')
api.add_resource(Foo, '/foo', endpoint="foo")
api.add_resource(FooSpecial, '/special/foo', endpoint="foo")
"""
if self.app is not None:
self._register_view(self.app, resource, *urls, **kwargs)
else:
self.resources.append((resource, urls, kwargs))
|
(self, resource, *urls, **kwargs)
|
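A sketch of forwarding constructor arguments through `resource_class_kwargs`; the resource class and in-memory store below are hypothetical:

from flask import Flask
from flask_restful import Api, Resource

app = Flask(__name__)
api = Api(app)

class TodoItem(Resource):
    def __init__(self, store):
        # receives the kwarg forwarded via resource_class_kwargs on each request
        self.store = store

    def get(self, todo_id):
        return {todo_id: self.store.get(todo_id, "unknown")}

api.add_resource(
    TodoItem,
    "/todos/<string:todo_id>",
    endpoint="todo",
    resource_class_kwargs={"store": {"1": "buy milk"}},
)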
42,111 |
flask_restful
|
error_router
|
This function decides whether the error occurred in a flask-restful
endpoint or not. If it happened in a flask-restful endpoint, our
handler will be dispatched. If it happened in an unrelated view, the
app's original error handler will be dispatched.
In the event that the error occurred in a flask-restful endpoint but
the local handler can't resolve the situation, the router will fall
back onto the original_handler as last resort.
:param original_handler: the original Flask error handler for the app
:type original_handler: function
:param e: the exception raised while handling the request
:type e: Exception
|
def error_router(self, original_handler, e):
"""This function decides whether the error occured in a flask-restful
endpoint or not. If it happened in a flask-restful endpoint, our
handler will be dispatched. If it happened in an unrelated view, the
app's original error handler will be dispatched.
In the event that the error occurred in a flask-restful endpoint but
the local handler can't resolve the situation, the router will fall
back onto the original_handler as last resort.
:param original_handler: the original Flask error handler for the app
:type original_handler: function
:param e: the exception raised while handling the request
:type e: Exception
"""
if self._has_fr_route():
try:
return self.handle_error(e)
except Exception:
pass # Fall through to original handler
return original_handler(e)
|
(self, original_handler, e)
|
42,112 |
flask_restful
|
handle_error
|
Error handler for the API transforms a raised exception into a Flask
response, with the appropriate HTTP status code and body.
:param e: the raised Exception object
:type e: Exception
|
def handle_error(self, e):
"""Error handler for the API transforms a raised exception into a Flask
response, with the appropriate HTTP status code and body.
:param e: the raised Exception object
:type e: Exception
"""
got_request_exception.send(current_app._get_current_object(), exception=e)
_handle_flask_propagate_exceptions_config(current_app, e)
headers = Headers()
if isinstance(e, HTTPException):
if e.response is not None:
# If HTTPException is initialized with a response, then return e.get_response().
# This prevents specified error response from being overridden.
# e.g., HTTPException(response=Response("Hello World"))
resp = e.get_response()
return resp
code = e.code
default_data = {
'message': getattr(e, 'description', http_status_message(code))
}
headers = e.get_response().headers
else:
code = 500
default_data = {
'message': http_status_message(code),
}
# Werkzeug exceptions generate a content-length header which is added
# to the response in addition to the actual content-length header
# https://github.com/flask-restful/flask-restful/issues/534
remove_headers = ('Content-Length',)
for header in remove_headers:
headers.pop(header, None)
data = getattr(e, 'data', default_data)
if code and code >= 500:
exc_info = sys.exc_info()
if exc_info[1] is None:
exc_info = None
current_app.log_exception(exc_info)
error_cls_name = type(e).__name__
if error_cls_name in self.errors:
custom_data = self.errors.get(error_cls_name, {})
code = custom_data.get('status', 500)
data.update(custom_data)
if code == 406 and self.default_mediatype is None:
# if we are handling NotAcceptable (406), make sure that
# make_response uses a representation we support as the
# default mediatype (so that make_response doesn't throw
# another NotAcceptable error).
supported_mediatypes = list(self.representations.keys())
fallback_mediatype = supported_mediatypes[0] if supported_mediatypes else "text/plain"
resp = self.make_response(
data,
code,
headers,
fallback_mediatype = fallback_mediatype
)
else:
resp = self.make_response(data, code, headers)
if code == 401:
resp = self.unauthorized(resp)
return resp
|
(self, e)
|
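A sketch of the `errors` mapping consumed by `handle_error` above: keys are exception class names, and the `UserNotFoundError` here is hypothetical (it would be raised inside a resource):

from flask import Flask
from flask_restful import Api

errors = {
    "UserNotFoundError": {"message": "No such user.", "status": 404},
}

app = Flask(__name__)
api = Api(app, errors=errors)
# Raising UserNotFoundError while handling a request now yields a 404
# response with the custom message instead of a generic 500.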
42,113 |
flask_restful
|
init_app
|
Initialize this class with the given :class:`flask.Flask`
application or :class:`flask.Blueprint` object.
:param app: the Flask application or blueprint object
:type app: flask.Flask
:type app: flask.Blueprint
Examples::
api = Api()
api.add_resource(...)
api.init_app(app)
|
def init_app(self, app):
"""Initialize this class with the given :class:`flask.Flask`
application or :class:`flask.Blueprint` object.
:param app: the Flask application or blueprint object
:type app: flask.Flask
:type app: flask.Blueprint
Examples::
api = Api()
api.add_resource(...)
api.init_app(app)
"""
# If app is a blueprint, defer the initialization
try:
app.record(self._deferred_blueprint_init)
# Flask.Blueprint has a 'record' attribute, Flask.Flask does not
except AttributeError:
self._init_app(app)
else:
self.blueprint = app
|
(self, app)
|
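A sketch of the deferred-initialization path: passing a Blueprint to `Api` records `_deferred_blueprint_init`, which runs when the blueprint is registered on the app; names below are illustrative:

from flask import Blueprint, Flask
from flask_restful import Api, Resource

bp = Blueprint("api_v1", __name__)
api = Api(bp)  # initialization is deferred until registration

class Ping(Resource):
    def get(self):
        return {"pong": True}

api.add_resource(Ping, "/ping")

app = Flask(__name__)
app.register_blueprint(bp, url_prefix="/v1")  # triggers the deferred init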
42,114 |
flask_restful
|
make_response
|
Looks up the representation transformer for the requested media
type, invoking the transformer to create a response object. This
defaults to default_mediatype if no transformer is found for the
requested mediatype. If default_mediatype is None, a 406 Not
Acceptable response will be sent as per RFC 2616 section 14.1
:param data: Python object containing response data to be transformed
|
def make_response(self, data, *args, **kwargs):
"""Looks up the representation transformer for the requested media
type, invoking the transformer to create a response object. This
defaults to default_mediatype if no transformer is found for the
requested mediatype. If default_mediatype is None, a 406 Not
Acceptable response will be sent as per RFC 2616 section 14.1
:param data: Python object containing response data to be transformed
"""
default_mediatype = kwargs.pop('fallback_mediatype', None) or self.default_mediatype
mediatype = request.accept_mimetypes.best_match(
self.representations,
default=default_mediatype,
)
if mediatype is None:
raise NotAcceptable()
if mediatype in self.representations:
resp = self.representations[mediatype](data, *args, **kwargs)
resp.headers['Content-Type'] = mediatype
return resp
elif mediatype == 'text/plain':
resp = original_flask_make_response(str(data), *args, **kwargs)
resp.headers['Content-Type'] = 'text/plain'
return resp
else:
raise InternalServerError()
|
(self, data, *args, **kwargs)
|
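A sketch probing `make_response`'s content negotiation with the Flask test client; the endpoint is illustrative and JSON is the default representation:

from flask import Flask
from flask_restful import Api, Resource

app = Flask(__name__)
api = Api(app, default_mediatype="application/json")

class Echo(Resource):
    def get(self):
        return {"ok": True}

api.add_resource(Echo, "/echo")

with app.test_client() as client:
    r = client.get("/echo", headers={"Accept": "application/json"})
    assert r.content_type.startswith("application/json")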
42,115 |
flask_restful
|
mediatypes
|
Returns a list of requested mediatypes sent in the Accept header
|
def mediatypes(self):
"""Returns a list of requested mediatypes sent in the Accept header"""
return [h for h, q in sorted(request.accept_mimetypes,
key=operator.itemgetter(1), reverse=True)]
|
(self)
|
42,116 |
flask_restful
|
mediatypes_method
|
Return a method that returns a list of mediatypes
|
def mediatypes_method(self):
"""Return a method that returns a list of mediatypes
"""
return lambda resource_cls: self.mediatypes() + [self.default_mediatype]
|
(self)
|
42,117 |
flask_restful
|
output
|
Wraps a resource (as a flask view function), for cases where the
resource does not directly return a response object
:param resource: The resource as a flask view function
|
def output(self, resource):
"""Wraps a resource (as a flask view function), for cases where the
resource does not directly return a response object
:param resource: The resource as a flask view function
"""
@wraps(resource)
def wrapper(*args, **kwargs):
resp = resource(*args, **kwargs)
if isinstance(resp, ResponseBase): # There may be a better way to test
return resp
data, code, headers = unpack(resp)
return self.make_response(data, code, headers=headers)
return wrapper
|
(self, resource)
|
42,118 |
flask_restful
|
owns_endpoint
|
Tests if an endpoint name (not path) belongs to this Api. Takes
into account the Blueprint name part of the endpoint name.
:param endpoint: The name of the endpoint being checked
:return: bool
|
def owns_endpoint(self, endpoint):
"""Tests if an endpoint name (not path) belongs to this Api. Takes
into account the Blueprint name part of the endpoint name.
:param endpoint: The name of the endpoint being checked
:return: bool
"""
if self.blueprint:
if endpoint.startswith(self.blueprint.name):
endpoint = endpoint.split(self.blueprint.name + '.', 1)[-1]
else:
return False
return endpoint in self.endpoints
|
(self, endpoint)
|
42,119 |
flask_restful
|
representation
|
Allows additional representation transformers to be declared for the
api. Transformers are functions that must be decorated with this
method, passing the mediatype the transformer represents. Three
arguments are passed to the transformer:
* The data to be represented in the response body
* The http status code
* A dictionary of headers
The transformer should convert the data appropriately for the mediatype
and return a Flask response object.
Ex::
@api.representation('application/xml')
def xml(data, code, headers):
resp = make_response(convert_data_to_xml(data), code)
resp.headers.extend(headers)
return resp
|
def representation(self, mediatype):
"""Allows additional representation transformers to be declared for the
api. Transformers are functions that must be decorated with this
method, passing the mediatype the transformer represents. Three
arguments are passed to the transformer:
* The data to be represented in the response body
* The http status code
* A dictionary of headers
The transformer should convert the data appropriately for the mediatype
and return a Flask response object.
Ex::
@api.representation('application/xml')
def xml(data, code, headers):
resp = make_response(convert_data_to_xml(data), code)
resp.headers.extend(headers)
return resp
"""
def wrapper(func):
self.representations[mediatype] = func
return func
return wrapper
|
(self, mediatype)
|
42,120 |
flask_restful
|
resource
|
Wraps a :class:`~flask_restful.Resource` class, adding it to the
api. Parameters are the same as :meth:`~flask_restful.Api.add_resource`.
Example::
app = Flask(__name__)
api = restful.Api(app)
@api.resource('/foo')
class Foo(Resource):
def get(self):
return 'Hello, World!'
|
def resource(self, *urls, **kwargs):
"""Wraps a :class:`~flask_restful.Resource` class, adding it to the
api. Parameters are the same as :meth:`~flask_restful.Api.add_resource`.
Example::
app = Flask(__name__)
api = restful.Api(app)
@api.resource('/foo')
class Foo(Resource):
def get(self):
return 'Hello, World!'
"""
def decorator(cls):
self.add_resource(cls, *urls, **kwargs)
return cls
return decorator
|
(self, *urls, **kwargs)
|
42,121 |
flask_restful
|
unauthorized
|
Given a response, change it to ask for credentials
|
def unauthorized(self, response):
""" Given a response, change it to ask for credentials """
if self.serve_challenge_on_401:
realm = current_app.config.get("HTTP_BASIC_AUTH_REALM", "flask-restful")
challenge = u"{0} realm=\"{1}\"".format("Basic", realm)
response.headers['WWW-Authenticate'] = challenge
return response
|
(self, response)
|
42,122 |
flask_restful
|
url_for
|
Generates a URL to the given resource.
Works like :func:`flask.url_for`.
|
def url_for(self, resource, **values):
"""Generates a URL to the given resource.
Works like :func:`flask.url_for`."""
endpoint = resource.endpoint
if self.blueprint:
endpoint = '{0}.{1}'.format(self.blueprint.name, endpoint)
return url_for(endpoint, **values)
|
(self, resource, **values)
|
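A sketch of resolving a registered resource back to its URL; `url_for` reads the `endpoint` attribute that `_register_view` sets on the resource class:

from flask import Flask
from flask_restful import Api, Resource

app = Flask(__name__)
api = Api(app)

class User(Resource):
    def get(self, user_id):
        return {"id": user_id}

api.add_resource(User, "/users/<int:user_id>", endpoint="user")

with app.test_request_context():
    assert api.url_for(User, user_id=7) == "/users/7"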
42,123 |
werkzeug.exceptions
|
HTTPException
|
The base class for all HTTP exceptions. This exception can be called as a WSGI
application to render a default error page or you can catch the subclasses
of it independently and render nicer error messages.
.. versionchanged:: 2.1
Removed the ``wrap`` class method.
|
class HTTPException(Exception):
"""The base class for all HTTP exceptions. This exception can be called as a WSGI
application to render a default error page or you can catch the subclasses
of it independently and render nicer error messages.
.. versionchanged:: 2.1
Removed the ``wrap`` class method.
"""
code: int | None = None
description: str | None = None
def __init__(
self,
description: str | None = None,
response: Response | None = None,
) -> None:
super().__init__()
if description is not None:
self.description = description
self.response = response
@property
def name(self) -> str:
"""The status name."""
from .http import HTTP_STATUS_CODES
return HTTP_STATUS_CODES.get(self.code, "Unknown Error") # type: ignore
def get_description(
self,
environ: WSGIEnvironment | None = None,
scope: dict[str, t.Any] | None = None,
) -> str:
"""Get the description."""
if self.description is None:
description = ""
else:
description = self.description
description = escape(description).replace("\n", Markup("<br>"))
return f"<p>{description}</p>"
def get_body(
self,
environ: WSGIEnvironment | None = None,
scope: dict[str, t.Any] | None = None,
) -> str:
"""Get the HTML body."""
return (
"<!doctype html>\n"
"<html lang=en>\n"
f"<title>{self.code} {escape(self.name)}</title>\n"
f"<h1>{escape(self.name)}</h1>\n"
f"{self.get_description(environ)}\n"
)
def get_headers(
self,
environ: WSGIEnvironment | None = None,
scope: dict[str, t.Any] | None = None,
) -> list[tuple[str, str]]:
"""Get a list of headers."""
return [("Content-Type", "text/html; charset=utf-8")]
def get_response(
self,
environ: WSGIEnvironment | WSGIRequest | None = None,
scope: dict[str, t.Any] | None = None,
) -> Response:
"""Get a response object. If one was passed to the exception
it's returned directly.
:param environ: the optional environ for the request. This
can be used to modify the response depending
on how the request looked.
:return: a :class:`Response` object or a subclass thereof.
"""
from .wrappers.response import Response as WSGIResponse # noqa: F811
if self.response is not None:
return self.response
if environ is not None:
environ = _get_environ(environ)
headers = self.get_headers(environ, scope)
return WSGIResponse(self.get_body(environ, scope), self.code, headers)
def __call__(
self, environ: WSGIEnvironment, start_response: StartResponse
) -> t.Iterable[bytes]:
"""Call the exception as WSGI application.
:param environ: the WSGI environment.
:param start_response: the response callable provided by the WSGI
server.
"""
response = t.cast("WSGIResponse", self.get_response(environ))
return response(environ, start_response)
def __str__(self) -> str:
code = self.code if self.code is not None else "???"
return f"{code} {self.name}: {self.description}"
def __repr__(self) -> str:
code = self.code if self.code is not None else "???"
return f"<{type(self).__name__} '{code}: {self.name}'>"
|
(description: str | None = None, response: 'Response | None' = None) -> 'None'
|
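A sketch of defining and raising a custom `HTTPException` subclass; the class is hypothetical, and `code`/`description` drive `__str__`, `name`, and `get_response` as shown above:

from werkzeug.exceptions import HTTPException

class PaymentRequired(HTTPException):
    code = 402
    description = "Payment is required to access this resource."

try:
    raise PaymentRequired()
except HTTPException as e:
    print(e)                 # "402 Payment Required: Payment is required ..."
    resp = e.get_response()  # a Response object, itself callable as a WSGI app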
42,124 |
werkzeug.exceptions
|
__call__
|
Call the exception as WSGI application.
:param environ: the WSGI environment.
:param start_response: the response callable provided by the WSGI
server.
|
def __call__(
self, environ: WSGIEnvironment, start_response: StartResponse
) -> t.Iterable[bytes]:
"""Call the exception as WSGI application.
:param environ: the WSGI environment.
:param start_response: the response callable provided by the WSGI
server.
"""
response = t.cast("WSGIResponse", self.get_response(environ))
return response(environ, start_response)
|
(self, environ: 'WSGIEnvironment', start_response: 'StartResponse') -> 't.Iterable[bytes]'
|
42,125 |
werkzeug.exceptions
|
__init__
| null |
def __init__(
self,
description: str | None = None,
response: Response | None = None,
) -> None:
super().__init__()
if description is not None:
self.description = description
self.response = response
|
(self, description: 'str | None' = None, response: 'Response | None' = None) -> 'None'
|
42,126 |
werkzeug.exceptions
|
__repr__
| null |
def __repr__(self) -> str:
code = self.code if self.code is not None else "???"
return f"<{type(self).__name__} '{code}: {self.name}'>"
|
(self) -> str
|
42,127 |
werkzeug.exceptions
|
__str__
| null |
def __str__(self) -> str:
code = self.code if self.code is not None else "???"
return f"{code} {self.name}: {self.description}"
|
(self) -> str
|
42,128 |
werkzeug.exceptions
|
get_body
|
Get the HTML body.
|
def get_body(
self,
environ: WSGIEnvironment | None = None,
scope: dict[str, t.Any] | None = None,
) -> str:
"""Get the HTML body."""
return (
"<!doctype html>\n"
"<html lang=en>\n"
f"<title>{self.code} {escape(self.name)}</title>\n"
f"<h1>{escape(self.name)}</h1>\n"
f"{self.get_description(environ)}\n"
)
|
(self, environ: 'WSGIEnvironment | None' = None, scope: 'dict[str, t.Any] | None' = None) -> 'str'
|
42,129 |
werkzeug.exceptions
|
get_description
|
Get the description.
|
def get_description(
self,
environ: WSGIEnvironment | None = None,
scope: dict[str, t.Any] | None = None,
) -> str:
"""Get the description."""
if self.description is None:
description = ""
else:
description = self.description
description = escape(description).replace("\n", Markup("<br>"))
return f"<p>{description}</p>"
|
(self, environ: 'WSGIEnvironment | None' = None, scope: 'dict[str, t.Any] | None' = None) -> 'str'
|
42,130 |
werkzeug.exceptions
|
get_headers
|
Get a list of headers.
|
def get_headers(
self,
environ: WSGIEnvironment | None = None,
scope: dict[str, t.Any] | None = None,
) -> list[tuple[str, str]]:
"""Get a list of headers."""
return [("Content-Type", "text/html; charset=utf-8")]
|
(self, environ: 'WSGIEnvironment | None' = None, scope: 'dict[str, t.Any] | None' = None) -> 'list[tuple[str, str]]'
|
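A sketch of the common pattern of overriding `get_body` and `get_headers` together to emit JSON error bodies; the subclass is hypothetical:

import json

from werkzeug.exceptions import HTTPException

class JSONNotFound(HTTPException):
    code = 404
    description = "Resource not found."

    def get_body(self, environ=None, scope=None):
        # JSON payload instead of the default HTML body
        return json.dumps({"code": self.code, "error": self.description})

    def get_headers(self, environ=None, scope=None):
        return [("Content-Type", "application/json")]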