response | instruction
---|---
Parse an integer parameter from the request string
Args:
args: A mapping of request args as bytes to a list of bytes (e.g. request.args).
name: the name of the query parameter.
default: value to use if the parameter is absent, defaults to None.
required: whether to raise a 400 SynapseError if the parameter is absent,
defaults to False.
Returns:
An int value or the default.
Raises:
SynapseError: if the parameter is absent and required, or if the
parameter is present and not an integer. | def parse_integer_from_args(
args: Mapping[bytes, Sequence[bytes]],
name: str,
default: Optional[int] = None,
required: bool = False,
) -> Optional[int]:
"""Parse an integer parameter from the request string
Args:
args: A mapping of request args as bytes to a list of bytes (e.g. request.args).
name: the name of the query parameter.
default: value to use if the parameter is absent, defaults to None.
required: whether to raise a 400 SynapseError if the parameter is absent,
defaults to False.
Returns:
An int value or the default.
Raises:
SynapseError: if the parameter is absent and required, or if the
parameter is present and not an integer.
"""
name_bytes = name.encode("ascii")
if name_bytes in args:
try:
return int(args[name_bytes][0])
except Exception:
message = "Query parameter %r must be an integer" % (name,)
raise SynapseError(
HTTPStatus.BAD_REQUEST, message, errcode=Codes.INVALID_PARAM
)
else:
if required:
message = "Missing integer query parameter %r" % (name,)
raise SynapseError(
HTTPStatus.BAD_REQUEST, message, errcode=Codes.MISSING_PARAM
)
else:
return default |
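A minimal usage sketch (hypothetical values; the `args` mapping is in the shape Twisted's `request.args` provides, as the docstring notes):

```py
# Hypothetical query string "?limit=10":
args = {b"limit": [b"10"]}

parse_integer_from_args(args, "limit", default=50)  # -> 10
parse_integer_from_args(args, "from", default=0)    # -> 0 (absent, default used)

# A non-integer value such as {b"limit": [b"ten"]} raises a 400 SynapseError
# with errcode Codes.INVALID_PARAM; required=True on an absent parameter
# raises with Codes.MISSING_PARAM instead.
```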
Parse a boolean parameter from the request query string
Args:
request: the twisted HTTP request.
name: the name of the query parameter.
default: value to use if the parameter is absent, defaults to None.
required: whether to raise a 400 SynapseError if the parameter is absent,
defaults to False.
Returns:
A bool value or the default.
Raises:
SynapseError: if the parameter is absent and required, or if the
parameter is present and not one of "true" or "false". | def parse_boolean(
request: Request, name: str, default: Optional[bool] = None, required: bool = False
) -> Optional[bool]:
"""Parse a boolean parameter from the request query string
Args:
request: the twisted HTTP request.
name: the name of the query parameter.
default: value to use if the parameter is absent, defaults to None.
required: whether to raise a 400 SynapseError if the parameter is absent,
defaults to False.
Returns:
A bool value or the default.
Raises:
SynapseError: if the parameter is absent and required, or if the
parameter is present and not one of "true" or "false".
"""
args: Mapping[bytes, Sequence[bytes]] = request.args # type: ignore
return parse_boolean_from_args(args, name, default, required) |
Parse a boolean parameter from the request query string
Args:
args: A mapping of request args as bytes to a list of bytes (e.g. request.args).
name: the name of the query parameter.
default: value to use if the parameter is absent, defaults to None.
required: whether to raise a 400 SynapseError if the parameter is absent,
defaults to False.
Returns:
A bool value or the default.
Raises:
SynapseError: if the parameter is absent and required, or if the
parameter is present and not one of "true" or "false". | def parse_boolean_from_args(
args: Mapping[bytes, Sequence[bytes]],
name: str,
default: Optional[bool] = None,
required: bool = False,
) -> Optional[bool]:
"""Parse a boolean parameter from the request query string
Args:
args: A mapping of request args as bytes to a list of bytes (e.g. request.args).
name: the name of the query parameter.
default: value to use if the parameter is absent, defaults to None.
required: whether to raise a 400 SynapseError if the parameter is absent,
defaults to False.
Returns:
A bool value or the default.
Raises:
SynapseError: if the parameter is absent and required, or if the
parameter is present and not one of "true" or "false".
"""
name_bytes = name.encode("ascii")
if name_bytes in args:
try:
return {b"true": True, b"false": False}[args[name_bytes][0]]
except Exception:
message = (
"Boolean query parameter %r must be one of ['true', 'false']"
) % (name,)
raise SynapseError(
HTTPStatus.BAD_REQUEST, message, errcode=Codes.INVALID_PARAM
)
else:
if required:
message = "Missing boolean query parameter %r" % (name,)
raise SynapseError(
HTTPStatus.BAD_REQUEST, message, errcode=Codes.MISSING_PARAM
)
else:
return default |
Parse a string parameter as bytes from the request query string.
Args:
args: A mapping of request args as bytes to a list of bytes (e.g. request.args).
name: the name of the query parameter.
default: value to use if the parameter is absent,
defaults to None. Must be bytes, if given.
required: whether to raise a 400 SynapseError if the
parameter is absent, defaults to False.
Returns:
Bytes or the default value.
Raises:
SynapseError if the parameter is absent and required. | def parse_bytes_from_args(
args: Mapping[bytes, Sequence[bytes]],
name: str,
default: Optional[bytes] = None,
required: bool = False,
) -> Optional[bytes]:
"""
Parse a string parameter as bytes from the request query string.
Args:
args: A mapping of request args as bytes to a list of bytes (e.g. request.args).
name: the name of the query parameter.
default: value to use if the parameter is absent,
defaults to None. Must be bytes, if given.
required: whether to raise a 400 SynapseError if the
parameter is absent, defaults to False.
Returns:
Bytes or the default value.
Raises:
SynapseError if the parameter is absent and required.
"""
name_bytes = name.encode("ascii")
if name_bytes in args:
return args[name_bytes][0]
elif required:
message = "Missing string query parameter %s" % (name,)
raise SynapseError(HTTPStatus.BAD_REQUEST, message, errcode=Codes.MISSING_PARAM)
return default |
Parse a string parameter from the request query string.
The content of the query param will be decoded to Unicode using the given encoding.
Args:
request: the twisted HTTP request.
name: the name of the query parameter.
default: value to use if the parameter is absent, defaults to None.
required: whether to raise a 400 SynapseError if the
parameter is absent, defaults to False.
allowed_values: List of allowed values for the
string, or None if any value is allowed, defaults to None. Must be
the same type as name, if given.
encoding: The encoding to decode the string content with.
Returns:
A string value or the default.
Raises:
SynapseError if the parameter is absent and required, or if the
parameter is present, must be one of a list of allowed values and
is not one of those allowed values. | def parse_string(
request: Request,
name: str,
default: Optional[str] = None,
required: bool = False,
allowed_values: Optional[StrCollection] = None,
encoding: str = "ascii",
) -> Optional[str]:
"""
Parse a string parameter from the request query string.
The content of the query param will be decoded to Unicode using the given encoding.
Args:
request: the twisted HTTP request.
name: the name of the query parameter.
default: value to use if the parameter is absent, defaults to None.
required: whether to raise a 400 SynapseError if the
parameter is absent, defaults to False.
allowed_values: List of allowed values for the
string, or None if any value is allowed, defaults to None. Must be
the same type as name, if given.
encoding: The encoding to decode the string content with.
Returns:
A string value or the default.
Raises:
SynapseError if the parameter is absent and required, or if the
parameter is present, must be one of a list of allowed values and
is not one of those allowed values.
"""
args: Mapping[bytes, Sequence[bytes]] = request.args # type: ignore
return parse_string_from_args(
args,
name,
default,
required=required,
allowed_values=allowed_values,
encoding=encoding,
) |
Parse an enum parameter from the request query string.
Note that the enum *must only have string values*.
Args:
request: the twisted HTTP request.
name: the name of the query parameter.
E: the enum which represents valid values
default: enum value to use if the parameter is absent, defaults to None.
required: whether to raise a 400 SynapseError if the
parameter is absent, defaults to False.
Returns:
An enum value.
Raises:
SynapseError if the parameter is absent and required, or if the
parameter is present, must be one of a list of allowed values and
is not one of those allowed values. | def parse_enum(
request: Request,
name: str,
E: Type[EnumT],
default: Optional[EnumT] = None,
required: bool = False,
) -> Optional[EnumT]:
"""
Parse an enum parameter from the request query string.
Note that the enum *must only have string values*.
Args:
request: the twisted HTTP request.
name: the name of the query parameter.
E: the enum which represents valid values
default: enum value to use if the parameter is absent, defaults to None.
required: whether to raise a 400 SynapseError if the
parameter is absent, defaults to False.
Returns:
An enum value.
Raises:
SynapseError if the parameter is absent and required, or if the
parameter is present, must be one of a list of allowed values and
is not one of those allowed values.
"""
# Assert the enum values are strings.
assert all(
isinstance(e.value, str) for e in E
), "parse_enum only works with string values"
str_value = parse_string(
request,
name,
default=default.value if default is not None else None,
required=required,
allowed_values=[e.value for e in E],
)
if str_value is None:
return None
return E(str_value) |
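A hedged sketch with a hypothetical string-valued enum; `request` is assumed to be a Twisted request whose query string contains `?dir=b`:

```py
from enum import Enum

class Direction(Enum):
    FORWARDS = "f"
    BACKWARDS = "b"

# parse_enum(request, "dir", Direction, default=Direction.FORWARDS)
#   -> Direction.BACKWARDS for "?dir=b"
#   -> Direction.FORWARDS if "dir" is absent
#   -> a 400 SynapseError for any value other than "f" or "b"
```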
Parse a string parameter from the request query string list.
The content of the query param will be decoded to Unicode using the encoding.
Args:
args: A mapping of request args as bytes to a list of bytes (e.g. request.args).
name: the name of the query parameter.
default: value to use if the parameter is absent, defaults to None.
required: whether to raise a 400 SynapseError if the
parameter is absent, defaults to False.
allowed_values: List of allowed values for the
string, or None if any value is allowed, defaults to None.
encoding: The encoding to decode the string content with.
Returns:
A list of string values, or the default.
Raises:
SynapseError if the parameter is absent and required, or if the
parameter is present, must be one of a list of allowed values and
is not one of those allowed values. | def parse_strings_from_args(
args: Mapping[bytes, Sequence[bytes]],
name: str,
default: Optional[List[str]] = None,
required: bool = False,
allowed_values: Optional[StrCollection] = None,
encoding: str = "ascii",
) -> Optional[List[str]]:
"""
Parse a string parameter from the request query string list.
The content of the query param will be decoded to Unicode using the encoding.
Args:
args: A mapping of request args as bytes to a list of bytes (e.g. request.args).
name: the name of the query parameter.
default: value to use if the parameter is absent, defaults to None.
required: whether to raise a 400 SynapseError if the
parameter is absent, defaults to False.
allowed_values: List of allowed values for the
string, or None if any value is allowed, defaults to None.
encoding: The encoding to decode the string content with.
Returns:
A list of string values, or the default.
Raises:
SynapseError if the parameter is absent and required, or if the
parameter is present, must be one of a list of allowed values and
is not one of those allowed values.
"""
name_bytes = name.encode("ascii")
if name_bytes in args:
values = args[name_bytes]
return [
_parse_string_value(value, allowed_values, name=name, encoding=encoding)
for value in values
]
else:
if required:
message = "Missing string query parameter %r" % (name,)
raise SynapseError(
HTTPStatus.BAD_REQUEST, message, errcode=Codes.MISSING_PARAM
)
return default |
Parse the string parameter from the request query string list
and return the first result.
The content of the query param will be decoded to Unicode using the encoding.
Args:
args: A mapping of request args as bytes to a list of bytes (e.g. request.args).
name: the name of the query parameter.
default: value to use if the parameter is absent, defaults to None.
required: whether to raise a 400 SynapseError if the
parameter is absent, defaults to False.
allowed_values: List of allowed values for the
string, or None if any value is allowed, defaults to None. Must be
the same type as name, if given.
encoding: The encoding to decode the string content with.
Returns:
A string value or the default.
Raises:
SynapseError if the parameter is absent and required, or if the
parameter is present, must be one of a list of allowed values and
is not one of those allowed values. | def parse_string_from_args(
args: Mapping[bytes, Sequence[bytes]],
name: str,
default: Optional[str] = None,
required: bool = False,
allowed_values: Optional[StrCollection] = None,
encoding: str = "ascii",
) -> Optional[str]:
"""
Parse the string parameter from the request query string list
and return the first result.
The content of the query param will be decoded to Unicode using the encoding.
Args:
args: A mapping of request args as bytes to a list of bytes (e.g. request.args).
name: the name of the query parameter.
default: value to use if the parameter is absent, defaults to None.
required: whether to raise a 400 SynapseError if the
parameter is absent, defaults to False.
allowed_values: List of allowed values for the
string, or None if any value is allowed, defaults to None. Must be
the same type as name, if given.
encoding: The encoding to decode the string content with.
Returns:
A string value or the default.
Raises:
SynapseError if the parameter is absent and required, or if the
parameter is present, must be one of a list of allowed values and
is not one of those allowed values.
"""
strings = parse_strings_from_args(
args,
name,
default=[default] if default is not None else None,
required=required,
allowed_values=allowed_values,
encoding=encoding,
)
if strings is None:
return None
return strings[0] |
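A usage sketch showing the multi-value and single-value variants on a repeated query parameter (hypothetical values):

```py
# Hypothetical query string "?filter=a&filter=b":
args = {b"filter": [b"a", b"b"]}

parse_strings_from_args(args, "filter")               # -> ["a", "b"]
parse_string_from_args(args, "filter")                # -> "a" (first value only)
parse_string_from_args(args, "missing", "fallback")   # -> "fallback"

# allowed_values constrains what is accepted:
# parse_string_from_args(args, "filter", allowed_values=["x"])  # -> 400 SynapseError
```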
Parse a JSON value from the body of a twisted HTTP request.
Args:
request: the twisted HTTP request.
allow_empty_body: if True, an empty body will be accepted and turned into None
Returns:
The JSON value.
Raises:
SynapseError if the request body couldn't be decoded as JSON. | def parse_json_value_from_request(
request: Request, allow_empty_body: bool = False
) -> Optional[JsonDict]:
"""Parse a JSON value from the body of a twisted HTTP request.
Args:
request: the twisted HTTP request.
allow_empty_body: if True, an empty body will be accepted and turned into None
Returns:
The JSON value.
Raises:
SynapseError if the request body couldn't be decoded as JSON.
"""
try:
content_bytes = request.content.read() # type: ignore
except Exception:
raise SynapseError(HTTPStatus.BAD_REQUEST, "Error reading JSON content.")
if not content_bytes and allow_empty_body:
return None
try:
content = json_decoder.decode(content_bytes.decode("utf-8"))
except Exception as e:
logger.warning(
"Unable to parse JSON from %s %s response: %s (%s)",
request.method.decode("ascii", errors="replace"),
redact_uri(request.uri.decode("ascii", errors="replace")),
e,
content_bytes,
)
raise SynapseError(
HTTPStatus.BAD_REQUEST, "Content not JSON.", errcode=Codes.NOT_JSON
)
return content |
Parse a JSON object from the body of a twisted HTTP request.
Args:
request: the twisted HTTP request.
allow_empty_body: if True, an empty body will be accepted and turned into
an empty dict.
Raises:
SynapseError if the request body couldn't be decoded as JSON or
if it wasn't a JSON object. | def parse_json_object_from_request(
request: Request, allow_empty_body: bool = False
) -> JsonDict:
"""Parse a JSON object from the body of a twisted HTTP request.
Args:
request: the twisted HTTP request.
allow_empty_body: if True, an empty body will be accepted and turned into
an empty dict.
Raises:
SynapseError if the request body couldn't be decoded as JSON or
if it wasn't a JSON object.
"""
content = parse_json_value_from_request(request, allow_empty_body=allow_empty_body)
if allow_empty_body and content is None:
return {}
if not isinstance(content, dict):
message = "Content must be a JSON object."
raise SynapseError(HTTPStatus.BAD_REQUEST, message, errcode=Codes.BAD_JSON)
return content |
Validate a deserialized JSON object using the given pydantic model.
Raises:
SynapseError if the request body couldn't be decoded as JSON or
if it wasn't a JSON object. | def validate_json_object(content: JsonDict, model_type: Type[Model]) -> Model:
"""Validate a deserialized JSON object using the given pydantic model.
Raises:
SynapseError if the request body couldn't be decoded as JSON or
if it wasn't a JSON object.
"""
try:
instance = model_type.parse_obj(content)
except ValidationError as e:
# Choose a matrix error code. The catch-all is BAD_JSON, but we try to find a
# more specific error if possible (which occasionally helps us to be spec-
compliant). This is a bit awkward because the spec's error codes aren't very
# clear-cut: BAD_JSON arguably overlaps with MISSING_PARAM and INVALID_PARAM.
errcode = Codes.BAD_JSON
raw_errors = e.raw_errors
if len(raw_errors) == 1 and isinstance(raw_errors[0], ErrorWrapper):
raw_error = raw_errors[0].exc
if isinstance(raw_error, MissingError):
errcode = Codes.MISSING_PARAM
elif isinstance(raw_error, PydanticValueError):
errcode = Codes.INVALID_PARAM
raise SynapseError(HTTPStatus.BAD_REQUEST, str(e), errcode=errcode)
return instance |
Parse a JSON object from the body of a twisted HTTP request, then deserialise and
validate using the given pydantic model.
Raises:
SynapseError if the request body couldn't be decoded as JSON or
if it wasn't a JSON object. | def parse_and_validate_json_object_from_request(
request: Request, model_type: Type[Model]
) -> Model:
"""Parse a JSON object from the body of a twisted HTTP request, then deserialise and
validate using the given pydantic model.
Raises:
SynapseError if the request body couldn't be decoded as JSON or
if it wasn't a JSON object.
"""
content = parse_json_object_from_request(request, allow_empty_body=False)
return validate_json_object(content, model_type) |
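A sketch of how a servlet handler might use this; the model class and field names are hypothetical. Synapse's `Model` bound is a pydantic v1-style model (hence `parse_obj` above), so a plain `BaseModel` subclass is assumed here:

```py
from pydantic import BaseModel, StrictStr

class SetDisplayNameBody(BaseModel):  # hypothetical request-body model
    displayname: StrictStr

# Inside an async on_PUT handler (sketch):
#   body = parse_and_validate_json_object_from_request(request, SetDisplayNameBody)
#   new_name = body.displayname
# Malformed JSON or invalid fields become a 400 SynapseError, with the errcode
# chosen by validate_json_object above (BAD_JSON, MISSING_PARAM or INVALID_PARAM).
```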
Strips sensitive information from the URI and replaces it with <redacted> | def redact_uri(uri: str) -> str:
"""Strips sensitive information from the uri replaces with <redacted>"""
uri = ACCESS_TOKEN_RE.sub(r"\1<redacted>\3", uri)
return CLIENT_SECRET_RE.sub(r"\1<redacted>\3", uri) |
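For illustration, assuming `ACCESS_TOKEN_RE` and `CLIENT_SECRET_RE` (defined elsewhere in the module) match the `access_token` and `client_secret` query parameters:

```py
# redact_uri("/_matrix/client/v3/sync?access_token=abc123&timeout=30000")
#   -> "/_matrix/client/v3/sync?access_token=<redacted>&timeout=30000"
```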
Return the full URI that was requested by the client | def get_request_uri(request: IRequest) -> bytes:
"""Return the full URI that was requested by the client"""
return b"%s://%s%s" % (
b"https" if request.isSecure() else b"http",
_get_requested_host(request),
# despite its name, "request.uri" is only the path and query-string.
request.uri,
) |
Return the last User-Agent header, or the given default. | def get_request_user_agent(request: IRequest, default: str = "") -> str:
"""Return the last User-Agent header, or the given default."""
# There could be raw utf-8 bytes in the User-Agent header.
# N.B. if you don't do this, the logger explodes cryptically
# with maximum recursion trying to log errors about
# the charset problem.
# c.f. https://github.com/matrix-org/synapse/issues/3471
h = request.getHeader(b"User-Agent")
return h.decode("ascii", "replace") if h else default |
Test if the given host name is either an IPv4 or IPv6 literal.
Args:
host: The host name to check
Returns:
True if the hostname is an IP address literal. | def _is_ip_literal(host: bytes) -> bool:
"""Test if the given host name is either an IPv4 or IPv6 literal.
Args:
host: The host name to check
Returns:
True if the hostname is an IP address literal.
"""
host_str = host.decode("ascii")
try:
IPAddress(host_str)
return True
except AddrFormatError:
return False |
Given a list of SRV records sort them into priority order and shuffle
each priority with the given weight. | def _sort_server_list(server_list: List[Server]) -> List[Server]:
"""Given a list of SRV records sort them into priority order and shuffle
each priority with the given weight.
"""
priority_map: Dict[int, List[Server]] = {}
for server in server_list:
priority_map.setdefault(server.priority, []).append(server)
results = []
for priority in sorted(priority_map):
servers = priority_map[priority]
# This algorithm roughly follows the one described in RFC2782,
# changed to remove an off-by-one error.
#
# N.B. Weights can be zero, which means that they should be picked
# rarely.
total_weight = sum(s.weight for s in servers)
# Total weight can become zero if there are only zero weight servers
# left, which we handle by just shuffling and appending to the results.
while servers and total_weight:
target_weight = random.randint(1, total_weight)
for s in servers:
target_weight -= s.weight
if target_weight <= 0:
break
results.append(s)
servers.remove(s)
total_weight -= s.weight
if servers:
random.shuffle(servers)
results.extend(servers)
return results |
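To make the RFC 2782 behaviour concrete, here is a standalone sketch of the weighted pick performed inside the loop above, using plain (name, weight) pairs rather than `Server` objects:

```py
import random

def pick_weighted(servers: list) -> str:
    """One round of the weighted selection: `servers` is a non-empty list of
    (name, weight) pairs whose weights sum to more than zero."""
    total_weight = sum(weight for _, weight in servers)
    target_weight = random.randint(1, total_weight)
    for name, weight in servers:
        target_weight -= weight
        if target_weight <= 0:
            return name
    return servers[-1][0]  # unreachable when total_weight > 0

# pick_weighted([("a", 5), ("b", 1)]) returns "a" about 5 times out of 6.
# A zero-weight entry can never win this pick, which is why the code above
# falls back to shuffling any zero-weight servers that remain.
```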
Get the current logging context from thread local storage | def current_context() -> LoggingContextOrSentinel:
"""Get the current logging context from thread local storage"""
return getattr(_thread_local, "current_context", SENTINEL_CONTEXT) |
Set the current logging context in thread local storage
Args:
context: The context to activate.
Returns:
The context that was previously active | def set_current_context(context: LoggingContextOrSentinel) -> LoggingContextOrSentinel:
"""Set the current logging context in thread local storage
Args:
context: The context to activate.
Returns:
The context that was previously active
"""
# everything blows up if we allow current_context to be set to None, so sanity-check
# that now.
if context is None:
raise TypeError("'context' argument may not be None")
current = current_context()
if current is not context:
rusage = get_thread_resource_usage()
current.stop(rusage)
_thread_local.current_context = context
context.start(rusage)
return current |
Creates a new logging context as a child of another.
The nested logging context will have a 'name' made up of the parent context's
name, plus the given suffix.
CPU/db usage stats will be added to the parent context's on exit.
Normal usage looks like:
with nested_logging_context(suffix):
# ... do stuff
Args:
suffix: suffix to add to the parent context's 'name'.
Returns:
A new logging context. | def nested_logging_context(suffix: str) -> LoggingContext:
"""Creates a new logging context as a child of another.
The nested logging context will have a 'name' made up of the parent context's
name, plus the given suffix.
CPU/db usage stats will be added to the parent context's on exit.
Normal usage looks like:
with nested_logging_context(suffix):
# ... do stuff
Args:
suffix: suffix to add to the parent context's 'name'.
Returns:
A new logging context.
"""
curr_context = current_context()
if not curr_context:
logger.warning(
"Starting nested logging context from sentinel context: metrics will be lost"
)
parent_context = None
else:
assert isinstance(curr_context, LoggingContext)
parent_context = curr_context
prefix = str(curr_context)
return LoggingContext(
prefix + "-" + suffix,
parent_context=parent_context,
) |
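A usage sketch (assuming `LoggingContext` comes from the same logging-context module as this helper):

```py
with LoggingContext("GET-1234"):
    with nested_logging_context("resolve_state"):
        ...  # runs under a child context named after the parent plus
             # "-resolve_state"; its CPU/db usage is added back onto the
             # parent context when the block exits
```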
Function decorator which wraps the function with run_in_background | def preserve_fn(
f: Union[
Callable[P, R],
Callable[P, Awaitable[R]],
]
) -> Callable[P, "defer.Deferred[R]"]:
"""Function decorator which wraps the function with run_in_background"""
def g(*args: P.args, **kwargs: P.kwargs) -> "defer.Deferred[R]":
return run_in_background(f, *args, **kwargs)
return g |
Calls a function, ensuring that the current context is restored after
return from the function, and that the sentinel context is set once the
deferred returned by the function completes.
Useful for wrapping functions that return a deferred or coroutine, which you don't
yield or await on (for instance because you want to pass it to
deferred.gatherResults()).
If f returns a Coroutine object, it will be wrapped into a Deferred (which will have
the side effect of executing the coroutine).
Note that if you completely discard the result, you should make sure that
`f` doesn't raise any deferred exceptions, otherwise a scary-looking
CRITICAL error about an unhandled error will be logged without much
indication about where it came from. | def run_in_background( # type: ignore[misc]
# The `type: ignore[misc]` above suppresses
# "Overloaded function implementation does not accept all possible arguments of signature 1"
# "Overloaded function implementation does not accept all possible arguments of signature 2"
# which seems like a bug in mypy.
f: Union[
Callable[P, R],
Callable[P, Awaitable[R]],
],
*args: P.args,
**kwargs: P.kwargs,
) -> "defer.Deferred[R]":
"""Calls a function, ensuring that the current context is restored after
return from the function, and that the sentinel context is set once the
deferred returned by the function completes.
Useful for wrapping functions that return a deferred or coroutine, which you don't
yield or await on (for instance because you want to pass it to
deferred.gatherResults()).
If f returns a Coroutine object, it will be wrapped into a Deferred (which will have
the side effect of executing the coroutine).
Note that if you completely discard the result, you should make sure that
`f` doesn't raise any deferred exceptions, otherwise a scary-looking
CRITICAL error about an unhandled error will be logged without much
indication about where it came from.
"""
current = current_context()
try:
res = f(*args, **kwargs)
except Exception:
# the assumption here is that the caller doesn't want to be disturbed
# by synchronous exceptions, so let's turn them into Failures.
return defer.fail()
# `res` may be a coroutine, `Deferred`, some other kind of awaitable, or a plain
# value. Convert it to a `Deferred`.
d: "defer.Deferred[R]"
if isinstance(res, typing.Coroutine):
# Wrap the coroutine in a `Deferred`.
d = defer.ensureDeferred(res)
elif isinstance(res, defer.Deferred):
d = res
elif isinstance(res, Awaitable):
# `res` is probably some kind of completed awaitable, such as a `DoneAwaitable`
# or `Future` from `make_awaitable`.
d = defer.ensureDeferred(_unwrap_awaitable(res))
else:
# `res` is a plain value. Wrap it in a `Deferred`.
d = defer.succeed(res)
if d.called and not d.paused:
# The function should have maintained the logcontext, so we can
# optimise out the messing about
return d
# The function may have reset the context before returning, so
# we need to restore it now.
ctx = set_current_context(current)
# The original context will be restored when the deferred
# completes, but there is nothing waiting for it, so it will
# get leaked into the reactor or some other function which
# wasn't expecting it. We therefore need to reset the context
# here.
#
# (If this feels asymmetric, consider it this way: we are
# effectively forking a new thread of execution. We are
# probably currently within a ``with LoggingContext()`` block,
# which is supposed to have a single entry and exit point. But
# by spawning off another deferred, we are effectively
# adding a new exit point.)
d.addBoth(_set_context_cb, ctx)
return d |
Given a deferred, make it follow the Synapse logcontext rules:
If the deferred has completed, essentially does nothing (just returns another
completed deferred with the result/failure).
If the deferred has not yet completed, resets the logcontext before
returning a deferred. Then, when the deferred completes, restores the
current logcontext before running callbacks/errbacks.
(This is more-or-less the opposite operation to run_in_background.) | def make_deferred_yieldable(deferred: "defer.Deferred[T]") -> "defer.Deferred[T]":
"""Given a deferred, make it follow the Synapse logcontext rules:
If the deferred has completed, essentially does nothing (just returns another
completed deferred with the result/failure).
If the deferred has not yet completed, resets the logcontext before
returning a deferred. Then, when the deferred completes, restores the
current logcontext before running callbacks/errbacks.
(This is more-or-less the opposite operation to run_in_background.)
"""
if deferred.called and not deferred.paused:
# it looks like this deferred is ready to run any callbacks we give it
# immediately. We may as well optimise out the logcontext faffery.
return deferred
# ok, we can't be sure that a yield won't block, so let's reset the
# logcontext, and add a callback to the deferred to restore it.
prev_context = set_current_context(SENTINEL_CONTEXT)
deferred.addBoth(_set_context_cb, prev_context)
return deferred |
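A sketch of the two helpers used together in a request handler; `update_cache` and `fetch_remote` are stand-ins invented for the example:

```py
from twisted.internet import defer

async def update_cache() -> None:
    ...  # some background housekeeping

def fetch_remote() -> "defer.Deferred[str]":
    return defer.succeed("remote-data")  # stand-in for a real network call

async def handle_request() -> str:
    # Fire-and-forget: our logcontext is restored before we carry on, and
    # update_cache's context does not leak into the reactor.
    run_in_background(update_cache)

    # Re-join a "raw" Deferred safely: the logcontext is reset now and
    # restored when the deferred completes, per the rules described above.
    return await make_deferred_yieldable(fetch_remote())
```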
A callback function which just sets the logging context | def _set_context_cb(result: ResultT, context: LoggingContextOrSentinel) -> ResultT:
"""A callback function which just sets the logging context"""
set_current_context(context)
return result |
Calls the function `f` using a thread from the reactor's default threadpool and
returns the result as a Deferred.
Creates a new logcontext for `f`, which is created as a child of the current
logcontext (so its CPU usage metrics will get attributed to the current
logcontext). `f` should preserve the logcontext it is given.
The result deferred follows the Synapse logcontext rules: you should `yield`
on it.
Args:
reactor: The reactor in whose main thread the Deferred will be invoked,
and whose threadpool we should use for the function.
Normally this will be hs.get_reactor().
f: The function to call.
args: positional arguments to pass to f.
kwargs: keyword arguments to pass to f.
Returns:
A Deferred which fires a callback with the result of `f`, or an
errback if `f` throws an exception. | def defer_to_thread(
reactor: "ISynapseReactor", f: Callable[P, R], *args: P.args, **kwargs: P.kwargs
) -> "defer.Deferred[R]":
"""
Calls the function `f` using a thread from the reactor's default threadpool and
returns the result as a Deferred.
Creates a new logcontext for `f`, which is created as a child of the current
logcontext (so its CPU usage metrics will get attributed to the current
logcontext). `f` should preserve the logcontext it is given.
The result deferred follows the Synapse logcontext rules: you should `yield`
on it.
Args:
reactor: The reactor in whose main thread the Deferred will be invoked,
and whose threadpool we should use for the function.
Normally this will be hs.get_reactor().
f: The function to call.
args: positional arguments to pass to f.
kwargs: keyword arguments to pass to f.
Returns:
A Deferred which fires a callback with the result of `f`, or an
errback if `f` throws an exception.
"""
return defer_to_threadpool(reactor, reactor.getThreadPool(), f, *args, **kwargs) |
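A usage sketch; `hs` is assumed to be a Synapse `HomeServer`, and the blocking helper is invented for the example:

```py
def load_file_from_disk(path: str) -> bytes:
    # deliberately blocking I/O, kept off the reactor thread
    with open(path, "rb") as f:
        return f.read()

async def read_blob(hs, path: str) -> bytes:
    return await make_deferred_yieldable(
        defer_to_thread(hs.get_reactor(), load_file_from_disk, path)
    )
```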
A wrapper for twisted.internet.threads.deferToThreadpool, which handles
logcontexts correctly.
Calls the function `f` using a thread from the given threadpool and returns
the result as a Deferred.
Creates a new logcontext for `f`, which is created as a child of the current
logcontext (so its CPU usage metrics will get attributed to the current
logcontext). `f` should preserve the logcontext it is given.
The result deferred follows the Synapse logcontext rules: you should `yield`
on it.
Args:
reactor: The reactor in whose main thread the Deferred will be invoked.
Normally this will be hs.get_reactor().
threadpool: The threadpool to use for running `f`. Normally this will be
hs.get_reactor().getThreadPool().
f: The function to call.
args: positional arguments to pass to f.
kwargs: keyword arguments to pass to f.
Returns:
A Deferred which fires a callback with the result of `f`, or an
errback if `f` throws an exception. | def defer_to_threadpool(
reactor: "ISynapseReactor",
threadpool: ThreadPool,
f: Callable[P, R],
*args: P.args,
**kwargs: P.kwargs,
) -> "defer.Deferred[R]":
"""
A wrapper for twisted.internet.threads.deferToThreadpool, which handles
logcontexts correctly.
Calls the function `f` using a thread from the given threadpool and returns
the result as a Deferred.
Creates a new logcontext for `f`, which is created as a child of the current
logcontext (so its CPU usage metrics will get attributed to the current
logcontext). `f` should preserve the logcontext it is given.
The result deferred follows the Synapse logcontext rules: you should `yield`
on it.
Args:
reactor: The reactor in whose main thread the Deferred will be invoked.
Normally this will be hs.get_reactor().
threadpool: The threadpool to use for running `f`. Normally this will be
hs.get_reactor().getThreadPool().
f: The function to call.
args: positional arguments to pass to f.
kwargs: keyword arguments to pass to f.
Returns:
A Deferred which fires a callback with the result of `f`, or an
errback if `f` throws an exception.
"""
curr_context = current_context()
if not curr_context:
logger.warning(
"Calling defer_to_threadpool from sentinel context: metrics will be lost"
)
parent_context = None
else:
assert isinstance(curr_context, LoggingContext)
parent_context = curr_context
def g() -> R:
with LoggingContext(str(curr_context), parent_context=parent_context):
return f(*args, **kwargs)
return make_deferred_yieldable(threads.deferToThreadPool(reactor, threadpool, g)) |
Executes the function only if we're tracing. Otherwise returns None. | def only_if_tracing(func: Callable[P, R]) -> Callable[P, Optional[R]]:
"""Executes the function only if we're tracing. Otherwise returns None."""
@wraps(func)
def _only_if_tracing_inner(*args: P.args, **kwargs: P.kwargs) -> Optional[R]:
if opentracing:
return func(*args, **kwargs)
else:
return None
return _only_if_tracing_inner |
Executes the operation only if opentracing is enabled and there is an active span.
If there is no active span it logs the message at the error level.
Args:
message: Message which fills in "There was no active span when trying to %s"
in the error log if there is no active span and opentracing is enabled.
ret: return value if opentracing is None or there is no active span.
Returns:
The result of the func, falling back to ret if opentracing is disabled or there
was no active span. | def ensure_active_span(
message: str, ret: Optional[T] = None
) -> Callable[[Callable[P, R]], Callable[P, Union[Optional[T], R]]]:
"""Executes the operation only if opentracing is enabled and there is an active span.
If there is no active span it logs the message at the error level.
Args:
message: Message which fills in "There was no active span when trying to %s"
in the error log if there is no active span and opentracing is enabled.
ret: return value if opentracing is None or there is no active span.
Returns:
The result of the func, falling back to ret if opentracing is disabled or there
was no active span.
"""
def ensure_active_span_inner_1(
func: Callable[P, R]
) -> Callable[P, Union[Optional[T], R]]:
@wraps(func)
def ensure_active_span_inner_2(
*args: P.args, **kwargs: P.kwargs
) -> Union[Optional[T], R]:
if not opentracing:
return ret
if not opentracing.tracer.active_span:
logger.error(
"There was no active span when trying to %s."
" Did you forget to start one or did a context slip?",
message,
stack_info=True,
)
return ret
return func(*args, **kwargs)
return ensure_active_span_inner_2
return ensure_active_span_inner_1 |
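A usage sketch: the decorated helper quietly becomes a no-op (returning `ret`) when tracing is disabled or no span is active, logging an error in the latter case:

```py
@ensure_active_span("record the event count", ret=None)
def record_event_count(n: int) -> None:
    # set_tag (defined later in this module) writes onto the active span
    set_tag("event_count", n)
```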
Set the whitelists and initialise the JaegerClient tracer | def init_tracer(hs: "HomeServer") -> None:
"""Set the whitelists and initialise the JaegerClient tracer"""
global opentracing
if not hs.config.tracing.opentracer_enabled:
# We don't have a tracer
opentracing = None # type: ignore[assignment]
return
if opentracing is None or JaegerConfig is None:
raise ConfigError(
"The server has been configured to use opentracing but opentracing is not "
"installed."
)
# Pull out the jaeger config if it was given. Otherwise set it to something sensible.
# See https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/config.py
set_homeserver_whitelist(hs.config.tracing.opentracer_whitelist)
from jaeger_client.metrics.prometheus import PrometheusMetricsFactory
# Instance names are opaque strings but by stripping off the number suffix,
# we can get something that looks like a "worker type", e.g.
# "client_reader-1" -> "client_reader" so we don't spread the traces across
# so many services.
instance_name_by_type = re.sub(
STRIP_INSTANCE_NUMBER_SUFFIX_REGEX, "", hs.get_instance_name()
)
jaeger_config = hs.config.tracing.jaeger_config
tags = jaeger_config.setdefault("tags", {})
# tag the Synapse instance name so that it's an easy jumping
# off point into the logs. Can also be used to filter for an
# instance that is under load.
tags[SynapseTags.INSTANCE_NAME] = hs.get_instance_name()
config = JaegerConfig(
config=jaeger_config,
service_name=f"{hs.config.server.server_name} {instance_name_by_type}",
scope_manager=LogContextScopeManager(),
metrics_factory=PrometheusMetricsFactory(),
)
# If we have the rust jaeger reporter available let's use that.
if RustReporter:
logger.info("Using rust_python_jaeger_reporter library")
assert config.sampler is not None
tracer = config.create_tracer(RustReporter(), config.sampler)
opentracing.set_global_tracer(tracer)
else:
config.initialize_tracer() |
Sets the homeserver whitelist
Args:
homeserver_whitelist: regexes specifying whitelisted homeservers | def set_homeserver_whitelist(homeserver_whitelist: Iterable[str]) -> None:
"""Sets the homeserver whitelist
Args:
homeserver_whitelist: regexes specifying whitelisted homeservers
"""
global _homeserver_whitelist
if homeserver_whitelist:
# Makes a single regex which accepts all passed in regexes in the list
_homeserver_whitelist = re.compile(
"({})".format(")|(".join(homeserver_whitelist))
) |
Checks if a destination matches the whitelist
Args:
destination: the server name to check against the whitelist. | def whitelisted_homeserver(destination: str) -> bool:
"""Checks if a destination matches the whitelist
Args:
destination: the server name to check against the whitelist.
"""
if _homeserver_whitelist:
return _homeserver_whitelist.match(destination) is not None
return False |
Starts an active opentracing span.
Records the start time for the span, and sets it as the "active span" in the
scope manager.
Args:
See opentracing.tracer
Returns:
scope (Scope) or contextlib.nullcontext | def start_active_span(
operation_name: str,
child_of: Optional[Union["opentracing.Span", "opentracing.SpanContext"]] = None,
references: Optional[List["opentracing.Reference"]] = None,
tags: Optional[Dict[str, str]] = None,
start_time: Optional[float] = None,
ignore_active_span: bool = False,
finish_on_close: bool = True,
*,
tracer: Optional["opentracing.Tracer"] = None,
) -> "opentracing.Scope":
"""Starts an active opentracing span.
Records the start time for the span, and sets it as the "active span" in the
scope manager.
Args:
See opentracing.tracer
Returns:
scope (Scope) or contextlib.nullcontext
"""
if opentracing is None:
return contextlib.nullcontext() # type: ignore[unreachable]
if tracer is None:
# use the global tracer by default
tracer = opentracing.tracer
return tracer.start_active_span(
operation_name,
child_of=child_of,
references=references,
tags=tags,
start_time=start_time,
ignore_active_span=ignore_active_span,
finish_on_close=finish_on_close,
) |
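A usage sketch (the operation name and tag values are invented):

```py
with start_active_span("compute_state", tags={"room_id": "!abc:example.org"}):
    ...  # set_tag / log_kv calls made here attach to this span, and any
         # spans started inside the block become its children
```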
Starts an active opentracing span, with additional references to previous spans
Args:
operation_name: name of the operation represented by the new span
contexts: the previous spans to inherit from
child_of: optionally override the parent span. If unset, the currently active
span will be the parent. (If there is no currently active span, the first
span in `contexts` will be the parent.)
start_time: optional override for the start time of the created span. Seconds
since the epoch.
inherit_force_tracing: if set, and any of the previous contexts have had tracing
forced, the new span will also have tracing forced.
tracer: override the opentracing tracer. By default the global tracer is used. | def start_active_span_follows_from(
operation_name: str,
contexts: Collection,
child_of: Optional[Union["opentracing.Span", "opentracing.SpanContext"]] = None,
start_time: Optional[float] = None,
*,
inherit_force_tracing: bool = False,
tracer: Optional["opentracing.Tracer"] = None,
) -> "opentracing.Scope":
"""Starts an active opentracing span, with additional references to previous spans
Args:
operation_name: name of the operation represented by the new span
contexts: the previous spans to inherit from
child_of: optionally override the parent span. If unset, the currently active
span will be the parent. (If there is no currently active span, the first
span in `contexts` will be the parent.)
start_time: optional override for the start time of the created span. Seconds
since the epoch.
inherit_force_tracing: if set, and any of the previous contexts have had tracing
forced, the new span will also have tracing forced.
tracer: override the opentracing tracer. By default the global tracer is used.
"""
if opentracing is None:
return contextlib.nullcontext() # type: ignore[unreachable]
references = [opentracing.follows_from(context) for context in contexts]
scope = start_active_span(
operation_name,
child_of=child_of,
references=references,
start_time=start_time,
tracer=tracer,
)
if inherit_force_tracing and any(
is_context_forced_tracing(ctx) for ctx in contexts
):
force_tracing(scope.span)
return scope |
Extracts a span context from an edu and uses it to start a new active span
Args:
edu_content: an edu_content with a `context` field whose value is
canonical json for a dict which contains opentracing information.
For the other args see opentracing.tracer | def start_active_span_from_edu(
edu_content: Dict[str, Any],
operation_name: str,
references: Optional[List["opentracing.Reference"]] = None,
tags: Optional[Dict[str, str]] = None,
start_time: Optional[float] = None,
ignore_active_span: bool = False,
finish_on_close: bool = True,
) -> "opentracing.Scope":
"""
Extracts a span context from an edu and uses it to start a new active span
Args:
edu_content: an edu_content with a `context` field whose value is
canonical json for a dict which contains opentracing information.
For the other args see opentracing.tracer
"""
references = references or []
if opentracing is None:
return contextlib.nullcontext() # type: ignore[unreachable]
carrier = json_decoder.decode(edu_content.get("context", "{}")).get(
"opentracing", {}
)
context = opentracing.tracer.extract(opentracing.Format.TEXT_MAP, carrier)
_references = [
opentracing.child_of(span_context_from_string(x))
for x in carrier.get("references", [])
]
# For some reason jaeger decided not to support the visualization of multiple parent
# spans or explicitly show references. I include the span context as a tag here as
# an aid to people debugging but it's really not an ideal solution.
references += _references
scope = opentracing.tracer.start_active_span(
operation_name,
child_of=context,
references=references,
tags=tags,
start_time=start_time,
ignore_active_span=ignore_active_span,
finish_on_close=finish_on_close,
)
scope.span.set_tag("references", carrier.get("references", []))
return scope |
Get the currently active span, if any | def active_span() -> Optional["opentracing.Span"]:
"""Get the currently active span, if any"""
return opentracing.tracer.active_span |
Sets a tag on the active span | def set_tag(key: str, value: Union[str, bool, int, float]) -> None:
"""Sets a tag on the active span"""
assert opentracing.tracer.active_span is not None
opentracing.tracer.active_span.set_tag(key, value) |
Log to the active span | def log_kv(key_values: Dict[str, Any], timestamp: Optional[float] = None) -> None:
"""Log to the active span"""
assert opentracing.tracer.active_span is not None
opentracing.tracer.active_span.log_kv(key_values, timestamp) |
Sets the operation name of the active span | def set_operation_name(operation_name: str) -> None:
"""Sets the operation name of the active span"""
assert opentracing.tracer.active_span is not None
opentracing.tracer.active_span.set_operation_name(operation_name) |
Force sampling for the active/given span and its children.
Args:
span: span to force tracing for. By default, the active span. | def force_tracing(
span: Union["opentracing.Span", _Sentinel] = _Sentinel.sentinel
) -> None:
"""Force sampling for the active/given span and its children.
Args:
span: span to force tracing for. By default, the active span.
"""
if isinstance(span, _Sentinel):
span_to_trace = opentracing.tracer.active_span
else:
span_to_trace = span
if span_to_trace is None:
logger.error("No active span in force_tracing")
return
span_to_trace.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1)
# also set a bit of baggage, so that we have a way of figuring out if
# it is enabled later
span_to_trace.set_baggage_item(SynapseBaggage.FORCE_TRACING, "1") |
Check if sampling has been forced for the given span context. | def is_context_forced_tracing(
span_context: Optional["opentracing.SpanContext"],
) -> bool:
"""Check if sampling has been force for the given span context."""
if span_context is None:
return False
return span_context.baggage.get(SynapseBaggage.FORCE_TRACING) is not None |
Injects a span context into a dict of HTTP headers
Args:
headers: the dict to inject headers into
destination: address of entity receiving the span context. Must be given unless
check_destination is False. The context will only be injected if the
destination matches the opentracing whitelist
check_destination: If false, destination will be ignored and the context
will always be injected.
Note:
The headers set by the tracer are custom to the tracer implementation which
should be unique enough that they don't interfere with any headers set by
synapse or twisted. If we're still using jaeger these headers would be those
here:
https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/constants.py | def inject_header_dict(
headers: Dict[bytes, List[bytes]],
destination: Optional[str] = None,
check_destination: bool = True,
) -> None:
"""
Injects a span context into a dict of HTTP headers
Args:
headers: the dict to inject headers into
destination: address of entity receiving the span context. Must be given unless
check_destination is False. The context will only be injected if the
destination matches the opentracing whitelist
check_destination: If false, destination will be ignored and the context
will always be injected.
Note:
The headers set by the tracer are custom to the tracer implementation which
should be unique enough that they don't interfere with any headers set by
synapse or twisted. If we're still using jaeger these headers would be those
here:
https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/constants.py
"""
if check_destination:
if destination is None:
raise ValueError(
"destination must be given unless check_destination is False"
)
if not whitelisted_homeserver(destination):
return
span = opentracing.tracer.active_span
carrier: Dict[str, str] = {}
assert span is not None
opentracing.tracer.inject(span.context, opentracing.Format.HTTP_HEADERS, carrier)
for key, value in carrier.items():
headers[key.encode()] = [value.encode()] |
Inject the current trace id into the HTTP response headers | def inject_response_headers(response_headers: Headers) -> None:
"""Inject the current trace id into the HTTP response headers"""
if not opentracing:
return
span = opentracing.tracer.active_span
if not span:
return
# This is a bit implementation-specific.
#
# Jaeger's Spans have a trace_id property; other implementations (including the
# dummy opentracing.span.Span which we use if init_tracer is not called) do not
# expose it
trace_id = getattr(span, "trace_id", None)
if trace_id is not None:
response_headers.addRawHeader("Synapse-Trace-Id", f"{trace_id:x}") |
Gets a span context as a dict. This can be used instead of manually
injecting a span into an empty carrier.
Args:
destination: the name of the remote server.
Returns:
the active span's context if opentracing is enabled, otherwise empty. | def get_active_span_text_map(destination: Optional[str] = None) -> Dict[str, str]:
"""
Gets a span context as a dict. This can be used instead of manually
injecting a span into an empty carrier.
Args:
destination: the name of the remote server.
Returns:
the active span's context if opentracing is enabled, otherwise empty.
"""
if destination and not whitelisted_homeserver(destination):
return {}
carrier: Dict[str, str] = {}
assert opentracing.tracer.active_span is not None
opentracing.tracer.inject(
opentracing.tracer.active_span.context, opentracing.Format.TEXT_MAP, carrier
)
return carrier |
Returns:
The active span context encoded as a string. | def active_span_context_as_string() -> str:
"""
Returns:
The active span context encoded as a string.
"""
carrier: Dict[str, str] = {}
if opentracing:
assert opentracing.tracer.active_span is not None
opentracing.tracer.inject(
opentracing.tracer.active_span.context, opentracing.Format.TEXT_MAP, carrier
)
return json_encoder.encode(carrier) |
Extract an opentracing context from the headers on an HTTP request
This is useful when we have received an HTTP request from another part of our
system, and want to link our spans to those of the remote system. | def span_context_from_request(request: Request) -> "Optional[opentracing.SpanContext]":
"""Extract an opentracing context from the headers on an HTTP request
This is useful when we have received an HTTP request from another part of our
system, and want to link our spans to those of the remote system.
"""
if not opentracing:
return None
header_dict = {
k.decode(): v[0].decode() for k, v in request.requestHeaders.getAllRawHeaders()
}
return opentracing.tracer.extract(opentracing.Format.HTTP_HEADERS, header_dict) |
Returns:
The active span context decoded from a string. | def span_context_from_string(carrier: str) -> Optional["opentracing.SpanContext"]:
"""
Returns:
The active span context decoded from a string.
"""
payload: Dict[str, str] = json_decoder.decode(carrier)
return opentracing.tracer.extract(opentracing.Format.TEXT_MAP, payload) |
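A sketch of the round trip these helpers enable: serialise the active span's context (for example to stash alongside queued work), then later rebuild it and link a new span back to it. It assumes a span is active when the string is captured:

```py
stashed = active_span_context_as_string()  # e.g. persisted with the work item

# ... later, possibly on another worker ...
restored = span_context_from_string(stashed)
with start_active_span_follows_from("process_stashed_work", contexts=[restored]):
    ...  # this span is linked back to the original one in the trace
```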
Wrapper method for opentracing's tracer.extract for TEXT_MAP.
Args:
carrier: a dict possibly containing a span context.
Returns:
The active span context extracted from carrier. | def extract_text_map(carrier: Dict[str, str]) -> Optional["opentracing.SpanContext"]:
"""
Wrapper method for opentracing's tracer.extract for TEXT_MAP.
Args:
carrier: a dict possibly containing a span context.
Returns:
The active span context extracted from carrier.
"""
return opentracing.tracer.extract(opentracing.Format.TEXT_MAP, carrier) |
Decorates a function that is sync or async (coroutines), or that returns a Twisted
`Deferred`. The custom business logic of the decorator goes in `wrapping_logic`.
Example usage:
```py
# Decorator to time the function and log it out
def duration(func: Callable[P, R]) -> Callable[P, R]:
@contextlib.contextmanager
def _wrapping_logic(func: Callable[P, R], *args: P.args, **kwargs: P.kwargs) -> Generator[None, None, None]:
start_ts = time.time()
try:
yield
finally:
end_ts = time.time()
duration = end_ts - start_ts
logger.info("%s took %s seconds", func.__name__, duration)
return _custom_sync_async_decorator(func, _wrapping_logic)
```
Args:
func: The function to be decorated
wrapping_logic: The business logic of your custom decorator.
This should be a ContextManager so you are able to run your logic
before/after the function as desired. | def _custom_sync_async_decorator(
func: Callable[P, R],
wrapping_logic: Callable[Concatenate[Callable[P, R], P], ContextManager[None]],
) -> Callable[P, R]:
"""
Decorates a function that is sync or async (coroutines), or that returns a Twisted
`Deferred`. The custom business logic of the decorator goes in `wrapping_logic`.
Example usage:
```py
# Decorator to time the function and log it out
def duration(func: Callable[P, R]) -> Callable[P, R]:
@contextlib.contextmanager
def _wrapping_logic(func: Callable[P, R], *args: P.args, **kwargs: P.kwargs) -> Generator[None, None, None]:
start_ts = time.time()
try:
yield
finally:
end_ts = time.time()
duration = end_ts - start_ts
logger.info("%s took %s seconds", func.__name__, duration)
return _custom_sync_async_decorator(func, _wrapping_logic)
```
Args:
func: The function to be decorated
wrapping_logic: The business logic of your custom decorator.
This should be a ContextManager so you are able to run your logic
before/after the function as desired.
"""
if inspect.iscoroutinefunction(func):
# For this branch, we handle async functions like `async def func() -> RInner`.
# In this branch, R = Awaitable[RInner], for some other type RInner
@wraps(func)
async def _wrapper(
*args: P.args, **kwargs: P.kwargs
) -> Any: # Return type is RInner
# type-ignore: func() returns R, but mypy doesn't know that R is
# Awaitable here.
with wrapping_logic(func, *args, **kwargs): # type: ignore[arg-type]
return await func(*args, **kwargs)
else:
# The other case here handles sync functions including those decorated with
# `@defer.inlineCallbacks` or that return a `Deferred` or other `Awaitable`.
@wraps(func)
def _wrapper(*args: P.args, **kwargs: P.kwargs) -> Any:
scope = wrapping_logic(func, *args, **kwargs)
scope.__enter__()
try:
result = func(*args, **kwargs)
if isinstance(result, defer.Deferred):
def call_back(result: R) -> R:
scope.__exit__(None, None, None)
return result
def err_back(result: R) -> R:
# TODO: Pass the error details into `scope.__exit__(...)` for
# consistency with the other paths.
scope.__exit__(None, None, None)
return result
result.addCallbacks(call_back, err_back)
elif inspect.isawaitable(result):
async def wrap_awaitable() -> Any:
try:
assert isinstance(result, Awaitable)
awaited_result = await result
scope.__exit__(None, None, None)
return awaited_result
except Exception as e:
scope.__exit__(type(e), None, e.__traceback__)
raise
# The original method returned an awaitable, eg. a coroutine, so we
# create another awaitable wrapping it that calls
# `scope.__exit__(...)`.
return wrap_awaitable()
else:
# Just a simple sync function so we can just exit the scope and
# return the result without any fuss.
scope.__exit__(None, None, None)
return result
except Exception as e:
scope.__exit__(type(e), None, e.__traceback__)
raise
return _wrapper |
Decorator to trace a function with a custom opname.
See the module's doc string for usage examples. | def trace_with_opname(
opname: str,
*,
tracer: Optional["opentracing.Tracer"] = None,
) -> Callable[[Callable[P, R]], Callable[P, R]]:
"""
Decorator to trace a function with a custom opname.
See the module's doc string for usage examples.
"""
@contextlib.contextmanager
def _wrapping_logic(
func: Callable[P, R], *args: P.args, **kwargs: P.kwargs
) -> Generator[None, None, None]:
with start_active_span(opname, tracer=tracer):
yield
def _decorator(func: Callable[P, R]) -> Callable[P, R]:
if not opentracing:
return func
return _custom_sync_async_decorator(func, _wrapping_logic)
return _decorator |
Decorator to trace a function.
Sets the operation name to that of the function's name.
See the module's doc string for usage examples. | def trace(func: Callable[P, R]) -> Callable[P, R]:
"""
Decorator to trace a function.
Sets the operation name to that of the function's name.
See the module's doc string for usage examples.
"""
return trace_with_opname(func.__name__)(func) |
Decorator to tag all of the args to the active span.
Args:
func: `func` is assumed to be a method taking a `self` parameter, or a
`classmethod` taking a `cls` parameter. In either case, a tag is not
created for this parameter. | def tag_args(func: Callable[P, R]) -> Callable[P, R]:
"""
Decorator to tag all of the args to the active span.
Args:
func: `func` is assumed to be a method taking a `self` parameter, or a
`classmethod` taking a `cls` parameter. In either case, a tag is not
created for this parameter.
"""
if not opentracing:
return func
# getfullargspec is somewhat expensive, so ensure it is only called a single
# time (the function signature shouldn't change anyway).
argspec = inspect.getfullargspec(func)
@contextlib.contextmanager
def _wrapping_logic(
_func: Callable[P, R], *args: P.args, **kwargs: P.kwargs
) -> Generator[None, None, None]:
# We use `[1:]` to skip the `self` object reference and `start=1` to
# make the index line up with `argspec.args`.
#
# FIXME: We could update this to handle any type of function by ignoring the
# first argument only if it's named `self` or `cls`. This isn't fool-proof
# but handles the idiomatic cases.
for i, arg in enumerate(args[1:], start=1):
set_tag(SynapseTags.FUNC_ARG_PREFIX + argspec.args[i], str(arg))
set_tag(SynapseTags.FUNC_ARGS, str(args[len(argspec.args) :]))
set_tag(SynapseTags.FUNC_KWARGS, str(kwargs))
yield
return _custom_sync_async_decorator(func, _wrapping_logic) |
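A usage sketch combining the two decorators on a hypothetical method:

```py
class RoomWorker:
    @trace
    @tag_args
    def purge_history(self, room_id: str, before_ts: int) -> None:
        ...  # the span is named "purge_history"; room_id and before_ts are
             # recorded as SynapseTags.FUNC_ARG_PREFIX-prefixed tags
```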
Returns a context manager which traces a request. It starts a span
with some servlet specific tags such as the request metrics name and
request information.
Args:
request
extract_context: Whether to attempt to extract the opentracing
context from the request the servlet is handling. | def trace_servlet(
request: "SynapseRequest", extract_context: bool = False
) -> Generator[None, None, None]:
"""Returns a context manager which traces a request. It starts a span
with some servlet specific tags such as the request metrics name and
request information.
Args:
request
extract_context: Whether to attempt to extract the opentracing
context from the request the servlet is handling.
"""
if opentracing is None:
yield # type: ignore[unreachable]
return
request_tags = {
SynapseTags.REQUEST_ID: request.get_request_id(),
tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
tags.HTTP_METHOD: request.get_method(),
tags.HTTP_URL: request.get_redacted_uri(),
tags.PEER_HOST_IPV6: request.get_client_ip_if_available(),
}
request_name = request.request_metrics.name
context = span_context_from_request(request) if extract_context else None
# we configure the scope not to finish the span immediately on exit, and instead
# pass the span into the SynapseRequest, which will finish it once we've finished
# sending the response to the client.
scope = start_active_span(request_name, child_of=context, finish_on_close=False)
request.set_opentracing_span(scope.span)
with scope:
inject_response_headers(request.responseHeaders)
try:
yield
finally:
# We set the operation name again in case its changed (which happens
# with JsonResource).
scope.span.set_operation_name(request.request_metrics.name)
# Mypy seems to think that start_context.tag below can be Optional[str], but
# that doesn't appear to be the case in practice, so the assignment is safe.
request_tags[
SynapseTags.REQUEST_TAG
] = request.request_metrics.start_context.tag # type: ignore[assignment]
# set the tags *after* the servlet completes, in case it decided to
# prioritise the span (tags will get dropped on unprioritised spans)
for k, v in request_tags.items():
scope.span.set_tag(k, v) |
Takes a function that returns a relative path and turns it into an
absolute path based on the location of the primary media store | def _wrap_in_base_path(func: F) -> F:
"""Takes a function that returns a relative path and turns it into an
absolute path based on the location of the primary media store
"""
@functools.wraps(func)
def _wrapped(self: "MediaFilePaths", *args: Any, **kwargs: Any) -> str:
path = func(self, *args, **kwargs)
return os.path.join(self.base_path, path)
return cast(F, _wrapped) |
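A rough sketch of how such a wrapper is typically applied; the class, method and directory layout below are illustrative rather than the real MediaFilePaths definitions.

import os.path

class ExampleFilePaths:
    def __init__(self, base_path: str) -> None:
        self.base_path = base_path

    def local_media_filepath_rel(self, media_id: str) -> str:
        # Relative layout: shard the media id into nested directories.
        return os.path.join("local_content", media_id[0:2], media_id[2:4], media_id[4:])

    # Absolute variant: the wrapper prepends self.base_path to the relative path.
    local_media_filepath = _wrap_in_base_path(local_media_filepath_rel)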
Wraps a path-returning method to check that the returned path(s) do not escape
the media store directory.
The path-returning method may return either a single path, or a list of paths.
The check is not expected to ever fail, unless `func` is missing a call to
`_validate_path_component`, or `_validate_path_component` is buggy.
Args:
relative: A boolean indicating whether the wrapped method returns paths relative
to the media store directory.
Returns:
A method which will wrap a path-returning method, adding a check to ensure that
the returned path(s) lie within the media store directory. The check will raise
a `ValueError` if it fails. | def _wrap_with_jail_check(relative: bool) -> Callable[[GetPathMethod], GetPathMethod]:
"""Wraps a path-returning method to check that the returned path(s) do not escape
the media store directory.
The path-returning method may return either a single path, or a list of paths.
The check is not expected to ever fail, unless `func` is missing a call to
`_validate_path_component`, or `_validate_path_component` is buggy.
Args:
relative: A boolean indicating whether the wrapped method returns paths relative
to the media store directory.
Returns:
A method which will wrap a path-returning method, adding a check to ensure that
the returned path(s) lie within the media store directory. The check will raise
a `ValueError` if it fails.
"""
def _wrap_with_jail_check_inner(func: GetPathMethod) -> GetPathMethod:
@functools.wraps(func)
def _wrapped(
self: "MediaFilePaths", *args: Any, **kwargs: Any
) -> Union[str, List[str]]:
path_or_paths = func(self, *args, **kwargs)
if isinstance(path_or_paths, list):
paths_to_check = path_or_paths
else:
paths_to_check = [path_or_paths]
for path in paths_to_check:
# Construct the path that will ultimately be used.
# We cannot guess whether `path` is relative to the media store
# directory, since the media store directory may itself be a relative
# path.
if relative:
path = os.path.join(self.base_path, path)
normalized_path = os.path.normpath(path)
# Now that `normpath` has eliminated `../`s and `./`s from the path,
# `os.path.commonpath` can be used to check whether it lies within the
# media store directory.
if (
os.path.commonpath([normalized_path, self.normalized_base_path])
!= self.normalized_base_path
):
# The path resolves to outside the media store directory,
# or `self.base_path` is `.`, which is an unlikely configuration.
raise ValueError(f"Invalid media store path: {path!r}")
# Note that `os.path.normpath`/`abspath` has a subtle caveat:
# `a/b/c/../c` will normalize to `a/b/c`, but the former refers to a
# different path if `a/b/c` is a symlink. That is, the check above is
# not perfect and may allow a certain restricted subset of untrustworthy
# paths through. Since the check above is secondary to the main
# `_validate_path_component` checks, it's less important for it to be
# perfect.
#
# As an alternative, `os.path.realpath` will resolve symlinks, but
# proves problematic if there are symlinks inside the media store.
# eg. if `url_store/` is symlinked to elsewhere, its canonical path
# won't match that of the main media store directory.
return path_or_paths
return cast(GetPathMethod, _wrapped)
return _wrap_with_jail_check_inner |
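The containment check itself is plain standard-library path arithmetic; a standalone illustration (the paths are made up):

import os.path

base = os.path.normpath("/var/lib/synapse/media_store")
candidate = os.path.join(base, "local_content/../../etc/passwd")
normalized = os.path.normpath(candidate)
print(normalized)                                      # /var/lib/synapse/etc/passwd
print(os.path.commonpath([normalized, base]) == base)  # False -> the path is rejected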
Checks that the given string can be safely used as a path component
Args:
name: The path component to check.
Returns:
The path component if valid.
Raises:
ValueError: If `name` cannot be safely used as a path component. | def _validate_path_component(name: str) -> str:
"""Checks that the given string can be safely used as a path component
Args:
name: The path component to check.
Returns:
The path component if valid.
Raises:
ValueError: If `name` cannot be safely used as a path component.
"""
if not ALLOWED_CHARACTERS.issuperset(name) or name in FORBIDDEN_NAMES:
raise ValueError(f"Invalid path component: {name!r}")
return name |
Write `source` to the file like `dest` synchronously. Should be called
from a thread.
Args:
source: A file like object that's to be written
dest: A file like object to be written to | def _write_file_synchronously(source: IO, dest: IO) -> None:
"""Write `source` to the file like `dest` synchronously. Should be called
from a thread.
Args:
source: A file like object that's to be written
dest: A file like object to be written to
"""
source.seek(0) # Ensure we read from the start of the file
shutil.copyfileobj(source, dest) |
Calculate description for an HTML document.
This uses lxml to convert the HTML document into plaintext. If errors
occur during processing of the document, the Open Graph response is left unchanged.
Args:
open_graph_response: The current Open Graph summary. This is updated in place with additional fields.
html_body: The HTML document, as a string.
Returns:
None; open_graph_response is mutated in place. | def calc_description_and_urls(open_graph_response: JsonDict, html_body: str) -> None:
"""
Calculate description for an HTML document.
This uses lxml to convert the HTML document into plaintext. If errors
occur during processing of the document, the Open Graph response is left unchanged.
Args:
open_graph_response: The current Open Graph summary. This is updated in place with additional fields.
html_body: The HTML document, as a string.
Returns:
None; open_graph_response is mutated in place.
"""
# If there's no body, nothing useful is going to be found.
if not html_body:
return
from lxml import etree
# Create an HTML parser. If this fails, log and return no metadata.
parser = etree.HTMLParser(recover=True, encoding="utf-8")
# Attempt to parse the body. If this fails, log and return no metadata.
# TODO: The develop branch of lxml-stubs types this correctly; drop the ignore once that is released.
tree = etree.fromstring(html_body, parser) # type: ignore[arg-type]
# The data was successfully parsed, but no tree was found.
if tree is None:
return # type: ignore[unreachable]
# Attempt to find interesting URLs (images, videos, embeds).
if "og:image" not in open_graph_response:
image_urls = _fetch_urls(tree, "img")
if image_urls:
open_graph_response["og:image"] = image_urls[0]
video_urls = _fetch_urls(tree, "video") + _fetch_urls(tree, "embed")
if video_urls:
open_graph_response["og:video"] = video_urls[0]
description = parse_html_description(tree)
if description:
open_graph_response["og:description"] = description |
Use the Python codec's name as the normalised entry. | def _normalise_encoding(encoding: str) -> Optional[str]:
"""Use the Python codec's name as the normalised entry."""
try:
return codecs.lookup(encoding).name
except LookupError:
return None |
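The normalisation is simply a lookup in Python's codec registry, so spelling variants collapse to one canonical name. For example:

import codecs

print(codecs.lookup("UTF8").name)     # utf-8
print(codecs.lookup("latin-1").name)  # iso8859-1
# codecs.lookup("not-a-codec") would raise LookupError, so the helper returns None.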
Get potential encoding of the body based on the (presumably) HTML body or the content-type header.
The precedence used for finding a character encoding is:
1. <meta> tag with a charset declared.
2. The XML document's character encoding attribute.
3. The Content-Type header.
4. Fallback to utf-8.
5. Fallback to windows-1252.
This roughly follows the algorithm used by BeautifulSoup's bs4.dammit.EncodingDetector.
Args:
body: The HTML document, as bytes.
content_type: The Content-Type header.
Returns:
The character encoding of the body, as a string. | def _get_html_media_encodings(
body: bytes, content_type: Optional[str]
) -> Iterable[str]:
"""
Get potential encoding of the body based on the (presumably) HTML body or the content-type header.
The precedence used for finding a character encoding is:
1. <meta> tag with a charset declared.
2. The XML document's character encoding attribute.
3. The Content-Type header.
4. Fallback to utf-8.
5. Fallback to windows-1252.
This roughly follows the algorithm used by BeautifulSoup's bs4.dammit.EncodingDetector.
Args:
body: The HTML document, as bytes.
content_type: The Content-Type header.
Returns:
The character encoding of the body, as a string.
"""
# There's no point in returning an encoding more than once.
attempted_encodings: Set[str] = set()
# Limit searches to the first 1kb, since it ought to be at the top.
body_start = body[:1024]
# Check if it has an encoding set in a meta tag.
match = _charset_match.search(body_start)
if match:
encoding = _normalise_encoding(match.group(1).decode("ascii"))
if encoding:
attempted_encodings.add(encoding)
yield encoding
# TODO Support <meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
# Check if it has an XML document with an encoding.
match = _xml_encoding_match.match(body_start)
if match:
encoding = _normalise_encoding(match.group(1).decode("ascii"))
if encoding and encoding not in attempted_encodings:
attempted_encodings.add(encoding)
yield encoding
# Check the HTTP Content-Type header for a character set.
if content_type:
content_match = _content_type_match.match(content_type)
if content_match:
encoding = _normalise_encoding(content_match.group(1))
if encoding and encoding not in attempted_encodings:
attempted_encodings.add(encoding)
yield encoding
# Finally, fallback to UTF-8, then windows-1252.
for fallback in ("utf-8", "cp1252"):
if fallback not in attempted_encodings:
yield fallback |
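A hedged sketch of the expected yield order for a small document; the exact matching depends on the _charset_match and _content_type_match patterns defined elsewhere in the module.

body = b'<html><head><meta charset="ISO-8859-1"></head><body>hi</body></html>'
encodings = list(_get_html_media_encodings(body, 'text/html; charset="UTF-16"'))
# Expected roughly: ["iso8859-1", "utf-16", "utf-8", "cp1252"]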
This uses lxml to parse the HTML document.
Args:
body: The HTML document, as bytes.
uri: The URI used to download the body.
content_type: The Content-Type header.
Returns:
The parsed HTML body, or None if an error occurred during processing. | def decode_body(
body: bytes, uri: str, content_type: Optional[str] = None
) -> Optional["etree._Element"]:
"""
This uses lxml to parse the HTML document.
Args:
body: The HTML document, as bytes.
uri: The URI used to download the body.
content_type: The Content-Type header.
Returns:
The parsed HTML body, or None if an error occurred during processing.
"""
# If there's no body, nothing useful is going to be found.
if not body:
return None
# The idea here is that multiple encodings are tried until one works.
# Unfortunately the result is never used and then LXML will decode the string
# again with the found encoding.
for encoding in _get_html_media_encodings(body, content_type):
try:
body.decode(encoding)
except Exception:
pass
else:
break
else:
logger.warning("Unable to decode HTML body for %s", uri)
return None
from lxml import etree
# Create an HTML parser.
parser = etree.HTMLParser(recover=True, encoding=encoding)
# Attempt to parse the body. Returns None if the body was successfully
# parsed, but no tree was found.
# TODO: The develop branch of lxml-stubs types this correctly; drop the ignore once that is released.
return etree.fromstring(body, parser) |
Search for meta tags prefixed with a particular string.
Args:
tree: The parsed HTML document.
property: The name of the property which contains the tag name, e.g.
"property" for Open Graph.
prefix: The prefix on the property to search for, e.g. "og" for Open Graph.
property_mapper: An optional callable to map the property to the Open Graph
form. Can return None for a key to ignore that key.
Returns:
A map of tag name to value. | def _get_meta_tags(
tree: "etree._Element",
property: str,
prefix: str,
property_mapper: Optional[Callable[[str], Optional[str]]] = None,
) -> Dict[str, Optional[str]]:
"""
Search for meta tags prefixed with a particular string.
Args:
tree: The parsed HTML document.
property: The name of the property which contains the tag name, e.g.
"property" for Open Graph.
prefix: The prefix on the property to search for, e.g. "og" for Open Graph.
property_mapper: An optional callable to map the property to the Open Graph
form. Can return None for a key to ignore that key.
Returns:
A map of tag name to value.
"""
# This actually returns Dict[str, str], but the caller sets this as a variable
# which is Dict[str, Optional[str]].
results: Dict[str, Optional[str]] = {}
# Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this.
for tag in cast(
List["etree._Element"],
tree.xpath(
f"//*/meta[starts-with(@{property}, '{prefix}:')][@content][not(@content='')]"
),
):
# if we've got more than 50 tags, someone is taking the piss
if len(results) >= 50:
logger.warning(
"Skipping parsing of Open Graph for page with too many '%s:' tags",
prefix,
)
return {}
key = cast(str, tag.attrib[property])
if property_mapper:
new_key = property_mapper(key)
# None is a special value used to ignore a value.
if new_key is None:
continue
key = new_key
results[key] = cast(str, tag.attrib["content"])
return results |
Map a Twitter card property to the analogous Open Graph property.
Args:
key: The Twitter card property (starts with "twitter:").
Returns:
The Open Graph property (starts with "og:") or None to have this property
be ignored. | def _map_twitter_to_open_graph(key: str) -> Optional[str]:
"""
Map a Twitter card property to the analogous Open Graph property.
Args:
key: The Twitter card property (starts with "twitter:").
Returns:
The Open Graph property (starts with "og:") or None to have this property
be ignored.
"""
# Twitter card properties with no analogous Open Graph property.
if key == "twitter:card" or key == "twitter:creator":
return None
if key == "twitter:site":
return "og:site_name"
# Otherwise, swap twitter to og.
return "og" + key[7:] |
Parse the HTML document into an Open Graph response.
This uses lxml to search the HTML document for Open Graph data (or
synthesizes it from the document).
Args:
tree: The parsed HTML document.
Returns:
The Open Graph response as a dictionary. | def parse_html_to_open_graph(tree: "etree._Element") -> Dict[str, Optional[str]]:
"""
Parse the HTML document into an Open Graph response.
This uses lxml to search the HTML document for Open Graph data (or
synthesizes it from the document).
Args:
tree: The parsed HTML document.
Returns:
The Open Graph response as a dictionary.
"""
# Search for Open Graph (og:) meta tags, e.g.:
#
# "og:type" : "video",
# "og:url" : "https://www.youtube.com/watch?v=LXDBoHyjmtw",
# "og:site_name" : "YouTube",
# "og:video:type" : "application/x-shockwave-flash",
# "og:description" : "Fun stuff happening here",
# "og:title" : "RemoteJam - Matrix team hack for Disrupt Europe Hackathon",
# "og:image" : "https://i.ytimg.com/vi/LXDBoHyjmtw/maxresdefault.jpg",
# "og:video:url" : "http://www.youtube.com/v/LXDBoHyjmtw?version=3&autohide=1",
# "og:video:width" : "1280"
# "og:video:height" : "720",
# "og:video:secure_url": "https://www.youtube.com/v/LXDBoHyjmtw?version=3",
og = _get_meta_tags(tree, "property", "og")
# TODO: Search for properties specific to the different Open Graph types,
# such as article: meta tags, e.g.:
#
# "article:publisher" : "https://www.facebook.com/thethudonline" />
# "article:author" content="https://www.facebook.com/thethudonline" />
# "article:tag" content="baby" />
# "article:section" content="Breaking News" />
# "article:published_time" content="2016-03-31T19:58:24+00:00" />
# "article:modified_time" content="2016-04-01T18:31:53+00:00" />
# Search for Twitter Card (twitter:) meta tags, e.g.:
#
# "twitter:site" : "@matrixdotorg"
# "twitter:creator" : "@matrixdotorg"
#
# Twitter cards tags also duplicate Open Graph tags.
#
# See https://developer.twitter.com/en/docs/twitter-for-websites/cards/guides/getting-started
twitter = _get_meta_tags(tree, "name", "twitter", _map_twitter_to_open_graph)
# Merge the Twitter values with the Open Graph values, but do not overwrite
# information from Open Graph tags.
for key, value in twitter.items():
if key not in og:
og[key] = value
if "og:title" not in og:
# Attempt to find a title from the title tag, or the biggest header on the page.
# Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this.
title = cast(
List["etree._ElementUnicodeResult"],
tree.xpath("((//title)[1] | (//h1)[1] | (//h2)[1] | (//h3)[1])/text()"),
)
if title:
og["og:title"] = title[0].strip()
else:
og["og:title"] = None
if "og:image" not in og:
# Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this.
meta_image = cast(
List["etree._ElementUnicodeResult"],
tree.xpath(
"//*/meta[translate(@itemprop, 'IMAGE', 'image')='image'][not(@content='')]/@content[1]"
),
)
# If a meta image is found, use it.
if meta_image:
og["og:image"] = meta_image[0]
else:
# Try to find images which are larger than 10px by 10px.
# Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this.
#
# TODO: consider inlined CSS styles as well as width & height attribs
images = cast(
List["etree._Element"],
tree.xpath("//img[@src][number(@width)>10][number(@height)>10]"),
)
images = sorted(
images,
key=lambda i: (
-1 * float(i.attrib["width"]) * float(i.attrib["height"])
),
)
# If no images were found, try to find *any* images.
if not images:
# Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this.
images = cast(List["etree._Element"], tree.xpath("//img[@src][1]"))
if images:
og["og:image"] = cast(str, images[0].attrib["src"])
# Finally, fallback to the favicon if nothing else.
else:
# Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this.
favicons = cast(
List["etree._ElementUnicodeResult"],
tree.xpath("//link[@href][contains(@rel, 'icon')]/@href[1]"),
)
if favicons:
og["og:image"] = favicons[0]
if "og:description" not in og:
# Check the first meta description tag for content.
# Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this.
meta_description = cast(
List["etree._ElementUnicodeResult"],
tree.xpath(
"//*/meta[translate(@name, 'DESCRIPTION', 'description')='description'][not(@content='')]/@content[1]"
),
)
# If a meta description is found with content, use it.
if meta_description:
og["og:description"] = meta_description[0]
else:
og["og:description"] = parse_html_description(tree)
elif og["og:description"]:
# This must be a non-empty string at this point.
assert isinstance(og["og:description"], str)
og["og:description"] = summarize_paragraphs([og["og:description"]])
# TODO: delete the url downloads to stop diskfilling,
# as we only ever cared about its OG
return og |
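A hedged end-to-end sketch combining decode_body and parse_html_to_open_graph; the HTML is illustrative.

html = (
    b"<html><head>"
    b'<meta property="og:title" content="Example" />'
    b'<meta name="twitter:site" content="@example" />'
    b"</head><body><p>Hello world.</p></body></html>"
)
tree = decode_body(html, "http://example.com/")
if tree is not None:
    og = parse_html_to_open_graph(tree)
    # og is roughly:
    # {"og:title": "Example", "og:site_name": "@example", "og:description": "Hello world."}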
Calculate a text description based on an HTML document.
Grabs any text nodes which are inside the <body/> tag, unless they are within
an HTML5 semantic markup tag (<header/>, <nav/>, <aside/>, <footer/>), or
if they are within a <script/>, <svg/> or <style/> tag, or if they are within
a tag whose content is usually only shown to old browsers
(<iframe/>, <video/>, <canvas/>, <picture/>).
This is a very very very coarse approximation to a plain text render of the page.
Args:
tree: The parsed HTML document.
Returns:
The plain text description, or None if one cannot be generated. | def parse_html_description(tree: "etree._Element") -> Optional[str]:
"""
Calculate a text description based on an HTML document.
Grabs any text nodes which are inside the <body/> tag, unless they are within
an HTML5 semantic markup tag (<header/>, <nav/>, <aside/>, <footer/>), or
if they are within a <script/>, <svg/> or <style/> tag, or if they are within
a tag whose content is usually only shown to old browsers
(<iframe/>, <video/>, <canvas/>, <picture/>).
This is a very very very coarse approximation to a plain text render of the page.
Args:
tree: The parsed HTML document.
Returns:
The plain text description, or None if one cannot be generated.
"""
# We don't just use XPATH here as that is slow on some machines.
from lxml import etree
TAGS_TO_REMOVE = {
"header",
"nav",
"aside",
"footer",
"script",
"noscript",
"style",
"svg",
"iframe",
"video",
"canvas",
"img",
"picture",
# etree.Comment is a function which creates an etree._Comment element.
# The "tag" attribute of an etree._Comment instance is confusingly the
# etree.Comment function instead of a string.
etree.Comment,
}
# Split all the text nodes into paragraphs (by splitting on new
# lines)
text_nodes = (
re.sub(r"\s+", "\n", el).strip()
for el in _iterate_over_text(tree.find("body"), TAGS_TO_REMOVE)
)
return summarize_paragraphs(text_nodes) |
Iterate over the tree returning text nodes in a depth first fashion,
skipping text nodes inside certain tags.
Args:
tree: The parent element to iterate. Can be None if there isn't one.
tags_to_ignore: Set of tags to ignore
stack_limit: Maximum stack size limit for depth-first traversal.
Nodes will be dropped if this limit is hit, which may truncate the
textual result.
Intended to limit the maximum working memory when generating a preview. | def _iterate_over_text(
tree: Optional["etree._Element"],
tags_to_ignore: Set[object],
stack_limit: int = 1024,
) -> Generator[str, None, None]:
"""Iterate over the tree returning text nodes in a depth first fashion,
skipping text nodes inside certain tags.
Args:
tree: The parent element to iterate. Can be None if there isn't one.
tags_to_ignore: Set of tags to ignore
stack_limit: Maximum stack size limit for depth-first traversal.
Nodes will be dropped if this limit is hit, which may truncate the
textual result.
Intended to limit the maximum working memory when generating a preview.
"""
if tree is None:
return
# This is a stack whose items are elements to iterate over *or* strings
# to be returned.
elements: List[Union[str, "etree._Element"]] = [tree]
while elements:
el = elements.pop()
if isinstance(el, str):
yield el
elif el.tag not in tags_to_ignore:
# If the element isn't meant for display, ignore it.
if el.get("role") in ARIA_ROLES_TO_IGNORE:
continue
# el.text is the text before the first child, so we can immediately
# return it if the text exists.
if el.text:
yield el.text
# We add to the stack all the element's children, interspersed with
# each child's tail text (if it exists).
#
# We iterate in reverse order so that earlier pieces of text appear
# closer to the top of the stack.
for child in el.iterchildren(reversed=True):
if len(elements) > stack_limit:
# We've hit our limit for working memory
break
if child.tail:
# The tail text of a node is text that comes *after* the node,
# so we always include it even if we ignore the child node.
elements.append(child.tail)
elements.append(child) |
Try to get a summary respecting first paragraph and then word boundaries.
Args:
text_nodes: The paragraphs to summarize.
min_size: The minimum number of words to include.
max_size: The maximum number of words to include.
Returns:
A summary of the text nodes, or None if that was not possible. | def summarize_paragraphs(
text_nodes: Iterable[str], min_size: int = 200, max_size: int = 500
) -> Optional[str]:
"""
Try to get a summary respecting first paragraph and then word boundaries.
Args:
text_nodes: The paragraphs to summarize.
min_size: The minimum number of words to include.
max_size: The maximum number of words to include.
Returns:
A summary of the text nodes, or None if that was not possible.
"""
# TODO: Respect sentences?
description = ""
# Keep adding paragraphs until we get to the MIN_SIZE.
for text_node in text_nodes:
if len(description) < min_size:
text_node = re.sub(r"[\t \r\n]+", " ", text_node)
description += text_node + "\n\n"
else:
break
description = description.strip()
description = re.sub(r"[\t ]+", " ", description)
description = re.sub(r"[\t \r\n]*[\r\n]+", "\n\n", description)
# If the concatenation of paragraphs to get above MIN_SIZE
# took us over MAX_SIZE, then we need to truncate mid paragraph
if len(description) > max_size:
new_desc = ""
# This splits the paragraph into words, but keeping the
# (preceding) whitespace intact so we can easily concat
# words back together.
for match in re.finditer(r"\s*\S+", description):
word = match.group()
# Keep adding words while the total length is less than
# MAX_SIZE.
if len(word) + len(new_desc) < max_size:
new_desc += word
else:
# At this point the next word *will* take us over
# MAX_SIZE, but we also want to ensure that it's not
# a huge word. If it is add it anyway and we'll
# truncate later.
if len(new_desc) < min_size:
new_desc += word
break
# Double check that we're not over the limit
if len(new_desc) > max_size:
new_desc = new_desc[:max_size]
# We always add an ellipsis because at the very least
# we chopped mid paragraph.
description = new_desc.strip() + "…"
return description if description else None |
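Illustrative behaviour: paragraphs are concatenated until the minimum size is reached, and anything that pushes past the maximum is truncated with an ellipsis appended.

paragraphs = ["First paragraph.", "Second paragraph.", "x" * 1000]
summary = summarize_paragraphs(paragraphs, min_size=200, max_size=500)
# summary begins "First paragraph.\n\nSecond paragraph.\n\n", is roughly 500
# characters long, and ends with "…" because the last paragraph was cut short.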
Returns True for content types for which we will perform URL preview and False
otherwise. | def _is_previewable(content_type: str) -> bool:
"""Returns True for content types for which we will perform URL preview and False
otherwise."""
return _is_html(content_type) or _is_media(content_type) or _is_json(content_type) |
Adds the correct response headers in preparation for responding with the
media.
Args:
request
media_type: The media/content type.
file_size: Size in bytes of the media, if known.
upload_name: The name of the requested file, if any. | def add_file_headers(
request: Request,
media_type: str,
file_size: Optional[int],
upload_name: Optional[str],
) -> None:
"""Adds the correct response headers in preparation for responding with the
media.
Args:
request
media_type: The media/content type.
file_size: Size in bytes of the media, if known.
upload_name: The name of the requested file, if any.
"""
def _quote(x: str) -> str:
return urllib.parse.quote(x.encode("utf-8"))
# Default to a UTF-8 charset for text content types.
# e.g. uses UTF-8 for 'text/css' but not 'text/css; charset=UTF-16'
if media_type.lower() in TEXT_CONTENT_TYPES:
content_type = media_type + "; charset=UTF-8"
else:
content_type = media_type
request.setHeader(b"Content-Type", content_type.encode("UTF-8"))
# A strict subset of content types is allowed to be inlined so that they may
# be viewed directly in a browser. Other file types are forced to be downloads.
#
# Only the type & subtype are important, parameters can be ignored.
if media_type.lower().split(";", 1)[0] in INLINE_CONTENT_TYPES:
disposition = "inline"
else:
disposition = "attachment"
if upload_name:
# RFC6266 section 4.1 [1] defines both `filename` and `filename*`.
#
# `filename` is defined to be a `value`, which is defined by RFC2616
# section 3.6 [2] to be a `token` or a `quoted-string`, where a `token`
# is (essentially) a single US-ASCII word, and a `quoted-string` is a
# US-ASCII string surrounded by double-quotes, using backslash as an
# escape character. Note that %-encoding is *not* permitted.
#
# `filename*` is defined to be an `ext-value`, which is defined in
# RFC5987 section 3.2.1 [3] to be `charset "'" [ language ] "'" value-chars`,
# where `value-chars` is essentially a %-encoded string in the given charset.
#
# [1]: https://tools.ietf.org/html/rfc6266#section-4.1
# [2]: https://tools.ietf.org/html/rfc2616#section-3.6
# [3]: https://tools.ietf.org/html/rfc5987#section-3.2.1
# We avoid the quoted-string version of `filename`, because (a) synapse didn't
# correctly interpret those as of 0.99.2 and (b) they are a bit of a pain and we
# may as well just do the filename* version.
if _can_encode_filename_as_token(upload_name):
disposition = "%s; filename=%s" % (
disposition,
upload_name,
)
else:
disposition = "%s; filename*=utf-8''%s" % (
disposition,
_quote(upload_name),
)
request.setHeader(b"Content-Disposition", disposition.encode("ascii"))
# cache for at least a day.
# XXX: we might want to turn this off for data we don't want to
# recommend caching as it's sensitive or private - or at least
# select private. don't bother setting Expires as all our
# clients are smart enough to be happy with Cache-Control
request.setHeader(b"Cache-Control", b"public,max-age=86400,s-maxage=86400")
if file_size is not None:
request.setHeader(b"Content-Length", b"%d" % (file_size,))
# Tell web crawlers to not index, archive, or follow links in media. This
# should help to prevent things in the media repo from showing up in web
# search results.
request.setHeader(b"X-Robots-Tag", "noindex, nofollow, noarchive, noimageindex") |
Get the filename of the downloaded file by inspecting the
Content-Disposition HTTP header.
Args:
headers: The HTTP request headers.
Returns:
The filename, or None. | def get_filename_from_headers(headers: Dict[bytes, List[bytes]]) -> Optional[str]:
"""
Get the filename of the downloaded file by inspecting the
Content-Disposition HTTP header.
Args:
headers: The HTTP request headers.
Returns:
The filename, or None.
"""
content_disposition = headers.get(b"Content-Disposition", [b""])
# No header, bail out.
if not content_disposition[0]:
return None
_, params = _parse_header(content_disposition[0])
upload_name = None
# First check if there is a valid UTF-8 filename
upload_name_utf8 = params.get(b"filename*", None)
if upload_name_utf8:
if upload_name_utf8.lower().startswith(b"utf-8''"):
upload_name_utf8 = upload_name_utf8[7:]
# We have a filename*= section. This MUST be ASCII, and any UTF-8
# bytes are %-quoted.
try:
# Once it is decoded, we can then unquote the %-encoded
# parts strictly into a unicode string.
upload_name = urllib.parse.unquote(
upload_name_utf8.decode("ascii"), errors="strict"
)
except UnicodeDecodeError:
# Incorrect UTF-8.
pass
# If there isn't one, check for an ASCII name.
if not upload_name:
upload_name_ascii = params.get(b"filename", None)
if upload_name_ascii and is_ascii(upload_name_ascii):
upload_name = upload_name_ascii.decode("ascii")
# This may be None here, indicating we did not find a matching name.
return upload_name |
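For example, an RFC 5987 `filename*` parameter takes precedence over a plain `filename`:

headers = {
    b"Content-Disposition": [
        b"inline; filename*=utf-8''caf%C3%A9.png; filename=cafe.png"
    ]
}
print(get_filename_from_headers(headers))  # café.png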
Parse a Content-type like header.
Cargo-culted from `cgi`, but works on bytes rather than strings.
Args:
line: header to be parsed
Returns:
The main content-type, followed by the parameter dictionary | def _parse_header(line: bytes) -> Tuple[bytes, Dict[bytes, bytes]]:
"""Parse a Content-type like header.
Cargo-culted from `cgi`, but works on bytes rather than strings.
Args:
line: header to be parsed
Returns:
The main content-type, followed by the parameter dictionary
"""
parts = _parseparam(b";" + line)
key = next(parts)
pdict = {}
for p in parts:
i = p.find(b"=")
if i >= 0:
name = p[:i].strip().lower()
value = p[i + 1 :].strip()
# strip double-quotes
if len(value) >= 2 and value[0:1] == value[-1:] == b'"':
value = value[1:-1]
value = value.replace(b"\\\\", b"\\").replace(b'\\"', b'"')
pdict[name] = value
return key, pdict |
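Example: parsing a Content-Disposition style value (bytes in, bytes out); quoted strings and backslash escapes are unwrapped.

header = b'inline; filename="foo \\"bar\\".png"; size=42'
main, params = _parse_header(header)
# main == b"inline"
# params == {b"filename": b'foo "bar".png', b"size": b"42"}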
Generator which splits the input on ;, respecting double-quoted sequences
Cargo-culted from `cgi`, but works on bytes rather than strings.
Args:
s: header to be parsed
Returns:
The split input | def _parseparam(s: bytes) -> Generator[bytes, None, None]:
"""Generator which splits the input on ;, respecting double-quoted sequences
Cargo-culted from `cgi`, but works on bytes rather than strings.
Args:
s: header to be parsed
Returns:
The split input
"""
while s[:1] == b";":
s = s[1:]
# look for the next ;
end = s.find(b";")
# if there is an odd number of " marks between here and the next ;, skip to the
# next ; instead
while end > 0 and (s.count(b'"', 0, end) - s.count(b'\\"', 0, end)) % 2:
end = s.find(b";", end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:] |
Run the given function in its own logcontext, with resource metrics
This should be used to wrap processes which are fired off to run in the
background, instead of being associated with a particular request.
It returns a Deferred which completes when the function completes, but it doesn't
follow the synapse logcontext rules, which makes it appropriate for passing to
clock.looping_call and friends (or for firing-and-forgetting in the middle of a
normal synapse async function).
Args:
desc: a description for this background process type
func: a function, which may return a Deferred or a coroutine
bg_start_span: Whether to start an opentracing span. Defaults to True.
Should only be disabled for processes that will not log to or tag
a span.
args: positional args for func
kwargs: keyword args for func
Returns:
Deferred which returns the result of func, or `None` if func raises.
Note that the returned Deferred does not follow the synapse logcontext
rules. | def run_as_background_process(
desc: "LiteralString",
func: Callable[..., Awaitable[Optional[R]]],
*args: Any,
bg_start_span: bool = True,
**kwargs: Any,
) -> "defer.Deferred[Optional[R]]":
"""Run the given function in its own logcontext, with resource metrics
This should be used to wrap processes which are fired off to run in the
background, instead of being associated with a particular request.
It returns a Deferred which completes when the function completes, but it doesn't
follow the synapse logcontext rules, which makes it appropriate for passing to
clock.looping_call and friends (or for firing-and-forgetting in the middle of a
normal synapse async function).
Args:
desc: a description for this background process type
func: a function, which may return a Deferred or a coroutine
bg_start_span: Whether to start an opentracing span. Defaults to True.
Should only be disabled for processes that will not log to or tag
a span.
args: positional args for func
kwargs: keyword args for func
Returns:
Deferred which returns the result of func, or `None` if func raises.
Note that the returned Deferred does not follow the synapse logcontext
rules.
"""
async def run() -> Optional[R]:
with _bg_metrics_lock:
count = _background_process_counts.get(desc, 0)
_background_process_counts[desc] = count + 1
_background_process_start_count.labels(desc).inc()
_background_process_in_flight_count.labels(desc).inc()
with BackgroundProcessLoggingContext(desc, count) as context:
try:
if bg_start_span:
ctx = start_active_span(
f"bgproc.{desc}", tags={SynapseTags.REQUEST_ID: str(context)}
)
else:
ctx = nullcontext() # type: ignore[assignment]
with ctx:
return await func(*args, **kwargs)
except Exception:
logger.exception(
"Background process '%s' threw an exception",
desc,
)
return None
finally:
_background_process_in_flight_count.labels(desc).dec()
with PreserveLoggingContext():
# Note that we return a Deferred here so that it can be used in a
# looping_call and other places that expect a Deferred.
return defer.ensureDeferred(run()) |
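A hedged usage sketch (the coroutine below is hypothetical): a periodic clean-up can be fired off without tying it to any request's logcontext.

async def _prune_old_entries() -> None:
    # Hypothetical clean-up work; exceptions are logged and swallowed by the wrapper.
    ...

run_as_background_process("prune_old_entries", _prune_old_entries)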
Decorator that wraps an asynchronous function `func`, returning a synchronous
decorated function. Calling the decorated version runs `func` as a background
process, forwarding all arguments verbatim.
That is,
@wrap_as_background_process("some_process")
def func(*args): ...
func(1, 2, third=3)
is equivalent to:
def func(*args): ...
run_as_background_process("some_process", func, 1, 2, third=3)
The former can be convenient if `func` needs to be run as a background process in
multiple places. | def wrap_as_background_process(
desc: "LiteralString",
) -> Callable[
[Callable[P, Awaitable[Optional[R]]]],
Callable[P, "defer.Deferred[Optional[R]]"],
]:
"""Decorator that wraps an asynchronous function `func`, returning a synchronous
decorated function. Calling the decorated version runs `func` as a background
process, forwarding all arguments verbatim.
That is,
@wrap_as_background_process("some_process")
def func(*args): ...
func(1, 2, third=3)
is equivalent to:
def func(*args): ...
run_as_background_process("some_process", func, 1, 2, third=3)
The former can be convenient if `func` needs to be run as a background process in
multiple places.
"""
def wrap_as_background_process_inner(
func: Callable[P, Awaitable[Optional[R]]]
) -> Callable[P, "defer.Deferred[Optional[R]]"]:
@wraps(func)
def wrap_as_background_process_inner_2(
*args: P.args, **kwargs: P.kwargs
) -> "defer.Deferred[Optional[R]]":
# type-ignore: mypy is confusing kwargs with the bg_start_span kwarg.
# Argument 4 to "run_as_background_process" has incompatible type
# "**P.kwargs"; expected "bool"
# See https://github.com/python/mypy/issues/8862
return run_as_background_process(desc, func, *args, **kwargs) # type: ignore[arg-type]
return wrap_as_background_process_inner_2
return wrap_as_background_process_inner |
Returns an interface to jemalloc, if it is being used.
Note that this will always return None until `setup_jemalloc_stats` has been
called. | def get_jemalloc_stats() -> Optional[JemallocStats]:
"""Returns an interface to jemalloc, if it is being used.
Note that this will always return None until `setup_jemalloc_stats` has been
called.
"""
return _JEMALLOC_STATS |
Checks to see if jemalloc is loaded, and hooks up a collector to record
statistics exposed by jemalloc. | def _setup_jemalloc_stats() -> None:
"""Checks to see if jemalloc is loaded, and hooks up a collector to record
statistics exposed by jemalloc.
"""
global _JEMALLOC_STATS
# Try to find the loaded jemalloc shared library, if any. We need to
# introspect into what is loaded, rather than loading whatever is on the
# path, as if we load a *different* jemalloc version things will seg fault.
# We look in `/proc/self/maps`, which only exists on linux.
if not os.path.exists("/proc/self/maps"):
logger.debug("Not looking for jemalloc as no /proc/self/maps exist")
return
# We're looking for a path at the end of the line that includes
# "libjemalloc".
regex = re.compile(r"/\S+/libjemalloc.*$")
jemalloc_path = None
with open("/proc/self/maps") as f:
for line in f:
match = regex.search(line.strip())
if match:
jemalloc_path = match.group()
if not jemalloc_path:
# No loaded jemalloc was found.
logger.debug("jemalloc not found")
return
logger.debug("Found jemalloc at %s", jemalloc_path)
jemalloc_dll = ctypes.CDLL(jemalloc_path)
stats = JemallocStats(jemalloc_dll)
_JEMALLOC_STATS = stats
class JemallocCollector(Collector):
"""Metrics for internal jemalloc stats."""
def collect(self) -> Iterable[Metric]:
stats.refresh_stats()
g = GaugeMetricFamily(
"jemalloc_stats_app_memory_bytes",
"The stats reported by jemalloc",
labels=["type"],
)
# Read the relevant global stats from jemalloc. Note that these may
# not be accurate if python is configured to use its internal small
# object allocator (which is on by default, disable by setting the
# env `PYTHONMALLOC=malloc`).
#
# See the jemalloc manpage for details about what each value means,
# roughly:
# - allocated ─ Total number of bytes allocated by the app
# - active ─ Total number of bytes in active pages allocated by
# the application, this is bigger than `allocated`.
# - resident ─ Maximum number of bytes in physically resident data
# pages mapped by the allocator, comprising all pages dedicated
# to allocator metadata, pages backing active allocations, and
# unused dirty pages. This is bigger than `active`.
# - mapped ─ Total number of bytes in active extents mapped by the
# allocator.
# - metadata ─ Total number of bytes dedicated to jemalloc
# metadata.
for t in (
"allocated",
"active",
"resident",
"mapped",
"metadata",
):
try:
value = stats.get_stat(t)
except Exception as e:
# There was an error fetching the value, skip.
logger.warning("Failed to read jemalloc stats.%s: %s", t, e)
continue
g.add_metric([t], value=value)
yield g
REGISTRY.register(JemallocCollector())
logger.debug("Added jemalloc stats") |
Try to setup jemalloc stats, if jemalloc is loaded. | def setup_jemalloc_stats() -> None:
"""Try to setup jemalloc stats, if jemalloc is loaded."""
try:
_setup_jemalloc_stats()
except Exception as e:
# This should only happen if we find the loaded jemalloc library, but
# fail to load it somehow (e.g. we somehow picked the wrong version).
logger.info("Failed to setup collector to record jemalloc stats: %s", e) |
Disable automatic GC, and replace it with a task that runs every 100ms
This means that (a) we can limit how often GC runs; (b) we can get some metrics
about GC activity.
It does nothing on PyPy. | def install_gc_manager() -> None:
"""Disable automatic GC, and replace it with a task that runs every 100ms
This means that (a) we can limit how often GC runs; (b) we can get some metrics
about GC activity.
It does nothing on PyPy.
"""
if running_on_pypy:
return
REGISTRY.register(GCCounts())
gc.disable()
# The time (in seconds since the epoch) of the last time we did a GC for each generation.
_last_gc = [0.0, 0.0, 0.0]
def _maybe_gc() -> None:
# Check if we need to do a manual GC (since its been disabled), and do
# one if necessary. Note we go in reverse order as e.g. a gen 1 GC may
# promote an object into gen 2, and we don't want to handle the same
# object multiple times.
threshold = gc.get_threshold()
counts = gc.get_count()
end = time.time()
for i in (2, 1, 0):
# We check if we need to do one based on a straightforward
# comparison between the threshold and count. We also do an extra
# check to make sure that we don't do a GC too often.
if threshold[i] < counts[i] and MIN_TIME_BETWEEN_GCS[i] < end - _last_gc[i]:
if i == 0:
logger.debug("Collecting gc %d", i)
else:
logger.info("Collecting gc %d", i)
start = time.time()
unreachable = gc.collect(i)
end = time.time()
_last_gc[i] = end
gc_time.labels(i).observe(end - start)
gc_unreachable.labels(i).set(unreachable)
gc_task = task.LoopingCall(_maybe_gc)
gc_task.start(0.1) |
Add metrics for the threadpool. | def register_threadpool(name: str, threadpool: ThreadPool) -> None:
"""Add metrics for the threadpool."""
threadpool_total_min_threads.labels(name).set(threadpool.min)
threadpool_total_max_threads.labels(name).set(threadpool.max)
threadpool_total_threads.labels(name).set_function(lambda: len(threadpool.threads))
threadpool_total_working_threads.labels(name).set_function(
lambda: len(threadpool.working)
) |
Returns a decorator that applies a memoizing cache around the function. This
decorator behaves similarly to functools.lru_cache.
Example:
@cached()
def foo(a, b):
...
Added in Synapse v1.74.0.
Args:
max_entries: The maximum number of entries in the cache. If the cache is full
and a new entry is added, the least recently accessed entry will be evicted
from the cache.
num_args: The number of positional arguments (excluding `self`) to use as cache
keys. Defaults to all named args of the function.
uncached_args: A list of argument names to not use as the cache key. (`self` is
always ignored.) Cannot be used with num_args.
Returns:
A decorator that applies a memoizing cache around the function. | def cached(
*,
max_entries: int = 1000,
num_args: Optional[int] = None,
uncached_args: Optional[Collection[str]] = None,
) -> Callable[[F], CachedFunction[F]]:
"""Returns a decorator that applies a memoizing cache around the function. This
decorator behaves similarly to functools.lru_cache.
Example:
@cached()
def foo(a, b):
...
Added in Synapse v1.74.0.
Args:
max_entries: The maximum number of entries in the cache. If the cache is full
and a new entry is added, the least recently accessed entry will be evicted
from the cache.
num_args: The number of positional arguments (excluding `self`) to use as cache
keys. Defaults to all named args of the function.
uncached_args: A list of argument names to not use as the cache key. (`self` is
always ignored.) Cannot be used with num_args.
Returns:
A decorator that applies a memoizing cache around the function.
"""
return _cached(
max_entries=max_entries,
num_args=num_args,
uncached_args=uncached_args,
) |
Wrapper that loads spam checkers configured using the old configuration, and
registers the spam checker hooks they implement. | def load_legacy_spam_checkers(hs: "synapse.server.HomeServer") -> None:
"""Wrapper that loads spam checkers configured using the old configuration, and
registers the spam checker hooks they implement.
"""
spam_checkers: List[Any] = []
api = hs.get_module_api()
for module, config in hs.config.spamchecker.spam_checkers:
# Older spam checkers don't accept the `api` argument, so we
# try and detect support.
spam_args = inspect.getfullargspec(module)
if "api" in spam_args.args:
spam_checkers.append(module(config=config, api=api))
else:
spam_checkers.append(module(config=config))
# The known spam checker hooks. If a spam checker module implements a method
# which name appears in this set, we'll want to register it.
spam_checker_methods = {
"check_event_for_spam",
"user_may_invite",
"user_may_create_room",
"user_may_create_room_alias",
"user_may_publish_room",
"check_username_for_spam",
"check_registration_for_spam",
"check_media_file_for_spam",
}
for spam_checker in spam_checkers:
# Methods on legacy spam checkers might not be async, so we wrap them around a
# wrapper that will call maybe_awaitable on the result.
def async_wrapper(f: Optional[Callable]) -> Optional[Callable[..., Awaitable]]:
# f might be None if the callback isn't implemented by the module. In this
# case we don't want to register a callback at all so we return None.
if f is None:
return None
wrapped_func = f
if f.__name__ == "check_registration_for_spam":
checker_args = inspect.signature(f)
if len(checker_args.parameters) == 3:
# Backwards compatibility; some modules might implement a hook that
# doesn't expect a 4th argument. In this case, wrap it in a function
# that gives it only 3 arguments and drops the auth_provider_id on
# the floor.
def wrapper(
email_threepid: Optional[dict],
username: Optional[str],
request_info: Collection[Tuple[str, str]],
auth_provider_id: Optional[str],
) -> Union[Awaitable[RegistrationBehaviour], RegistrationBehaviour]:
# Assertion required because mypy can't prove we won't
# change `f` back to `None`. See
# https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
assert f is not None
return f(
email_threepid,
username,
request_info,
)
wrapped_func = wrapper
elif len(checker_args.parameters) != 4:
raise RuntimeError(
"Bad signature for callback check_registration_for_spam",
)
def run(*args: Any, **kwargs: Any) -> Awaitable:
# Assertion required because mypy can't prove we won't change `f`
# back to `None`. See
# https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
assert wrapped_func is not None
return maybe_awaitable(wrapped_func(*args, **kwargs))
return run
# Register the hooks through the module API.
hooks = {
hook: async_wrapper(getattr(spam_checker, hook, None))
for hook in spam_checker_methods
}
api.register_spam_checker_callbacks(**hooks) |
Wrapper that loads a third party event rules module configured using the old
configuration, and registers the hooks they implement. | def load_legacy_third_party_event_rules(hs: "HomeServer") -> None:
"""Wrapper that loads a third party event rules module configured using the old
configuration, and registers the hooks they implement.
"""
if hs.config.thirdpartyrules.third_party_event_rules is None:
return
module, config = hs.config.thirdpartyrules.third_party_event_rules
api = hs.get_module_api()
third_party_rules = module(config=config, module_api=api)
# The known hooks. If a module implements a method which name appears in this set,
# we'll want to register it.
third_party_event_rules_methods = {
"check_event_allowed",
"on_create_room",
"check_threepid_can_be_invited",
"check_visibility_can_be_modified",
}
def async_wrapper(f: Optional[Callable]) -> Optional[Callable[..., Awaitable]]:
# f might be None if the callback isn't implemented by the module. In this
# case we don't want to register a callback at all so we return None.
if f is None:
return None
# We return a separate wrapper for these methods because, in order to wrap them
# correctly, we need to await its result. Therefore it doesn't make a lot of
# sense to make it go through the run() wrapper.
if f.__name__ == "check_event_allowed":
# We need to wrap check_event_allowed because its old form would return either
# a boolean or a dict, but now we want to return the dict separately from the
# boolean.
async def wrap_check_event_allowed(
event: EventBase,
state_events: StateMap[EventBase],
) -> Tuple[bool, Optional[dict]]:
# Assertion required because mypy can't prove we won't change
# `f` back to `None`. See
# https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
assert f is not None
res = await f(event, state_events)
if isinstance(res, dict):
return True, res
else:
return res, None
return wrap_check_event_allowed
if f.__name__ == "on_create_room":
# We need to wrap on_create_room because its old form would return a boolean
# if the room creation is denied, but now we just want it to raise an
# exception.
async def wrap_on_create_room(
requester: Requester, config: dict, is_requester_admin: bool
) -> None:
# Assertion required because mypy can't prove we won't change
# `f` back to `None`. See
# https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
assert f is not None
res = await f(requester, config, is_requester_admin)
if res is False:
raise SynapseError(
403,
"Room creation forbidden with these parameters",
)
return wrap_on_create_room
def run(*args: Any, **kwargs: Any) -> Awaitable:
# Assertion required because mypy can't prove we won't change `f`
# back to `None`. See
# https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
assert f is not None
return maybe_awaitable(f(*args, **kwargs))
return run
# Register the hooks through the module API.
hooks = {
hook: async_wrapper(getattr(third_party_rules, hook, None))
for hook in third_party_event_rules_methods
}
api.register_third_party_rules_callbacks(**hooks) |
Given a JSON dictionary (or event) which might contain sub dictionaries,
flatten it into a single layer dictionary by combining the keys & sub-keys.
String, integer, boolean, null or lists of those values are kept. All others are dropped.
Transforms:
{"foo": {"bar": "test"}}
To:
{"foo.bar": "test"}
Args:
d: The event or content to continue flattening.
prefix: The key prefix (from outer dictionaries).
result: The result to mutate.
Returns:
The resulting dictionary. | def _flatten_dict(
d: Union[EventBase, Mapping[str, Any]],
prefix: Optional[List[str]] = None,
result: Optional[Dict[str, JsonValue]] = None,
) -> Dict[str, JsonValue]:
"""
Given a JSON dictionary (or event) which might contain sub dictionaries,
flatten it into a single layer dictionary by combining the keys & sub-keys.
String, integer, boolean, null or lists of those values are kept. All others are dropped.
Transforms:
{"foo": {"bar": "test"}}
To:
{"foo.bar": "test"}
Args:
d: The event or content to continue flattening.
prefix: The key prefix (from outer dictionaries).
result: The result to mutate.
Returns:
The resulting dictionary.
"""
if prefix is None:
prefix = []
if result is None:
result = {}
for key, value in d.items():
# Escape periods in the key with a backslash (and backslashes with an
# extra backslash). This is since a period is used as a separator between
# nested fields.
key = key.replace("\\", "\\\\").replace(".", "\\.")
if _is_simple_value(value):
result[".".join(prefix + [key])] = value
elif isinstance(value, (list, tuple)):
result[".".join(prefix + [key])] = [v for v in value if _is_simple_value(v)]
elif isinstance(value, Mapping):
# do not set `room_version` due to recursion considerations below
_flatten_dict(value, prefix=(prefix + [key]), result=result)
# `room_version` should only ever be set when looking at the top level of an event
if (
isinstance(d, EventBase)
and PushRuleRoomFlag.EXTENSIBLE_EVENTS in d.room_version.msc3931_push_features
):
# Room supports extensible events: replace `content.body` with the plain text
# representation from `m.markup`, as per MSC1767.
markup = d.get("content").get("m.markup")
if d.room_version.identifier.startswith("org.matrix.msc1767."):
markup = d.get("content").get("org.matrix.msc1767.markup")
if markup is not None and isinstance(markup, list):
text = ""
for rep in markup:
if not isinstance(rep, dict):
# invalid markup - skip all processing
break
if rep.get("mimetype", "text/plain") == "text/plain":
rep_text = rep.get("body")
if rep_text is not None and isinstance(rep_text, str):
text = rep_text.lower()
break
result["content.body"] = text
return result |
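Illustrative behaviour, including the backslash-escaping of literal dots in keys:

_flatten_dict({"m.relates_to": {"rel_type": "m.thread"}, "body": "hi"})
# -> {"m\\.relates_to.rel_type": "m.thread", "body": "hi"}
#    (the backslash escapes the literal dot; the plain dot separates nesting levels)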
Converts a list of raw rules and an enabled map into nested dictionaries
to match the Matrix client-server format for push rules | def format_push_rules_for_user(
user: UserID, ruleslist: FilteredPushRules
) -> Dict[str, Dict[str, List[Dict[str, Any]]]]:
"""Converts a list of rawrules and a enabled map into nested dictionaries
to match the Matrix client-server format for push rules"""
rules: Dict[str, Dict[str, List[Dict[str, Any]]]] = {"global": {}}
rules["global"] = _add_empty_priority_class_arrays(rules["global"])
for r, enabled in ruleslist.rules():
template_name = _priority_class_to_template_name(r.priority_class)
rulearray = rules["global"][template_name]
template_rule = _rule_to_template(r)
if not template_rule:
continue
rulearray.append(template_rule)
_convert_type_to_value(template_rule, user)
template_rule["enabled"] = enabled
if "conditions" not in template_rule:
# Not all formatted rules have explicit conditions, e.g. "room"
# rules omit them as they can be derived from the kind and rule ID.
#
# If the formatted rule has no conditions then we can skip the
# formatting of conditions.
continue
# Remove internal stuff.
template_rule["conditions"] = copy.deepcopy(template_rule["conditions"])
for c in template_rule["conditions"]:
c.pop("_cache_key", None)
_convert_type_to_value(c, user)
return rules |
Converts a list of actions into a `tweaks` dict (which can then be passed to
the push gateway).
This function ignores all actions other than `set_tweak` actions, and treats
absent `value`s as `True`, which agrees with the only spec-defined treatment
of absent `value`s (namely, for `highlight` tweaks).
Args:
actions: list of actions
e.g. [
{"set_tweak": "a", "value": "AAA"},
{"set_tweak": "b", "value": "BBB"},
{"set_tweak": "highlight"},
"notify"
]
Returns:
dictionary of tweaks for those actions
e.g. {"a": "AAA", "b": "BBB", "highlight": True} | def tweaks_for_actions(actions: List[Union[str, Dict]]) -> JsonMapping:
"""
Converts a list of actions into a `tweaks` dict (which can then be passed to
the push gateway).
This function ignores all actions other than `set_tweak` actions, and treats
absent `value`s as `True`, which agrees with the only spec-defined treatment
of absent `value`s (namely, for `highlight` tweaks).
Args:
actions: list of actions
e.g. [
{"set_tweak": "a", "value": "AAA"},
{"set_tweak": "b", "value": "BBB"},
{"set_tweak": "highlight"},
"notify"
]
Returns:
dictionary of tweaks for those actions
e.g. {"a": "AAA", "b": "BBB", "highlight": True}
"""
tweaks = {}
for a in actions:
if not isinstance(a, dict):
continue
if "set_tweak" in a:
# value is allowed to be absent in which case the value assumed
# should be True.
tweaks[a["set_tweak"]] = a.get("value", True)
return tweaks |
Sanitise a raw HTML string to a set of allowed tags and attributes, and linkify any bare URLs.
Args:
raw_html: Unsafe HTML.
Returns:
A Markup object ready to safely use in a Jinja template. | def safe_markup(raw_html: str) -> Markup:
"""
Sanitise a raw HTML string to a set of allowed tags and attributes, and linkify any bare URLs.
Args:
raw_html: Unsafe HTML.
Returns:
A Markup object ready to safely use in a Jinja template.
"""
return Markup(
bleach.linkify(
bleach.clean(
raw_html,
tags=ALLOWED_TAGS,
attributes=ALLOWED_ATTRS,
# bleach master has this, but it isn't released yet
# protocols=ALLOWED_SCHEMES,
strip=True,
)
)
) |
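As a rough illustration of the intended behaviour, assuming ALLOWED_TAGS permits <b> but not <script>:
unsafe = '<b>hi</b> <script>alert(1)</script> see https://example.org'
rendered = safe_markup(unsafe)
# The <script> element is stripped, the bare URL is turned into an <a> link,
# and the result is a Markup object, so Jinja will not escape it again.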
Sanitise text (escape any HTML tags), and then linkify any bare URLs.
Args:
raw_text: Unsafe text which might include HTML markup.
Returns:
A Markup object ready to safely use in a Jinja template. | def safe_text(raw_text: str) -> Markup:
"""
Sanitise text (escape any HTML tags), and then linkify any bare URLs.
Args:
raw_text: Unsafe text which might include HTML markup.
Returns:
A Markup object ready to safely use in a Jinja template.
"""
return Markup(
bleach.linkify(bleach.clean(raw_text, tags=[], attributes=[], strip=False))
) |
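By contrast with safe_markup above, safe_text allows no tags through at all; a rough sketch:
rendered = safe_text("<b>hi</b> see https://example.org")
# With tags=[] and strip=False the <b> tags are escaped to &lt;b&gt; rather
# than removed, while the bare URL is still linkified; the result is again
# a Markup object that is safe to drop into a template.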
Get a description of the room based on the member events.
Args:
member_events: The events of a room.
Returns:
The room description | def descriptor_from_member_events(member_events: Iterable[EventBase]) -> str:
"""Get a description of the room based on the member events.
Args:
member_events: The events of a room.
Returns:
The room description
"""
member_events = list(member_events)
if len(member_events) == 0:
return "nobody"
elif len(member_events) == 1:
return name_from_member_event(member_events[0])
elif len(member_events) == 2:
return "%s and %s" % (
name_from_member_event(member_events[0]),
name_from_member_event(member_events[1]),
)
else:
return "%s and %d others" % (
name_from_member_event(member_events[0]),
len(member_events) - 1,
) |
Parses a command from a received line.
The line should already be stripped of whitespace and checked to be non-blank. | def parse_command_from_line(line: str) -> Command:
"""Parses a command from a received line.
The line should already be stripped of whitespace and checked to be non-blank.
"""
idx = line.find(" ")
if idx >= 0:
cmd_name = line[:idx]
rest_of_line = line[idx + 1 :]
else:
cmd_name = line
rest_of_line = ""
cmd_cls = COMMAND_MAP[cmd_name]
return cmd_cls.from_line(rest_of_line) |
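Hypothetical usage, assuming "PING" is one of the command names registered in COMMAND_MAP:
cmd = parse_command_from_line("PING 1663155188")
# The first word selects the command class from COMMAND_MAP; everything after
# the first space ("1663155188") is passed to that class's from_line constructor.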
Collect stream updates with the same token together
Given a series of updates returned by Stream.get_updates_since(), collects
the updates which share the same stream_id together.
For example:
[(1, a), (1, b), (2, c), (3, d), (3, e)]
becomes:
[
(1, [a, b]),
(2, [c]),
(3, [d, e]),
] | def _batch_updates(
updates: Iterable[Tuple[UpdateToken, UpdateRow]]
) -> Iterator[Tuple[UpdateToken, List[UpdateRow]]]:
"""Collect stream updates with the same token together
Given a series of updates returned by Stream.get_updates_since(), collects
the updates which share the same stream_id together.
For example:
[(1, a), (1, b), (2, c), (3, d), (3, e)]
becomes:
[
(1, [a, b]),
(2, [c]),
(3, [d, e]),
]
"""
update_iter = iter(updates)
first_update = next(update_iter, None)
if first_update is None:
# empty input
return
current_batch_token = first_update[0]
current_batch = [first_update[1]]
for token, row in update_iter:
if token != current_batch_token:
# different token to the previous row: flush the previous
# batch and start anew
yield current_batch_token, current_batch
current_batch_token = token
current_batch = []
current_batch.append(row)
# flush the final batch
yield current_batch_token, current_batch |
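The docstring's example, written as a quick check; the row payloads are just placeholder strings.
updates = [(1, "a"), (1, "b"), (2, "c"), (3, "d"), (3, "e")]
assert list(_batch_updates(updates)) == [
    (1, ["a", "b"]),
    (2, ["c"]),
    (3, ["d", "e"]),
]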
Creates a connection to Redis that is lazily set up and reconnects if the
connection is lost. | def lazyConnection(
hs: "HomeServer",
host: str = "localhost",
port: int = 6379,
dbid: Optional[int] = None,
reconnect: bool = True,
password: Optional[str] = None,
replyTimeout: int = 30,
) -> ConnectionHandler:
"""Creates a connection to Redis that is lazily set up and reconnects if the
connection is lost.
"""
uuid = "%s:%d" % (host, port)
factory = SynapseRedisFactory(
hs,
uuid=uuid,
dbid=dbid,
poolsize=1,
isLazy=True,
handler=ConnectionHandler,
password=password,
replyTimeout=replyTimeout,
)
factory.continueTrying = reconnect
reactor = hs.get_reactor()
if hs.config.redis.redis_use_tls:
ssl_context_factory = ClientContextFactory(hs.config.redis)
reactor.connectSSL(
host,
port,
factory,
ssl_context_factory,
timeout=30,
bindAddress=None,
)
else:
reactor.connectTCP(
host,
port,
factory,
timeout=30,
bindAddress=None,
)
return factory.handler |
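A hedged sketch of how this might be wired up; the host, port, and password below are placeholders, and `hs` is assumed to be a configured HomeServer.
redis = lazyConnection(hs, host="redis.internal", port=6379, password="hunter2")
# `redis` is a ConnectionHandler: commands can be issued straight away and are
# queued until the underlying TCP (or TLS) connection is actually established,
# e.g. `await redis.ping()` from async code.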
Creates a connection to Redis that is lazily set up and reconnects if the
connection is lost.
Returns:
A subclass of ConnectionHandler, which is a UnixConnectionHandler in this case. | def lazyUnixConnection(
hs: "HomeServer",
path: str = "/tmp/redis.sock",
dbid: Optional[int] = None,
reconnect: bool = True,
password: Optional[str] = None,
replyTimeout: int = 30,
) -> ConnectionHandler:
"""Creates a connection to Redis that is lazily set up and reconnects if the
connection is lost.
Returns:
A subclass of ConnectionHandler, which is a UnixConnectionHandler in this case.
"""
uuid = path
factory = SynapseRedisFactory(
hs,
uuid=uuid,
dbid=dbid,
poolsize=1,
isLazy=True,
handler=UnixConnectionHandler,
password=password,
replyTimeout=replyTimeout,
)
factory.continueTrying = reconnect
reactor = hs.get_reactor()
reactor.connectUNIX(
path,
factory,
timeout=30,
checkPID=False,
)
return factory.handler |
Takes a list of updates of form [(token, row)] and sets the token to
None for all rows where the next row has the same token. This is used to
implement batching.
For example:
[(1, _), (1, _), (2, _), (3, _), (3, _)]
becomes:
[(None, _), (1, _), (2, _), (None, _), (3, _)] | def _batch_updates(
updates: List[Tuple[Token, StreamRow]]
) -> List[Tuple[Optional[Token], StreamRow]]:
"""Takes a list of updates of form [(token, row)] and sets the token to
None for all rows where the next row has the same token. This is used to
implement batching.
For example:
[(1, _), (1, _), (2, _), (3, _), (3, _)]
becomes:
[(None, _), (1, _), (2, _), (None, _), (3, _)]
"""
if not updates:
return []
new_updates: List[Tuple[Optional[Token], StreamRow]] = []
for i, update in enumerate(updates[:-1]):
if update[0] == updates[i + 1][0]:
new_updates.append((None, update[1]))
else:
new_updates.append(update)
new_updates.append(updates[-1])
return new_updates |
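The transformation from the docstring, as a concrete check; row payloads are placeholders.
updates = [(1, "a"), (1, "b"), (2, "c"), (3, "d"), (3, "e")]
assert _batch_updates(updates) == [
    (None, "a"),
    (1, "b"),
    (2, "c"),
    (None, "d"),
    (3, "e"),
]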
Takes a current token callback function for a single writer stream
that doesn't take an instance name parameter and wraps it in a function that
does accept an instance name parameter but ignores it. | def current_token_without_instance(
current_token: Callable[[], int]
) -> Callable[[str], int]:
"""Takes a current token callback function for a single writer stream
that doesn't take an instance name parameter and wraps it in a function that
does accept an instance name parameter but ignores it.
"""
return lambda instance_name: current_token() |
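A tiny illustration of the wrapper's behaviour:
def get_token() -> int:
    return 42
wrapped = current_token_without_instance(get_token)
assert wrapped("worker-1") == 42  # the instance name is accepted but ignored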
Makes a suitable function for use as an `update_function` that queries
the master process for updates. | def make_http_update_function(hs: "HomeServer", stream_name: str) -> UpdateFunction:
"""Makes a suitable function for use as an `update_function` that queries
the master process for updates.
"""
client = ReplicationGetStreamUpdates.make_client(hs)
async def update_function(
instance_name: str, from_token: int, upto_token: int, limit: int
) -> StreamUpdateResult:
result = await client(
instance_name=instance_name,
stream_name=stream_name,
from_token=from_token,
upto_token=upto_token,
)
return result["updates"], result["upto_token"], result["limited"]
return update_function |
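Hypothetical use on a worker, assuming `hs` is that worker's HomeServer and "events" names an existing stream:
update_fn = make_http_update_function(hs, "events")
# Later, inside async code, the replication machinery would call something like
#     updates, upto_token, limited = await update_fn("master", 10, 20, 100)
# which fetches the rows between tokens 10 and 20 from the main process over HTTP.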
Returns a Twisted web resource which handles '.well-known' requests | def well_known_resource(hs: "HomeServer") -> Resource:
"""Returns a Twisted web resource which handles '.well-known' requests"""
res = Resource()
matrix_resource = Resource()
res.putChild(b"matrix", matrix_resource)
matrix_resource.putChild(b"server", ServerWellKnownResource(hs))
matrix_resource.putChild(b"client", ClientWellKnownResource(hs))
return res |
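A rough sketch of how this resource might be mounted, assuming `root` is the server's top-level Twisted Resource:
root = Resource()
root.putChild(b".well-known", well_known_resource(hs))
# This ends up serving /.well-known/matrix/server and /.well-known/matrix/client.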
Media repo specific APIs. | def register_servlets_for_media_repo(hs: "HomeServer", http_server: HttpServer) -> None:
"""
Media repo specific APIs.
"""
PurgeMediaCacheRestServlet(hs).register(http_server)
QuarantineMediaInRoom(hs).register(http_server)
QuarantineMediaByID(hs).register(http_server)
UnquarantineMediaByID(hs).register(http_server)
QuarantineMediaByUser(hs).register(http_server)
ProtectMediaByID(hs).register(http_server)
UnprotectMediaByID(hs).register(http_server)
ListMediaInRoom(hs).register(http_server)
# XXX DeleteMediaByDateSize must be registered before DeleteMediaByID as
# their URL routes overlap.
DeleteMediaByDateSize(hs).register(http_server)
DeleteMediaByID(hs).register(http_server)
UserMediaRestServlet(hs).register(http_server) |
Returns the list of patterns for an admin endpoint
Args:
path_regex: The regex string to match. This should NOT have a ^
as this will be prefixed.
Returns:
A list of regex patterns. | def admin_patterns(path_regex: str, version: str = "v1") -> Iterable[Pattern]:
"""Returns the list of patterns for an admin endpoint
Args:
path_regex: The regex string to match. This should NOT have a ^
as this will be prefixed.
Returns:
A list of regex patterns.
"""
admin_prefix = "^/_synapse/admin/" + version
patterns = [re.compile(admin_prefix + path_regex)]
return patterns |
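For example (the user endpoint regex below is made up for illustration):
patterns = admin_patterns("/users/(?P<user_id>[^/]+)$", "v2")
# Produces [re.compile("^/_synapse/admin/v2/users/(?P<user_id>[^/]+)$")]
assert patterns[0].match("/_synapse/admin/v2/users/@alice:example.org")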
Register all the admin servlets. | def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
"""
Register all the admin servlets.
"""
# Admin servlets aren't registered on workers.
if hs.config.worker.worker_app is not None:
return
register_servlets_for_client_rest_resource(hs, http_server)
BlockRoomRestServlet(hs).register(http_server)
ListRoomRestServlet(hs).register(http_server)
RoomStateRestServlet(hs).register(http_server)
RoomRestServlet(hs).register(http_server)
RoomRestV2Servlet(hs).register(http_server)
RoomMembersRestServlet(hs).register(http_server)
DeleteRoomStatusByDeleteIdRestServlet(hs).register(http_server)
DeleteRoomStatusByRoomIdRestServlet(hs).register(http_server)
JoinRoomAliasServlet(hs).register(http_server)
VersionServlet(hs).register(http_server)
if not hs.config.experimental.msc3861.enabled:
UserAdminServlet(hs).register(http_server)
UserMembershipRestServlet(hs).register(http_server)
if not hs.config.experimental.msc3861.enabled:
UserTokenRestServlet(hs).register(http_server)
UserRestServletV2(hs).register(http_server)
UsersRestServletV2(hs).register(http_server)
UserMediaStatisticsRestServlet(hs).register(http_server)
LargestRoomsStatistics(hs).register(http_server)
EventReportDetailRestServlet(hs).register(http_server)
EventReportsRestServlet(hs).register(http_server)
AccountDataRestServlet(hs).register(http_server)
PushersRestServlet(hs).register(http_server)
MakeRoomAdminRestServlet(hs).register(http_server)
ShadowBanRestServlet(hs).register(http_server)
ForwardExtremitiesRestServlet(hs).register(http_server)
RoomEventContextServlet(hs).register(http_server)
RateLimitRestServlet(hs).register(http_server)
UsernameAvailableRestServlet(hs).register(http_server)
if not hs.config.experimental.msc3861.enabled:
ListRegistrationTokensRestServlet(hs).register(http_server)
NewRegistrationTokenRestServlet(hs).register(http_server)
RegistrationTokenRestServlet(hs).register(http_server)
DestinationMembershipRestServlet(hs).register(http_server)
DestinationResetConnectionRestServlet(hs).register(http_server)
DestinationRestServlet(hs).register(http_server)
ListDestinationsRestServlet(hs).register(http_server)
RoomMessagesRestServlet(hs).register(http_server)
RoomTimestampToEventRestServlet(hs).register(http_server)
UserReplaceMasterCrossSigningKeyRestServlet(hs).register(http_server)
UserByExternalId(hs).register(http_server)
UserByThreePid(hs).register(http_server)
DeviceRestServlet(hs).register(http_server)
DevicesRestServlet(hs).register(http_server)
DeleteDevicesRestServlet(hs).register(http_server)
SendServerNoticeServlet(hs).register(http_server)
BackgroundUpdateEnabledRestServlet(hs).register(http_server)
BackgroundUpdateRestServlet(hs).register(http_server)
BackgroundUpdateStartJobRestServlet(hs).register(http_server)
ExperimentalFeaturesRestServlet(hs).register(http_server) |
Register only the servlets which need to be exposed on /_matrix/client/xxx | def register_servlets_for_client_rest_resource(
hs: "HomeServer", http_server: HttpServer
) -> None:
"""Register only the servlets which need to be exposed on /_matrix/client/xxx"""
WhoisRestServlet(hs).register(http_server)
PurgeHistoryStatusRestServlet(hs).register(http_server)
PurgeHistoryRestServlet(hs).register(http_server)
# The following resources can only be run on the main process.
if hs.config.worker.worker_app is None:
DeactivateAccountRestServlet(hs).register(http_server)
if not hs.config.experimental.msc3861.enabled:
ResetPasswordRestServlet(hs).register(http_server)
SearchUsersRestServlet(hs).register(http_server)
if not hs.config.experimental.msc3861.enabled:
UserRegisterServlet(hs).register(http_server)
AccountValidityRenewServlet(hs).register(http_server)
# Load the media repo ones if we're using them. Otherwise load the servlets which
# don't need a media repo (typically readonly admin APIs).
if hs.config.media.can_load_media_repo:
register_servlets_for_media_repo(hs, http_server)
else:
ListMediaInRoom(hs).register(http_server) |
Raises a SynapseError if a given next_link value is invalid
next_link is valid if the scheme is http(s) and the next_link_domain_whitelist config
option is either empty or contains a domain that matches the one in the given next_link
Args:
hs: The homeserver object
next_link: The next_link value given by the client
Raises:
SynapseError: If the next_link is invalid | def assert_valid_next_link(hs: "HomeServer", next_link: str) -> None:
"""
Raises a SynapseError if a given next_link value is invalid
next_link is valid if the scheme is http(s) and the next_link_domain_whitelist config
option is either empty or contains a domain that matches the one in the given next_link
Args:
hs: The homeserver object
next_link: The next_link value given by the client
Raises:
SynapseError: If the next_link is invalid
"""
valid = True
# Parse the contents of the URL
next_link_parsed = urlparse(next_link)
# Scheme must not point to the local drive
if next_link_parsed.scheme == "file":
valid = False
# If the domain whitelist is set, the domain must be in it
if (
valid
and hs.config.server.next_link_domain_whitelist is not None
and next_link_parsed.hostname not in hs.config.server.next_link_domain_whitelist
):
valid = False
if not valid:
raise SynapseError(
400,
"'next_link' domain not included in whitelist, or not http(s)",
errcode=Codes.INVALID_PARAM,
) |
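A hedged sketch of the possible outcomes, assuming `hs` is configured with next_link_domain_whitelist = ["example.org"]:
assert_valid_next_link(hs, "https://example.org/verify")      # passes silently
# assert_valid_next_link(hs, "file:///etc/passwd")            # raises SynapseError (400)
# assert_valid_next_link(hs, "https://evil.example.com/x")    # raises SynapseError (400)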