response | instruction
---|---
The fully read marker and push rules cannot be directly set via /account_data. | def _check_can_set_account_data_type(account_data_type: str) -> None:
"""The fully read marker and push rules cannot be directly set via /account_data."""
if account_data_type == ReceiptTypes.FULLY_READ:
raise SynapseError(
405,
"Cannot set m.fully_read through this API."
" Use /rooms/!roomId:server.name/read_markers",
Codes.BAD_JSON,
)
elif account_data_type == AccountDataTypes.PUSH_RULES:
raise SynapseError(
405,
"Cannot set m.push_rules through this API. Use /pushrules",
Codes.BAD_JSON,
) |
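As a rough illustration of the behaviour (assuming `ReceiptTypes.FULLY_READ` and `AccountDataTypes.PUSH_RULES` are the usual `m.fully_read` / `m.push_rules` constants):

```python
_check_can_set_account_data_type("im.vector.setting")  # returns None: allowed
_check_can_set_account_data_type("m.fully_read")       # raises SynapseError(405, ...)
_check_can_set_account_data_type("m.push_rules")       # raises SynapseError(405, ...)
```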
Return an entry for the login flow dict
Returns an entry suitable for inclusion in "identity_providers" in the
response to GET /_matrix/client/r0/login
Args:
idp: the identity provider to describe | def _get_auth_flow_dict_for_idp(idp: SsoIdentityProvider) -> JsonDict:
"""Return an entry for the login flow dict
Returns an entry suitable for inclusion in "identity_providers" in the
response to GET /_matrix/client/r0/login
Args:
idp: the identity provider to describe
"""
e: JsonDict = {"id": idp.idp_id, "name": idp.idp_name}
if idp.idp_icon:
e["icon"] = idp.idp_icon
if idp.idp_brand:
e["brand"] = idp.idp_brand
return e |
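A minimal sketch of the entry this produces, using a hypothetical identity provider whose optional icon and brand attributes are both set:

```python
# Given an SsoIdentityProvider with idp_id="oidc-github", idp_name="GitHub",
# idp_icon="mxc://example.com/abc123" and idp_brand="github":
_get_auth_flow_dict_for_idp(idp)
# -> {"id": "oidc-github", "name": "GitHub",
#     "icon": "mxc://example.com/abc123", "brand": "github"}
```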
Ensure that the SSO handlers are loaded, if they are enabled by configuration.
This is mostly useful to ensure that the CAS/SAML/OIDC handlers register themselves
with the main SsoHandler.
It's safe to call this multiple times. | def _load_sso_handlers(hs: "HomeServer") -> None:
"""Ensure that the SSO handlers are loaded, if they are enabled by configuration.
This is mostly useful to ensure that the CAS/SAML/OIDC handlers register themselves
with the main SsoHandler.
It's safe to call this multiple times.
"""
if hs.config.cas.cas_enabled:
hs.get_cas_handler()
if hs.config.saml2.saml2_enabled:
hs.get_saml_handler()
if hs.config.oidc.oidc_enabled:
hs.get_oidc_handler() |
Turn a sequence of path components into a rule spec
Args:
path: the URL path components.
Returns:
rule spec, containing scope/template/rule_id entries, and possibly attr.
Raises:
UnrecognizedRequestError if the path components cannot be parsed. | def _rule_spec_from_path(path: List[str]) -> RuleSpec:
"""Turn a sequence of path components into a rule spec
Args:
path: the URL path components.
Returns:
rule spec, containing scope/template/rule_id entries, and possibly attr.
Raises:
UnrecognizedRequestError if the path components cannot be parsed.
"""
if len(path) < 2:
raise UnrecognizedRequestError()
if path[0] != "pushrules":
raise UnrecognizedRequestError()
scope = path[1]
path = path[2:]
if scope != "global":
raise UnrecognizedRequestError()
if len(path) == 0:
raise UnrecognizedRequestError()
template = path[0]
path = path[1:]
if len(path) == 0 or len(path[0]) == 0:
raise UnrecognizedRequestError()
rule_id = path[0]
path = path[1:]
attr = None
if len(path) > 0 and len(path[0]) > 0:
attr = path[0]
return RuleSpec(scope, template, rule_id, attr) |
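A usage sketch, assuming `RuleSpec` is a simple container with `scope`/`template`/`rule_id`/`attr` fields:

```python
# Path components for .../pushrules/global/override/.m.rule.foo/enabled
_rule_spec_from_path(["pushrules", "global", "override", ".m.rule.foo", "enabled"])
# -> RuleSpec(scope="global", template="override", rule_id=".m.rule.foo", attr="enabled")

# Without a trailing attribute component, attr is None:
_rule_spec_from_path(["pushrules", "global", "content", "my_rule"])
# -> RuleSpec(scope="global", template="content", rule_id="my_rule", attr=None)
```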
Get a suitable flows list for registration
Args:
config: server configuration
auth_handler: authorization handler
Returns: a list of supported flows | def _calculate_registration_flows(
config: HomeServerConfig, auth_handler: AuthHandler
) -> List[List[str]]:
"""Get a suitable flows list for registration
Args:
config: server configuration
auth_handler: authorization handler
Returns: a list of supported flows
"""
# FIXME: need a better error than "no auth flow found" for scenarios
# where we required 3PID for registration but the user didn't give one
require_email = "email" in config.registration.registrations_require_3pid
require_msisdn = "msisdn" in config.registration.registrations_require_3pid
show_msisdn = True
show_email = True
if config.registration.disable_msisdn_registration:
show_msisdn = False
require_msisdn = False
enabled_auth_types = auth_handler.get_enabled_auth_types()
if LoginType.EMAIL_IDENTITY not in enabled_auth_types:
show_email = False
if require_email:
raise ConfigError(
"Configuration requires email address at registration, but email "
"validation is not configured"
)
if LoginType.MSISDN not in enabled_auth_types:
show_msisdn = False
if require_msisdn:
raise ConfigError(
"Configuration requires msisdn at registration, but msisdn "
"validation is not configured"
)
flows = []
# only support 3PIDless registration if no 3PIDs are required
if not require_email and not require_msisdn:
# Add a dummy step here, otherwise if a client completes
# recaptcha first we'll assume they were going for this flow
# and complete the request, when they could have been trying to
# complete one of the flows with email/msisdn auth.
flows.append([LoginType.DUMMY])
# only support the email-only flow if we don't require MSISDN 3PIDs
if show_email and not require_msisdn:
flows.append([LoginType.EMAIL_IDENTITY])
# only support the MSISDN-only flow if we don't require email 3PIDs
if show_msisdn and not require_email:
flows.append([LoginType.MSISDN])
if show_email and show_msisdn:
# always let users provide both MSISDN & email
flows.append([LoginType.MSISDN, LoginType.EMAIL_IDENTITY])
# Add a flow that doesn't require any 3pids, if the config requests it.
if config.registration.enable_registration_token_3pid_bypass:
flows.append([LoginType.REGISTRATION_TOKEN])
# Prepend m.login.terms to all flows if we're requiring consent
if config.consent.user_consent_at_registration:
for flow in flows:
flow.insert(0, LoginType.TERMS)
# Prepend recaptcha to all flows if we're requiring captcha
if config.captcha.enable_registration_captcha:
for flow in flows:
flow.insert(0, LoginType.RECAPTCHA)
# Prepend registration token to all flows if we're requiring a token
if config.registration.registration_requires_token:
for flow in flows:
if LoginType.REGISTRATION_TOKEN not in flow:
flow.insert(0, LoginType.REGISTRATION_TOKEN)
return flows |
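As one illustrative scenario (the config values are hypothetical): if no 3PIDs are required, email auth is enabled, MSISDN registration is disabled, and registration captcha is turned on, the function would return roughly:

```python
flows = _calculate_registration_flows(config, auth_handler)
# flows == [
#     [LoginType.RECAPTCHA, LoginType.DUMMY],
#     [LoginType.RECAPTCHA, LoginType.EMAIL_IDENTITY],
# ]
```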
Registers a transaction-based path.
This registers two paths:
PUT regex_string/$txnid
POST regex_string
Args:
regex_string: The regex string to register. Must NOT have a
trailing $ as this string will be appended to.
http_server: The http_server to register paths with. | def register_txn_path(
servlet: RestServlet,
regex_string: str,
http_server: HttpServer,
) -> None:
"""Registers a transaction-based path.
This registers two paths:
PUT regex_string/$txnid
POST regex_string
Args:
regex_string: The regex string to register. Must NOT have a
trailing $ as this string will be appended to.
http_server: The http_server to register paths with.
"""
on_POST = getattr(servlet, "on_POST", None)
on_PUT = getattr(servlet, "on_PUT", None)
if on_POST is None or on_PUT is None:
raise RuntimeError("on_POST and on_PUT must exist when using register_txn_path")
http_server.register_paths(
"POST",
client_patterns(regex_string + "$", v1=True),
on_POST,
servlet.__class__.__name__,
)
http_server.register_paths(
"PUT",
client_patterns(regex_string + "/(?P<txn_id>[^/]*)$", v1=True),
on_PUT,
servlet.__class__.__name__,
) |
Creates a regex compiled client path with the correct client path
prefix.
Args:
path_regex: The regex string to match. This should NOT have a ^
as this will be prefixed.
releases: An iterable of releases to include this endpoint under.
unstable: If true, include this endpoint under the "unstable" prefix.
v1: If true, include this endpoint under the "api/v1" prefix.
Returns:
An iterable of patterns. | def client_patterns(
path_regex: str,
releases: StrCollection = ("r0", "v3"),
unstable: bool = True,
v1: bool = False,
) -> Iterable[Pattern]:
"""Creates a regex compiled client path with the correct client path
prefix.
Args:
path_regex: The regex string to match. This should NOT have a ^
as this will be prefixed.
releases: An iterable of releases to include this endpoint under.
unstable: If true, include this endpoint under the "unstable" prefix.
v1: If true, include this endpoint under the "api/v1" prefix.
Returns:
An iterable of patterns.
"""
versions = []
if v1:
versions.append("api/v1")
versions.extend(releases)
if unstable:
versions.append("unstable")
if len(versions) == 1:
versions_str = versions[0]
elif len(versions) > 1:
versions_str = "(" + "|".join(versions) + ")"
else:
raise RuntimeError("Must have at least one version for a URL")
return [re.compile("^" + CLIENT_API_PREFIX + "/" + versions_str + path_regex)] |
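For example, assuming `CLIENT_API_PREFIX` is the usual `/_matrix/client` prefix, the default arguments plus `v1=True` yield a single combined pattern:

```python
client_patterns("/login$", v1=True)
# -> [re.compile("^/_matrix/client/(api/v1|r0|v3|unstable)/login$")]
```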
Enforces a maximum limit of a timeline query.
Params:
filter_json: The timeline query to modify.
filter_timeline_limit: The maximum limit to allow, passing -1 will
disable enforcing a maximum limit. | def set_timeline_upper_limit(filter_json: JsonDict, filter_timeline_limit: int) -> None:
"""
Enforces a maximum limit of a timeline query.
Params:
filter_json: The timeline query to modify.
filter_timeline_limit: The maximum limit to allow, passing -1 will
disable enforcing a maximum limit.
"""
if filter_timeline_limit < 0:
return # no upper limits
timeline = filter_json.get("room", {}).get("timeline", {})
if "limit" in timeline:
filter_json["room"]["timeline"]["limit"] = min(
filter_json["room"]["timeline"]["limit"], filter_timeline_limit
) |
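A small sketch of the in-place clamping:

```python
filter_json = {"room": {"timeline": {"limit": 500}}}
set_timeline_upper_limit(filter_json, filter_timeline_limit=100)
# filter_json["room"]["timeline"]["limit"] is now 100

set_timeline_upper_limit(filter_json, filter_timeline_limit=-1)
# -1 disables the cap, so filter_json is left untouched
```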
Wraps an on_POST method to handle InteractiveAuthIncompleteErrors
Takes an on_POST method which returns an Awaitable (errcode, body) response
and adds exception handling to turn an InteractiveAuthIncompleteError into
a 401 response.
Normal usage is:
@interactive_auth_handler
async def on_POST(self, request):
# ...
await self.auth_handler.check_auth | def interactive_auth_handler(orig: C) -> C:
"""Wraps an on_POST method to handle InteractiveAuthIncompleteErrors
Takes an on_POST method which returns an Awaitable (errcode, body) response
and adds exception handling to turn an InteractiveAuthIncompleteError into
a 401 response.
Normal usage is:
@interactive_auth_handler
async def on_POST(self, request):
# ...
await self.auth_handler.check_auth
"""
async def wrapped(*args: Any, **kwargs: Any) -> Tuple[int, JsonDict]:
try:
return await orig(*args, **kwargs)
except InteractiveAuthIncompleteError as e:
return 401, e.result
return cast(C, wrapped) |
Factory method to generate the username picker resource.
This resource gets mounted under /_synapse/client/pick_username and has two
children:
* "account_details": renders the form and handles the POSTed response
* "check": a JSON endpoint which checks if a userid is free. | def pick_username_resource(hs: "HomeServer") -> Resource:
"""Factory method to generate the username picker resource.
This resource gets mounted under /_synapse/client/pick_username and has two
children:
* "account_details": renders the form and handles the POSTed response
* "check": a JSON endpoint which checks if a userid is free.
"""
res = Resource()
res.putChild(b"account_details", AccountDetailsResource(hs))
res.putChild(b"check", AvailabilityCheckResource(hs))
return res |
Builds a resource tree to include synapse-specific client resources
These are resources which should be loaded on all workers which expose a C-S API:
ie, the main process, and any generic workers so configured.
Returns:
map from path to Resource. | def build_synapse_client_resource_tree(hs: "HomeServer") -> Mapping[str, Resource]:
"""Builds a resource tree to include synapse-specific client resources
These are resources which should be loaded on all workers which expose a C-S API:
ie, the main process, and any generic workers so configured.
Returns:
map from path to Resource.
"""
resources = {
# SSO bits. These are always loaded, whether or not SSO login is actually
# enabled (they just won't work very well if it's not)
"/_synapse/client/pick_idp": PickIdpResource(hs),
"/_synapse/client/pick_username": pick_username_resource(hs),
"/_synapse/client/new_user_consent": NewUserConsentResource(hs),
"/_synapse/client/sso_register": SsoRegisterResource(hs),
# Unsubscribe from notification emails link
"/_synapse/client/unsubscribe": UnsubscribeResource(hs),
}
# Expose the JWKS endpoint if OAuth2 delegation is enabled
if hs.config.experimental.msc3861.enabled:
from synapse.rest.synapse.client.jwks import JwksResource
resources["/_synapse/jwks"] = JwksResource(hs)
# provider-specific SSO bits. Only load these if they are enabled, since they
# rely on optional dependencies.
if hs.config.oidc.oidc_enabled:
from synapse.rest.synapse.client.oidc import OIDCResource
resources["/_synapse/client/oidc"] = OIDCResource(hs)
if hs.config.saml2.saml2_enabled:
from synapse.rest.synapse.client.saml2 import SAML2Resource
res = SAML2Resource(hs)
resources["/_synapse/client/saml2"] = res
# This is also mounted under '/_matrix' for backwards-compatibility.
# To be removed in Synapse v1.32.0.
resources["/_matrix/saml2"] = res
return resources |
Deep-copy a structure, carrying out string substitutions on any strings
Args:
x: structure to be copied
substitutions: substitutions to be made - passed into the string '%' operator
Returns:
copy of x | def copy_with_str_subst(x: Any, substitutions: Any) -> Any:
"""Deep-copy a structure, carrying out string substitutions on any strings
Args:
x: structure to be copied
substitutions: substitutions to be made - passed into the string '%' operator
Returns:
copy of x
"""
if isinstance(x, str):
return x % substitutions
if isinstance(x, dict):
return {k: copy_with_str_subst(v, substitutions) for (k, v) in x.items()}
if isinstance(x, (list, tuple)):
return [copy_with_str_subst(y, substitutions) for y in x]
# assume it's uninteresting and can be shallow-copied.
return x |
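A quick usage sketch:

```python
template = {"notice": "Hello %(user)s", "tags": ["%(user)s", 42]}
copy_with_str_subst(template, {"user": "@alice:example.com"})
# -> {"notice": "Hello @alice:example.com", "tags": ["@alice:example.com", 42]}
```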
Takes the state_sets and figures out which keys are conflicted and
which aren't. i.e., which have multiple different event_ids associated
with them in different state sets.
Args:
state_sets:
List of dicts of (type, state_key) -> event_id, which are the
different state groups to resolve.
Returns:
A tuple of (unconflicted_state, conflicted_state), where:
unconflicted_state is a dict mapping (type, state_key)->event_id
for unconflicted state keys.
conflicted_state is a dict mapping (type, state_key) to a set of
event ids for conflicted state keys. | def _seperate(
state_sets: Iterable[StateMap[str]],
) -> Tuple[MutableStateMap[str], MutableStateMap[Set[str]]]:
"""Takes the state_sets and figures out which keys are conflicted and
which aren't. i.e., which have multiple different event_ids associated
with them in different state sets.
Args:
state_sets:
List of dicts of (type, state_key) -> event_id, which are the
different state groups to resolve.
Returns:
A tuple of (unconflicted_state, conflicted_state), where:
unconflicted_state is a dict mapping (type, state_key)->event_id
for unconflicted state keys.
conflicted_state is a dict mapping (type, state_key) to a set of
event ids for conflicted state keys.
"""
state_set_iterator = iter(state_sets)
unconflicted_state = dict(next(state_set_iterator))
conflicted_state: MutableStateMap[Set[str]] = {}
for state_set in state_set_iterator:
for key, value in state_set.items():
# Check if there is an unconflicted entry for the state key.
unconflicted_value = unconflicted_state.get(key)
if unconflicted_value is None:
# There isn't an unconflicted entry so check if there is a
# conflicted entry.
ls = conflicted_state.get(key)
if ls is None:
# There wasn't a conflicted entry so haven't seen this key before.
# Therefore it isn't conflicted yet.
unconflicted_state[key] = value
else:
# This key is already conflicted, add our value to the conflict set.
ls.add(value)
elif unconflicted_value != value:
# If the unconflicted value is not the same as our value then we
# have a new conflict. So move the key from the unconflicted_state
# to the conflicted state.
conflicted_state[key] = {value, unconflicted_value}
unconflicted_state.pop(key, None)
return unconflicted_state, conflicted_state |
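A sketch of how a conflict is detected, with state keys written as (type, state_key) tuples and event IDs abbreviated:

```python
state_sets = [
    {("m.room.name", ""): "$A", ("m.room.topic", ""): "$T"},
    {("m.room.name", ""): "$B", ("m.room.topic", ""): "$T"},
]
unconflicted, conflicted = _seperate(state_sets)
# unconflicted == {("m.room.topic", ""): "$T"}
# conflicted == {("m.room.name", ""): {"$A", "$B"}}
```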
Args:
room_version: The room version.
unconflicted_state: The unconflicted state map.
conflicted_state: The conflicted state map.
state_map:
Returns:
A map from state key to event id. | def _create_auth_events_from_maps(
room_version: RoomVersion,
unconflicted_state: StateMap[str],
conflicted_state: StateMap[Set[str]],
state_map: Dict[str, EventBase],
) -> StateMap[str]:
"""
Args:
room_version: The room version.
unconflicted_state: The unconflicted state map.
conflicted_state: The conflicted state map.
state_map:
Returns:
A map from state key to event id.
"""
auth_events = {}
for event_ids in conflicted_state.values():
for event_id in event_ids:
if event_id in state_map:
keys = event_auth.auth_types_for_event(
room_version, state_map[event_id]
)
for key in keys:
if key not in auth_events:
auth_event_id = unconflicted_state.get(key, None)
if auth_event_id:
auth_events[key] = auth_event_id
return auth_events |
This is where we actually decide which of the conflicted state to
use.
We resolve conflicts in the following order:
1. power levels
2. join rules
3. memberships
4. other events. | def _resolve_state_events(
room_version: RoomVersion,
conflicted_state: StateMap[List[EventBase]],
auth_events: MutableStateMap[EventBase],
) -> StateMap[EventBase]:
"""This is where we actually decide which of the conflicted state to
use.
We resolve conflicts in the following order:
1. power levels
2. join rules
3. memberships
4. other events.
"""
resolved_state = {}
if POWER_KEY in conflicted_state:
events = conflicted_state[POWER_KEY]
logger.debug("Resolving conflicted power levels %r", events)
resolved_state[POWER_KEY] = _resolve_auth_events(
room_version, events, auth_events
)
auth_events.update(resolved_state)
for key, events in conflicted_state.items():
if key[0] == EventTypes.JoinRules:
logger.debug("Resolving conflicted join rules %r", events)
resolved_state[key] = _resolve_auth_events(
room_version, events, auth_events
)
auth_events.update(resolved_state)
for key, events in conflicted_state.items():
if key[0] == EventTypes.Member:
logger.debug("Resolving conflicted member lists %r", events)
resolved_state[key] = _resolve_auth_events(
room_version, events, auth_events
)
auth_events.update(resolved_state)
for key, events in conflicted_state.items():
if key not in resolved_state:
logger.debug("Resolving conflicted state %r:%r", key, events)
resolved_state[key] = _resolve_normal_events(events, auth_events)
return resolved_state |
Return the unconflicted and conflicted state. This is different than in
the original algorithm, as this defines a key to be conflicted if one of
the state sets doesn't have that key.
Args:
state_sets
Returns:
A tuple of unconflicted and conflicted state. The conflicted state dict
is a map from type/state_key to set of event IDs | def _seperate(
state_sets: Iterable[StateMap[str]],
) -> Tuple[StateMap[str], StateMap[Set[str]]]:
"""Return the unconflicted and conflicted state. This is different than in
the original algorithm, as this defines a key to be conflicted if one of
the state sets doesn't have that key.
Args:
state_sets
Returns:
A tuple of unconflicted and conflicted state. The conflicted state dict
is a map from type/state_key to set of event IDs
"""
unconflicted_state = {}
conflicted_state = {}
for key in set(itertools.chain.from_iterable(state_sets)):
event_ids = {state_set.get(key) for state_set in state_sets}
if len(event_ids) == 1:
unconflicted_state[key] = event_ids.pop()
else:
event_ids.discard(None)
conflicted_state[key] = event_ids
# mypy doesn't understand that discarding None above means that conflicted
# state is StateMap[Set[str]], not StateMap[Set[Optional[Str]]].
return unconflicted_state, conflicted_state |
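In this v2 variant, a key that is simply missing from one of the state sets also counts as conflicted, e.g.:

```python
state_sets = [
    {("m.room.name", ""): "$A", ("m.room.topic", ""): "$T"},
    {("m.room.topic", ""): "$T"},
]
unconflicted, conflicted = _seperate(state_sets)
# unconflicted == {("m.room.topic", ""): "$T"}
# conflicted == {("m.room.name", ""): {"$A"}}   # absent from the second set
```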
Return whether or not the event is a "power event", as defined by the
v2 state resolution algorithm
Args:
event
Returns:
True if the event is a power event. | def _is_power_event(event: EventBase) -> bool:
"""Return whether or not the event is a "power event", as defined by the
v2 state resolution algorithm
Args:
event
Returns:
True if the event is a power event.
"""
if (event.type, event.state_key) in (
(EventTypes.PowerLevels, ""),
(EventTypes.JoinRules, ""),
(EventTypes.Create, ""),
):
return True
if event.type == EventTypes.Member:
if event.membership in ("leave", "ban"):
return event.sender != event.state_key
return False |
Performs a lexicographic reverse topological sort on the graph.
This returns a reverse topological sort (i.e. if node A references B then B
appears before A in the sort), with ties broken lexicographically based on
return value of the `key` function.
NOTE: `graph` is modified during the sort.
Args:
graph: A representation of the graph where each node is a key in the
dict and its value are the nodes edges.
key: A function that takes a node and returns a value that is comparable
and used to order nodes
Yields:
The next node in the topological sort | def lexicographical_topological_sort(
graph: Dict[str, Set[str]], key: Callable[[str], Any]
) -> Generator[str, None, None]:
"""Performs a lexicographic reverse topological sort on the graph.
This returns a reverse topological sort (i.e. if node A references B then B
appears before A in the sort), with ties broken lexicographically based on
return value of the `key` function.
NOTE: `graph` is modified during the sort.
Args:
graph: A representation of the graph where each node is a key in the
dict and its value are the nodes edges.
key: A function that takes a node and returns a value that is comparable
and used to order nodes
Yields:
The next node in the topological sort
"""
# Note, this is basically Kahn's algorithm except we look at nodes with no
# outgoing edges, c.f.
# https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm
outdegree_map = graph
reverse_graph: Dict[str, Set[str]] = {}
# Lists of nodes with zero out degree. Is actually a tuple of
# `(key(node), node)` so that sorting does the right thing
zero_outdegree = []
for node, edges in graph.items():
if len(edges) == 0:
zero_outdegree.append((key(node), node))
reverse_graph.setdefault(node, set())
for edge in edges:
reverse_graph.setdefault(edge, set()).add(node)
# heapq is a built in implementation of a sorted queue.
heapq.heapify(zero_outdegree)
while zero_outdegree:
_, node = heapq.heappop(zero_outdegree)
for parent in reverse_graph[node]:
out = outdegree_map[parent]
out.discard(node)
if len(out) == 0:
heapq.heappush(zero_outdegree, (key(parent), parent))
yield node |
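A small worked example (recall that `graph` maps each node to the set of nodes it points at, and is mutated by the sort):

```python
graph = {
    "A": {"B", "C"},
    "B": {"D"},
    "C": {"D"},
    "D": set(),
}
list(lexicographical_topological_sort(graph, key=lambda node: node))
# -> ["D", "B", "C", "A"]   (reverse topological order, ties broken by `key`)
```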
Given a resolved state, and a set of input state groups, pick one to base
a new state group on (if any), and return an appropriately-constructed
_StateCacheEntry.
Args:
new_state: resolved state map (mapping from (type, state_key) to event_id)
state_groups_ids:
map from state group id to the state in that state group (where
'state' is a map from state key to event id)
Returns:
The cache entry. | def _make_state_cache_entry(
new_state: StateMap[str], state_groups_ids: Mapping[int, StateMap[str]]
) -> _StateCacheEntry:
"""Given a resolved state, and a set of input state groups, pick one to base
a new state group on (if any), and return an appropriately-constructed
_StateCacheEntry.
Args:
new_state: resolved state map (mapping from (type, state_key) to event_id)
state_groups_ids:
map from state group id to the state in that state group (where
'state' is a map from state key to event id)
Returns:
The cache entry.
"""
# if the new state matches any of the input state groups, we can
# use that state group again. Otherwise we will generate a state_id
# which will be used as a cache key for future resolutions, but
# not get persisted.
# first look for exact matches
new_state_event_ids = set(new_state.values())
for sg, state in state_groups_ids.items():
if len(new_state_event_ids) != len(state):
continue
old_state_event_ids = set(state.values())
if new_state_event_ids == old_state_event_ids:
# got an exact match.
return _StateCacheEntry(state=None, state_group=sg)
# TODO: We want to create a state group for this set of events, to
# increase cache hits, but we need to make sure that it doesn't
# end up as a prev_group without being added to the database
# failing that, look for the closest match.
prev_group = None
delta_ids: Optional[StateMap[str]] = None
for old_group, old_state in state_groups_ids.items():
if old_state.keys() - new_state.keys():
# Currently we don't support deltas that remove keys from the state
# map, so we have to ignore this group as a candidate to base the
# new group on.
continue
n_delta_ids = {k: v for k, v in new_state.items() if old_state.get(k) != v}
if not delta_ids or len(n_delta_ids) < len(delta_ids):
prev_group = old_group
delta_ids = n_delta_ids
if prev_group is not None:
# If we have a prev group and deltas then we can drop the new state from
# the cache (to reduce memory usage).
return _StateCacheEntry(
state=None, state_group=None, prev_group=prev_group, delta_ids=delta_ids
)
else:
return _StateCacheEntry(state=new_state, state_group=None) |
Runs a schema delta to add a constraint to the table. This should be run
in a schema delta file.
For PostgreSQL the constraint is added and validated in the background.
For SQLite the table is recreated and data copied across immediately. This
is done by the caller passing in a script to create the new table. Note that
table indexes and triggers are copied over automatically.
There must be a corresponding call to
`register_background_validate_constraint_and_delete_rows` to register the
background update in one of the data store classes.
Attributes:
txn ordering, update_name: For adding a row to background_updates table.
table: The table to add the constraint to.
constraint_name: The name of the new constraint.
constraint: A `Constraint` object describing the constraint.
sqlite_table_name: For SQLite, the name of the empty copy of the table.
sqlite_table_schema: A SQL script for creating the above table. | def run_validate_constraint_and_delete_rows_schema_delta(
txn: "LoggingTransaction",
ordering: int,
update_name: str,
table: str,
constraint_name: str,
constraint: Constraint,
sqlite_table_name: str,
sqlite_table_schema: str,
) -> None:
"""Runs a schema delta to add a constraint to the table. This should be run
in a schema delta file.
For PostgreSQL the constraint is added and validated in the background.
For SQLite the table is recreated and data copied across immediately. This
is done by the caller passing in a script to create the new table. Note that
table indexes and triggers are copied over automatically.
There must be a corresponding call to
`register_background_validate_constraint_and_delete_rows` to register the
background update in one of the data store classes.
Attributes:
txn ordering, update_name: For adding a row to background_updates table.
table: The table to add the constraint to.
constraint_name: The name of the new constraint.
constraint: A `Constraint` object describing the constraint.
sqlite_table_name: For SQLite, the name of the empty copy of the table.
sqlite_table_schema: A SQL script for creating the above table.
"""
if isinstance(txn.database_engine, PostgresEngine):
# For postgres we can just add the constraint and mark it as NOT VALID,
# and then insert a background update to go and check the validity in
# the background.
txn.execute(
f"""
ALTER TABLE {table}
ADD CONSTRAINT {constraint_name} {constraint.make_constraint_clause_postgres()}
NOT VALID
"""
)
txn.execute(
"INSERT INTO background_updates (ordering, update_name, progress_json) VALUES (?, ?, '{}')",
(ordering, update_name),
)
else:
# For SQLite, we:
# 1. fetch all indexes/triggers/etc related to the table
# 2. create an empty copy of the table
# 3. copy across the rows (that satisfy the check)
# 4. replace the old table with the new table.
# 5. add back all the indexes/triggers/etc
# Fetch the indexes/triggers/etc. Note that `sql` column being null is
# due to indexes being auto created based on the class definition (e.g.
# PRIMARY KEY), and so don't need to be recreated.
txn.execute(
"""
SELECT sql FROM sqlite_master
WHERE tbl_name = ? AND type != 'table' AND sql IS NOT NULL
""",
(table,),
)
extras = [row[0] for row in txn]
txn.execute(sqlite_table_schema)
sql = f"""
INSERT INTO {sqlite_table_name} SELECT * FROM {table}
WHERE {constraint.make_check_clause(table)}
"""
txn.execute(sql)
txn.execute(f"DROP TABLE {table}")
txn.execute(f"ALTER TABLE {sqlite_table_name} RENAME TO {table}")
for extra in extras:
txn.execute(extra) |
Get the connection pool for the database. | def make_pool(
reactor: IReactorCore,
db_config: DatabaseConnectionConfig,
engine: BaseDatabaseEngine,
) -> adbapi.ConnectionPool:
"""Get the connection pool for the database."""
# By default enable `cp_reconnect`. We need to fiddle with db_args in case
# someone has explicitly set `cp_reconnect`.
db_args = dict(db_config.config.get("args", {}))
db_args.setdefault("cp_reconnect", True)
def _on_new_connection(conn: Connection) -> None:
# Ensure we have a logging context so we can correctly track queries,
# etc.
with LoggingContext("db.on_new_connection"):
engine.on_new_connection(
LoggingDatabaseConnection(conn, engine, "on_new_connection")
)
connection_pool = adbapi.ConnectionPool(
db_config.config["name"],
cp_reactor=reactor,
cp_openfun=_on_new_connection,
**db_args,
)
register_threadpool(f"database-{db_config.name}", connection_pool.threadpool)
return connection_pool |
Make a new connection to the database and return it.
Returns:
Connection | def make_conn(
db_config: DatabaseConnectionConfig,
engine: BaseDatabaseEngine,
default_txn_name: str,
) -> "LoggingDatabaseConnection":
"""Make a new connection to the database and return it.
Returns:
Connection
"""
db_params = {
k: v
for k, v in db_config.config.get("args", {}).items()
if not k.startswith("cp_")
}
native_db_conn = engine.module.connect(**db_params)
db_conn = LoggingDatabaseConnection(native_db_conn, engine, default_txn_name)
engine.on_new_connection(db_conn)
return db_conn |
Returns an SQL clause that checks the given column is in the iterable.
On SQLite this expands to `column IN (?, ?, ...)`, whereas on Postgres
it expands to `column = ANY(?)`. While both DBs support the `IN` form,
using the `ANY` form on postgres means that it views queries with
different length iterables as the same, helping the query stats.
Args:
database_engine
column: Name of the column
iterable: The values to check the column against.
Returns:
A tuple of SQL query and the args | def make_in_list_sql_clause(
database_engine: BaseDatabaseEngine, column: str, iterable: Collection[Any]
) -> Tuple[str, list]:
"""Returns an SQL clause that checks the given column is in the iterable.
On SQLite this expands to `column IN (?, ?, ...)`, whereas on Postgres
it expands to `column = ANY(?)`. While both DBs support the `IN` form,
using the `ANY` form on postgres means that it views queries with
different length iterables as the same, helping the query stats.
Args:
database_engine
column: Name of the column
iterable: The values to check the column against.
Returns:
A tuple of SQL query and the args
"""
if database_engine.supports_using_any_list:
# This should hopefully be faster, but also makes postgres query
# stats easier to understand.
return "%s = ANY(?)" % (column,), [list(iterable)]
else:
return "%s IN (%s)" % (column, ",".join("?" for _ in iterable)), list(iterable) |
Returns an SQL clause that checks the given tuple of columns is in the iterable.
Args:
database_engine
columns: Names of the columns in the tuple.
iterable: The tuples to check the columns against.
Returns:
A tuple of SQL query and the args | def make_tuple_in_list_sql_clause(
database_engine: BaseDatabaseEngine,
columns: Tuple[str, ...],
iterable: Collection[Tuple[Any, ...]],
) -> Tuple[str, list]:
"""Returns an SQL clause that checks the given tuple of columns is in the iterable.
Args:
database_engine
columns: Names of the columns in the tuple.
iterable: The tuples to check the columns against.
Returns:
A tuple of SQL query and the args
"""
if len(columns) == 0:
# Should be unreachable due to mypy, as long as the overloads are set up right.
if () in iterable:
return "TRUE", []
else:
return "FALSE", []
if len(columns) == 1:
# Use `= ANY(?)` on postgres.
return make_in_list_sql_clause(
database_engine, next(iter(columns)), [values[0] for values in iterable]
)
# There are multiple columns. Avoid using an `= ANY(?)` clause on postgres, as
# indices are not used when there are multiple columns. Instead, use an `IN`
# expression.
#
# `IN ((?, ...), ...)` with tuples is supported by postgres only, whereas
# `IN (VALUES (?, ...), ...)` is supported by both sqlite and postgres.
# Thus, the latter is chosen.
if len(iterable) == 0:
# A 0-length `VALUES` list is not allowed in sqlite or postgres.
# Also note that a 0-length `IN (...)` clause (not using `VALUES`) is not
# allowed in postgres.
return "FALSE", []
tuple_sql = "(%s)" % (",".join("?" for _ in columns),)
return "(%s) IN (VALUES %s)" % (
",".join(column for column in columns),
",".join(tuple_sql for _ in iterable),
), [value for values in iterable for value in values] |
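For the multi-column case the generated clause and flattened args look roughly like this (`database_engine` again stands in for the engine in use):

```python
clause, args = make_tuple_in_list_sql_clause(
    database_engine,
    ("room_id", "event_id"),
    [("!r1:x", "$e1"), ("!r2:x", "$e2")],
)
# clause == "(room_id,event_id) IN (VALUES (?,?),(?,?))"
# args == ["!r1:x", "$e1", "!r2:x", "$e2"]
```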
Returns a tuple comparison SQL clause
Builds a SQL clause that looks like "(a, b) > (?, ?)"
Args:
keys: A set of (column, value) pairs to be compared.
Returns:
A tuple of SQL query and the args | def make_tuple_comparison_clause(keys: List[Tuple[str, KV]]) -> Tuple[str, List[KV]]:
"""Returns a tuple comparison SQL clause
Builds a SQL clause that looks like "(a, b) > (?, ?)"
Args:
keys: A set of (column, value) pairs to be compared.
Returns:
A tuple of SQL query and the args
"""
return (
"(%s) > (%s)" % (",".join(k[0] for k in keys), ",".join("?" for _ in keys)),
[k[1] for k in keys],
) |
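For example:

```python
make_tuple_comparison_clause([("stream_ordering", 1234), ("instance_name", "master")])
# -> ("(stream_ordering,instance_name) > (?,?)", [1234, "master"])
```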
Prepares a physical database for usage. Will either create all necessary tables
or upgrade from an older schema version.
If `config` is None then prepare_database will assert that no upgrade is
necessary, *or* will create a fresh database if the database is empty.
Args:
db_conn:
database_engine:
config:
application config, or None if we are connecting to an existing
database which we expect to be configured already
databases: The name of the databases that will be used
with this physical database. Defaults to all databases. | def prepare_database(
db_conn: LoggingDatabaseConnection,
database_engine: BaseDatabaseEngine,
config: Optional[HomeServerConfig],
databases: Collection[str] = ("main", "state"),
) -> None:
"""Prepares a physical database for usage. Will either create all necessary tables
or upgrade from an older schema version.
If `config` is None then prepare_database will assert that no upgrade is
necessary, *or* will create a fresh database if the database is empty.
Args:
db_conn:
database_engine:
config:
application config, or None if we are connecting to an existing
database which we expect to be configured already
databases: The name of the databases that will be used
with this physical database. Defaults to all databases.
"""
try:
cur = db_conn.cursor(txn_name="prepare_database")
# sqlite does not automatically start transactions for DDL / SELECT statements,
# so we start one before running anything. This ensures that any upgrades
# are either applied completely, or not at all.
#
# psycopg2 does not automatically start transactions when in autocommit mode.
# While it is technically harmless to nest transactions in postgres, doing so
# results in a warning in Postgres' logs per query. And we'd rather like to
# avoid doing that.
if isinstance(database_engine, Sqlite3Engine) or (
isinstance(database_engine, PostgresEngine) and db_conn.autocommit
):
cur.execute("BEGIN TRANSACTION")
logger.info("%r: Checking existing schema version", databases)
version_info = _get_or_create_schema_state(cur, database_engine)
if version_info:
logger.info(
"%r: Existing schema is %i (+%i deltas)",
databases,
version_info.current_version,
len(version_info.applied_deltas),
)
# config should only be None when we are preparing an in-memory SQLite db,
# which should be empty.
if config is None:
raise ValueError(
"config==None in prepare_database, but database is not empty"
)
# This should be run on all processes, master or worker. The master will
# apply the deltas, while workers will check if any outstanding deltas
# exist and raise a PrepareDatabaseException if they do.
_upgrade_existing_database(
cur,
version_info,
database_engine,
config,
databases=databases,
)
else:
logger.info("%r: Initialising new database", databases)
# if it's a worker app, refuse to upgrade the database, to avoid multiple
# workers doing it at once.
if config and config.worker.worker_app is not None:
raise UpgradeDatabaseException(EMPTY_DATABASE_ON_WORKER_ERROR)
_setup_new_database(cur, database_engine, databases=databases)
# check if any of our configured dynamic modules want a database
if config is not None:
_apply_module_schemas(cur, database_engine, config)
cur.close()
db_conn.commit()
except Exception:
db_conn.rollback()
raise |
Sets up the physical database by finding a base set of "full schemas" and
then applying any necessary deltas, including schemas from the given data
stores.
The "full_schemas" directory has subdirectories named after versions. This
function searches for the highest version less than or equal to
`SCHEMA_VERSION` and executes all .sql files in that directory.
The function will then apply all deltas for all versions after the base
version.
Example directory structure:
schema/
common/
delta/
...
full_schemas/
11/
foo.sql
main/
delta/
...
full_schemas/
3/
test.sql
...
11/
bar.sql
...
In the example foo.sql and bar.sql would be run, and then any delta files
for versions strictly greater than 11.
Note: we apply the full schemas and deltas from the `schema/common`
folder as well as those in the databases specified.
Args:
cur: a database cursor
database_engine
databases: The names of the databases to instantiate on the given physical database. | def _setup_new_database(
cur: LoggingTransaction,
database_engine: BaseDatabaseEngine,
databases: Collection[str],
) -> None:
"""Sets up the physical database by finding a base set of "full schemas" and
then applying any necessary deltas, including schemas from the given data
stores.
The "full_schemas" directory has subdirectories named after versions. This
function searches for the highest version less than or equal to
`SCHEMA_VERSION` and executes all .sql files in that directory.
The function will then apply all deltas for all versions after the base
version.
Example directory structure:
schema/
common/
delta/
...
full_schemas/
11/
foo.sql
main/
delta/
...
full_schemas/
3/
test.sql
...
11/
bar.sql
...
In the example foo.sql and bar.sql would be run, and then any delta files
for versions strictly greater than 11.
Note: we apply the full schemas and deltas from the `schema/common`
folder as well as those in the databases specified.
Args:
cur: a database cursor
database_engine
databases: The names of the databases to instantiate on the given physical database.
"""
# We're about to set up a brand new database so we check that it's
# configured to our liking.
database_engine.check_new_database(cur)
full_schemas_dir = os.path.join(schema_path, "common", "full_schemas")
# First we find the highest full schema version we have
valid_versions = []
for filename in os.listdir(full_schemas_dir):
try:
ver = int(filename)
except ValueError:
continue
if ver <= SCHEMA_VERSION:
valid_versions.append(ver)
if not valid_versions:
raise PrepareDatabaseException(
"Could not find a suitable base set of full schemas"
)
max_current_ver = max(valid_versions)
logger.debug("Initialising schema v%d", max_current_ver)
# Now let's find all the full schema files, both in the common schema and
# in database schemas.
directories = [os.path.join(full_schemas_dir, str(max_current_ver))]
directories.extend(
os.path.join(
schema_path,
database,
"full_schemas",
str(max_current_ver),
)
for database in databases
)
directory_entries: List[_DirectoryListing] = []
for directory in directories:
directory_entries.extend(
_DirectoryListing(file_name, os.path.join(directory, file_name))
for file_name in os.listdir(directory)
)
if isinstance(database_engine, PostgresEngine):
specific = "postgres"
else:
specific = "sqlite"
directory_entries.sort()
for entry in directory_entries:
if entry.file_name.endswith(".sql") or entry.file_name.endswith(
".sql." + specific
):
logger.debug("Applying schema %s", entry.absolute_path)
database_engine.execute_script_file(cur, entry.absolute_path)
cur.execute(
"INSERT INTO schema_version (version, upgraded) VALUES (?,?)",
(max_current_ver, False),
)
_upgrade_existing_database(
cur,
_SchemaState(current_version=max_current_ver, compat_version=None),
database_engine=database_engine,
config=None,
databases=databases,
is_empty=True,
) |
Upgrades an existing physical database.
Delta files can either be SQL stored in *.sql files, or python modules
in *.py.
There can be multiple delta files per version. Synapse will keep track of
which delta files have been applied, and will apply any that haven't been
even if there has been no version bump. This is useful for development
where orthogonal schema changes may happen on separate branches.
Different delta files for the same version *must* be orthogonal and give
the same result when applied in any order. No guarantees are made on the
order of execution of these scripts.
This is a no-op if current_version == SCHEMA_VERSION.
Example directory structure:
schema/
delta/
11/
foo.sql
...
12/
foo.sql
bar.py
...
full_schemas/
...
In the example, if current_version is 11, then foo.sql will be run if and
only if `upgraded` is True. Then `foo.sql` and `bar.py` would be run in
some arbitrary order.
Note: we apply the delta files from the specified data stores as well as
those in the top-level schema. We apply all delta files across data stores
for a version before applying those in the next version.
Args:
cur
current_schema_state: The current version of the schema, as
returned by _get_or_create_schema_state
database_engine
config:
None if we are initialising a blank database, otherwise the application
config
databases: The names of the databases to instantiate
on the given physical database.
is_empty: Is this a blank database? I.e. do we need to run the
upgrade portions of the delta scripts. | def _upgrade_existing_database(
cur: LoggingTransaction,
current_schema_state: _SchemaState,
database_engine: BaseDatabaseEngine,
config: Optional[HomeServerConfig],
databases: Collection[str],
is_empty: bool = False,
) -> None:
"""Upgrades an existing physical database.
Delta files can either be SQL stored in *.sql files, or python modules
in *.py.
There can be multiple delta files per version. Synapse will keep track of
which delta files have been applied, and will apply any that haven't been
even if there has been no version bump. This is useful for development
where orthogonal schema changes may happen on separate branches.
Different delta files for the same version *must* be orthogonal and give
the same result when applied in any order. No guarantees are made on the
order of execution of these scripts.
This is a no-op if current_version == SCHEMA_VERSION.
Example directory structure:
schema/
delta/
11/
foo.sql
...
12/
foo.sql
bar.py
...
full_schemas/
...
In the example, if current_version is 11, then foo.sql will be run if and
only if `upgraded` is True. Then `foo.sql` and `bar.py` would be run in
some arbitrary order.
Note: we apply the delta files from the specified data stores as well as
those in the top-level schema. We apply all delta files across data stores
for a version before applying those in the next version.
Args:
cur
current_schema_state: The current version of the schema, as
returned by _get_or_create_schema_state
database_engine
config:
None if we are initialising a blank database, otherwise the application
config
databases: The names of the databases to instantiate
on the given physical database.
is_empty: Is this a blank database? I.e. do we need to run the
upgrade portions of the delta scripts.
"""
if is_empty:
assert not current_schema_state.applied_deltas
else:
assert config
is_worker = config and config.worker.worker_app is not None
# If the schema version needs to be updated, and we are on a worker, we immediately
# know to bail out as workers cannot update the database schema. Only one process
# must update the database at a time, therefore we delegate this task to the master.
if is_worker and current_schema_state.current_version < SCHEMA_VERSION:
# If the DB is on an older version than we expect then we refuse
# to start the worker (as the main process needs to run first to
# update the schema).
raise UpgradeDatabaseException(
OUTDATED_SCHEMA_ON_WORKER_ERROR
% (SCHEMA_VERSION, current_schema_state.current_version)
)
if (
current_schema_state.compat_version is not None
and current_schema_state.compat_version > SCHEMA_VERSION
):
raise ValueError(
"Cannot use this database as it is too "
+ "new for the server to understand"
)
# some of the deltas assume that server_name is set correctly, so now
# is a good time to run the sanity check.
if not is_empty and "main" in databases:
from synapse.storage.databases.main import check_database_before_upgrade
assert config is not None
check_database_before_upgrade(cur, database_engine, config)
# update schema_compat_version before we run any upgrades, so that if synapse
# gets downgraded again, it won't try to run against the upgraded database.
if (
current_schema_state.compat_version is None
or current_schema_state.compat_version < SCHEMA_COMPAT_VERSION
):
cur.execute("DELETE FROM schema_compat_version")
cur.execute(
"INSERT INTO schema_compat_version(compat_version) VALUES (?)",
(SCHEMA_COMPAT_VERSION,),
)
start_ver = current_schema_state.current_version
# if we got to this schema version by running a full_schema rather than a series
# of deltas, we should not run the deltas for this version.
if not current_schema_state.upgraded:
start_ver += 1
logger.debug("applied_delta_files: %s", current_schema_state.applied_deltas)
if isinstance(database_engine, PostgresEngine):
specific_engine_extension = ".postgres"
else:
specific_engine_extension = ".sqlite"
specific_engine_extensions = (".sqlite", ".postgres")
for v in range(start_ver, SCHEMA_VERSION + 1):
if not is_worker:
logger.info("Applying schema deltas for v%d", v)
cur.execute("DELETE FROM schema_version")
cur.execute(
"INSERT INTO schema_version (version, upgraded) VALUES (?,?)",
(v, True),
)
else:
logger.info("Checking schema deltas for v%d", v)
# We need to search both the global and per data store schema
# directories for schema updates.
# First we find the directories to search in
delta_dir = os.path.join(schema_path, "common", "delta", str(v))
directories = [delta_dir]
for database in databases:
directories.append(os.path.join(schema_path, database, "delta", str(v)))
# Used to check if we have any duplicate file names
file_name_counter: CounterType[str] = Counter()
# Now find which directories have anything of interest.
directory_entries: List[_DirectoryListing] = []
for directory in directories:
logger.debug("Looking for schema deltas in %s", directory)
try:
file_names = os.listdir(directory)
directory_entries.extend(
_DirectoryListing(file_name, os.path.join(directory, file_name))
for file_name in file_names
)
for file_name in file_names:
file_name_counter[file_name] += 1
except FileNotFoundError:
# Data stores can have empty entries for a given version delta.
pass
except OSError:
raise UpgradeDatabaseException(
"Could not open delta dir for version %d: %s" % (v, directory)
)
duplicates = {
file_name for file_name, count in file_name_counter.items() if count > 1
}
if duplicates:
# We don't support using the same file name in the same delta version.
raise PrepareDatabaseException(
"Found multiple delta files with the same name in v%d: %s"
% (
v,
duplicates,
)
)
# We sort to ensure that we apply the delta files in a consistent
# order (to avoid bugs caused by inconsistent directory listing order)
directory_entries.sort()
for entry in directory_entries:
file_name = entry.file_name
relative_path = os.path.join(str(v), file_name)
absolute_path = entry.absolute_path
logger.debug("Found file: %s (%s)", relative_path, absolute_path)
if relative_path in current_schema_state.applied_deltas:
continue
root_name, ext = os.path.splitext(file_name)
if ext == ".py":
# This is a python upgrade module. We need to import into some
# package and then execute its `run_upgrade` function.
if is_worker:
raise PrepareDatabaseException(
UNAPPLIED_DELTA_ON_WORKER_ERROR % relative_path
)
module_name = "synapse.storage.v%d_%s" % (v, root_name)
spec = importlib.util.spec_from_file_location(
module_name, absolute_path
)
if spec is None:
raise RuntimeError(
f"Could not build a module spec for {module_name} at {absolute_path}"
)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module) # type: ignore
if hasattr(module, "run_create"):
logger.info("Running %s:run_create", relative_path)
module.run_create(cur, database_engine)
if not is_empty and hasattr(module, "run_upgrade"):
logger.info("Running %s:run_upgrade", relative_path)
module.run_upgrade(cur, database_engine, config=config)
elif ext == ".pyc" or file_name == "__pycache__":
# Sometimes .pyc files turn up anyway even though we've
# disabled their generation; e.g. from distribution package
# installers. Silently skip it
continue
elif ext == ".sql":
# A plain old .sql file, just read and execute it
if is_worker:
raise PrepareDatabaseException(
UNAPPLIED_DELTA_ON_WORKER_ERROR % relative_path
)
logger.info("Applying schema %s", relative_path)
database_engine.execute_script_file(cur, absolute_path)
elif ext == specific_engine_extension and root_name.endswith(".sql"):
# A .sql file specific to our engine; just read and execute it
if is_worker:
raise PrepareDatabaseException(
UNAPPLIED_DELTA_ON_WORKER_ERROR % relative_path
)
logger.info("Applying engine-specific schema %s", relative_path)
database_engine.execute_script_file(cur, absolute_path)
elif ext in specific_engine_extensions and root_name.endswith(".sql"):
# A .sql file for a different engine; skip it.
continue
else:
# Not a valid delta file.
logger.warning(
"Found directory entry that did not end in .py or .sql: %s",
relative_path,
)
continue
# Mark as done.
cur.execute(
"INSERT INTO applied_schema_deltas (version, file) VALUES (?,?)",
(v, relative_path),
)
logger.info("Schema now up to date") |
Apply the module schemas for the dynamic modules, if any
Args:
cur: database cursor
database_engine:
config: application config | def _apply_module_schemas(
txn: Cursor, database_engine: BaseDatabaseEngine, config: HomeServerConfig
) -> None:
"""Apply the module schemas for the dynamic modules, if any
Args:
cur: database cursor
database_engine:
config: application config
"""
# This is the old way for password_auth_provider modules to make changes
# to the database. This should instead be done using the module API
for mod, _config in config.authproviders.password_providers:
if not hasattr(mod, "get_db_schema_files"):
continue
modname = ".".join((mod.__module__, mod.__name__))
_apply_module_schema_files(
txn, database_engine, modname, mod.get_db_schema_files()
) |
Apply the module schemas for a single module
Args:
cur: database cursor
database_engine: synapse database engine class
modname: fully qualified name of the module
names_and_streams: the names and streams of schemas to be applied | def _apply_module_schema_files(
cur: Cursor,
database_engine: BaseDatabaseEngine,
modname: str,
names_and_streams: Iterable[Tuple[str, TextIO]],
) -> None:
"""Apply the module schemas for a single module
Args:
cur: database cursor
database_engine: synapse database engine class
modname: fully qualified name of the module
names_and_streams: the names and streams of schemas to be applied
"""
cur.execute(
"SELECT file FROM applied_module_schemas WHERE module_name = ?",
(modname,),
)
applied_deltas = {d for d, in cur}
for name, stream in names_and_streams:
if name in applied_deltas:
continue
root_name, ext = os.path.splitext(name)
if ext != ".sql":
raise PrepareDatabaseException(
"only .sql files are currently supported for module schemas"
)
logger.info("applying schema %s for %s", name, modname)
execute_statements_from_stream(cur, stream)
# Mark as done.
cur.execute(
"INSERT INTO applied_module_schemas (module_name, file) VALUES (?,?)",
(modname, name),
) |
Take some data from a database row and return a JSON-decoded object.
Args:
db_content: The JSON-encoded contents from the database.
Returns:
The object decoded from JSON. | def db_to_json(db_content: Union[memoryview, bytes, bytearray, str]) -> Any:
"""
Take some data from a database row and return a JSON-decoded object.
Args:
db_content: The JSON-encoded contents from the database.
Returns:
The object decoded from JSON.
"""
# psycopg2 on Python 3 returns memoryview objects, which we need to
# cast to bytes to decode
if isinstance(db_content, memoryview):
db_content = db_content.tobytes()
# Decode it to a Unicode string before feeding it to the JSON decoder, since
# it only supports handling strings
if isinstance(db_content, (bytes, bytearray)):
db_content = db_content.decode("utf8")
try:
return json_decoder.decode(db_content)
except Exception:
logging.warning("Tried to decode '%r' as JSON and failed", db_content)
raise |
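A sketch of the three accepted input types:

```python
db_to_json('{"event_id": "$abc"}')        # -> {"event_id": "$abc"}
db_to_json(b'{"limit": 10}')              # -> {"limit": 10}
db_to_json(memoryview(b'["a", "b"]'))     # -> ["a", "b"]
```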
Create a ServerAclEvaluator from a m.room.server_acl event's content.
This does up-front parsing of the content to ignore bad data. It then creates
the ServerAclEvaluator which will pre-compile regular expressions from the globs. | def server_acl_evaluator_from_event(acl_event: EventBase) -> "ServerAclEvaluator":
"""
Create a ServerAclEvaluator from a m.room.server_acl event's content.
This does up-front parsing of the content to ignore bad data. It then creates
the ServerAclEvaluator which will pre-compile regular expressions from the globs.
"""
# first of all, parse if literal IPs are blocked.
allow_ip_literals = acl_event.content.get("allow_ip_literals", True)
if not isinstance(allow_ip_literals, bool):
logger.warning("Ignoring non-bool allow_ip_literals flag")
allow_ip_literals = True
# next, parse the deny list by ignoring any non-strings.
deny = acl_event.content.get("deny", [])
if not isinstance(deny, (list, tuple)):
logger.warning("Ignoring non-list deny ACL %s", deny)
deny = []
else:
deny = [s for s in deny if isinstance(s, str)]
# then the allow list.
allow = acl_event.content.get("allow", [])
if not isinstance(allow, (list, tuple)):
logger.warning("Ignoring non-list allow ACL %s", allow)
allow = []
else:
allow = [s for s in allow if isinstance(s, str)]
return ServerAclEvaluator(allow_ip_literals, allow, deny) |
Custom serializer for actions. This allows us to "compress" common actions.
We use the fact that most users have the same actions for notifs (and for
highlights).
We store these default actions as the empty string rather than the full JSON.
Since the empty string isn't valid JSON there is no risk of this clashing with
any real JSON actions | def _serialize_action(
actions: Collection[Union[Mapping, str]], is_highlight: bool
) -> str:
"""Custom serializer for actions. This allows us to "compress" common actions.
We use the fact that most users have the same actions for notifs (and for
highlights).
We store these default actions as the empty string rather than the full JSON.
Since the empty string isn't valid JSON there is no risk of this clashing with
any real JSON actions
"""
if is_highlight:
if actions == DEFAULT_HIGHLIGHT_ACTION:
return "" # We use empty string as the column is non-NULL
else:
if actions == DEFAULT_NOTIF_ACTION:
return ""
return json_encoder.encode(actions) |
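A usage sketch, assuming `DEFAULT_NOTIF_ACTION` and `DEFAULT_HIGHLIGHT_ACTION` are the module-level default action lists this compares against:

```python
_serialize_action(DEFAULT_NOTIF_ACTION, is_highlight=False)      # -> ""
_serialize_action(DEFAULT_HIGHLIGHT_ACTION, is_highlight=True)   # -> ""
_serialize_action(["notify"], is_highlight=False)                # -> '["notify"]'
```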
Custom deserializer for actions. This allows us to "compress" common actions | def _deserialize_action(actions: str, is_highlight: bool) -> List[Union[dict, str]]:
"""Custom deserializer for actions. This allows us to "compress" common actions"""
if actions:
return db_to_json(actions)
if is_highlight:
return DEFAULT_HIGHLIGHT_ACTION
else:
return DEFAULT_NOTIF_ACTION |
Take the DB rows returned from the DB and convert them into a full
`FilteredPushRules` object.
Args:
rawrules: List of tuples of:
* rule ID
* Priority class
* Conditions (as serialized JSON)
* Actions (as serialized JSON)
enabled_map: A dictionary of rule ID to a boolean of whether the rule is
enabled. This might not include all rule IDs from rawrules.
experimental_config: The `experimental_features` section of the Synapse
config. (Used to check if various features are enabled.)
Returns:
A new FilteredPushRules object. | def _load_rules(
rawrules: List[Tuple[str, int, str, str]],
enabled_map: Dict[str, bool],
experimental_config: ExperimentalConfig,
) -> FilteredPushRules:
"""Take the DB rows returned from the DB and convert them into a full
`FilteredPushRules` object.
Args:
rawrules: List of tuples of:
* rule ID
* Priority class
* Conditions (as serialized JSON)
* Actions (as serialized JSON)
enabled_map: A dictionary of rule ID to a boolean of whether the rule is
enabled. This might not include all rule IDs from rawrules.
experimental_config: The `experimental_features` section of the Synapse
config. (Used to check if various features are enabled.)
Returns:
A new FilteredPushRules object.
"""
ruleslist = [
PushRule.from_db(
rule_id=rawrule[0],
priority_class=rawrule[1],
conditions=rawrule[2],
actions=rawrule[3],
)
for rawrule in rawrules
]
push_rules = PushRules(ruleslist)
filtered_rules = FilteredPushRules(
push_rules,
enabled_map,
msc1767_enabled=experimental_config.msc1767_enabled,
msc3664_enabled=experimental_config.msc3664_enabled,
msc3381_polls_enabled=experimental_config.msc3381_polls_enabled,
msc4028_push_encrypted_events=experimental_config.msc4028_push_encrypted_events,
)
return filtered_rules |
Gets the localpart of the max current generated user ID.
Generated user IDs are integers, so we find the largest integer user ID
already taken and return that. | def find_max_generated_user_id_localpart(cur: Cursor) -> int:
"""
Gets the localpart of the max current generated user ID.
Generated user IDs are integers, so we find the largest integer user ID
already taken and return that.
"""
# We bound between '@0' and '@a' to avoid pulling the entire table
# out.
cur.execute("SELECT name FROM users WHERE '@0' <= name AND name < '@a'")
regex = re.compile(r"^@(\d+):")
max_found = 0
for (user_id,) in cur:
match = regex.search(user_id)
if match:
max_found = max(int(match.group(1)), max_found)
return max_found |
Determine the users that represent a room, from the perspective of the `me` user.
The rules which say which users we select are specified in the "Room Summary"
section of
https://spec.matrix.org/v1.4/client-server-api/#get_matrixclientv3sync
Returns a list (possibly empty) of heroes' mxids. | def extract_heroes_from_room_summary(
details: Mapping[str, MemberSummary], me: str
) -> List[str]:
"""Determine the users that represent a room, from the perspective of the `me` user.
The rules which say which users we select are specified in the "Room Summary"
section of
https://spec.matrix.org/v1.4/client-server-api/#get_matrixclientv3sync
Returns a list (possibly empty) of heroes' mxids.
"""
empty_ms = MemberSummary([], 0)
joined_user_ids = [
r[0] for r in details.get(Membership.JOIN, empty_ms).members if r[0] != me
]
invited_user_ids = [
r[0] for r in details.get(Membership.INVITE, empty_ms).members if r[0] != me
]
gone_user_ids = [
r[0] for r in details.get(Membership.LEAVE, empty_ms).members if r[0] != me
] + [r[0] for r in details.get(Membership.BAN, empty_ms).members if r[0] != me]
# FIXME: order by stream ordering rather than as returned by SQL
if joined_user_ids or invited_user_ids:
return sorted(joined_user_ids + invited_user_ids)[0:5]
else:
return sorted(gone_user_ids)[0:5] |
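A hypothetical example of the hero selection, building `MemberSummary` values from `(user_id, event_id)` pairs as consumed by the `r[0]` accesses above:
details = {
    Membership.JOIN: MemberSummary([("@me:example.org", "$a"), ("@zara:example.org", "$b")], 2),
    Membership.INVITE: MemberSummary([("@bob:example.org", "$c")], 1),
}
extract_heroes_from_room_summary(details, me="@me:example.org")
# -> ["@bob:example.org", "@zara:example.org"]  (joined + invited, sorted, `me` excluded, capped at 5)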
Replaces any null code points in the string with spaces as
Postgres and SQLite do not like the insertion of strings with
null code points into the full-text search tables. | def _clean_value_for_search(value: str) -> str:
"""
Replaces any null code points in the string with spaces as
Postgres and SQLite do not like the insertion of strings with
null code points into the full-text search tables.
"""
return value.replace("\u0000", " ") |
Convert the user-supplied `query` into a TokenList, which can be translated into
some DB-specific syntax.
The following constructs are supported:
- phrase queries using "double quotes"
- case-insensitive `or` and `and` operators
- negation of a keyword via unary `-`
- unary hyphen to denote NOT e.g. 'include -exclude'
The following differs from websearch_to_tsquery:
- Stop words are not removed.
- Unclosed phrases are treated differently. | def _tokenize_query(query: str) -> TokenList:
"""
Convert the user-supplied `query` into a TokenList, which can be translated into
some DB-specific syntax.
The following constructs are supported:
- phrase queries using "double quotes"
- case-insensitive `or` and `and` operators
- negation of a keyword via unary `-`
- unary hyphen to denote NOT e.g. 'include -exclude'
The following differs from websearch_to_tsquery:
- Stop words are not removed.
- Unclosed phrases are treated differently.
"""
tokens: TokenList = []
# Find phrases.
in_phrase = False
parts = deque(query.split('"'))
for i, part in enumerate(parts):
# The contents inside double quotes is treated as a phrase.
in_phrase = bool(i % 2)
# Pull out the individual words, discarding any non-word characters.
words = deque(re.findall(r"([\w\-]+)", part, re.UNICODE))
# Phrases have simplified handling of words.
if in_phrase:
# Skip stop words.
phrase = [word for word in words if not _is_stop_word(word)]
# Consecutive words are implicitly ANDed together.
if tokens and tokens[-1] not in (SearchToken.Not, SearchToken.Or):
tokens.append(SearchToken.And)
# Add the phrase.
tokens.append(Phrase(phrase))
continue
# Otherwise, not in a phrase.
while words:
word = words.popleft()
if word.startswith("-"):
tokens.append(SearchToken.Not)
# If there's more of the word left, put it back to be processed again.
word = word[1:]
if word:
words.appendleft(word)
elif word.lower() == "or":
tokens.append(SearchToken.Or)
else:
# Skip stop words.
if _is_stop_word(word):
continue
# Consecutive words are implicitly ANDed together.
if tokens and tokens[-1] not in (SearchToken.Not, SearchToken.Or):
tokens.append(SearchToken.And)
# Add the search term.
tokens.append(word)
return tokens |
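As a rough illustration of the tokenizer's output (assuming none of these words are classified as stop words by `_is_stop_word`):
_tokenize_query('"quick fox" or -brown jumps')
# -> [Phrase(["quick", "fox"]), SearchToken.Or, SearchToken.Not, "brown",
#     SearchToken.And, "jumps"]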
Convert the list of tokens to a string suitable for passing to sqlite's MATCH.
Assume sqlite was compiled with enhanced query syntax.
Ref: https://www.sqlite.org/fts3.html#full_text_index_queries | def _tokens_to_sqlite_match_query(tokens: TokenList) -> str:
"""
Convert the list of tokens to a string suitable for passing to sqlite's MATCH.
Assume sqlite was compiled with enhanced query syntax.
Ref: https://www.sqlite.org/fts3.html#full_text_index_queries
"""
match_query = []
for token in tokens:
if isinstance(token, str):
match_query.append(token)
elif isinstance(token, Phrase):
match_query.append('"' + " ".join(token.phrase) + '"')
elif token == SearchToken.Not:
# TODO: SQLite treats NOT as a *binary* operator. Hopefully a search
# term has already been added before this.
match_query.append(" NOT ")
elif token == SearchToken.Or:
match_query.append(" OR ")
elif token == SearchToken.And:
match_query.append(" AND ")
else:
raise ValueError(f"unknown token {token}")
return "".join(match_query) |
Takes a plain unicode string from the user and converts it into a form
that can be passed to sqlite's matchinfo(). | def _parse_query_for_sqlite(search_term: str) -> str:
"""Takes a plain unicode string from the user and converts it into a form
that can be passed to sqlite's matchinfo().
"""
return _tokens_to_sqlite_match_query(_tokenize_query(search_term)) |
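For example, a bare keyword and a quoted phrase end up implicitly ANDed together in the resulting MATCH expression (again assuming no stop words are involved):
_parse_query_for_sqlite('hello "wo rld"')
# -> 'hello AND "wo rld"'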
Creates an SQL expression to bound the columns by the pagination
tokens.
For example creates an SQL expression like:
(6, 7) >= (topological_ordering, stream_ordering)
AND (5, 3) < (topological_ordering, stream_ordering)
would be generated for dir=b, from_token=(6, 7) and to_token=(5, 3).
Note that tokens are considered to be after the row they are in, e.g. if
a row A has a token T, then we consider A to be before T. This convention
is important when figuring out inequalities for the generated SQL, and
produces the following result:
- If paginating forwards then we exclude any rows matching the from
token, but include those that match the to token.
- If paginating backwards then we include any rows matching the from
token, but exclude those that match the to token.
Args:
direction: Whether we're paginating backwards or forwards.
column_names: The column names to bound. Must *not* be user defined as
these get inserted directly into the SQL statement without escapes.
from_token: The start point for the pagination. This is an exclusive
minimum bound if direction is forwards, and an inclusive maximum bound if
direction is backwards.
to_token: The end point for the pagination. This is an inclusive
maximum bound if direction is forwards, and an exclusive minimum bound if
direction is backwards.
engine: The database engine to generate the clauses for
Returns:
The sql expression | def generate_pagination_where_clause(
direction: Direction,
column_names: Tuple[str, str],
from_token: Optional[Tuple[Optional[int], int]],
to_token: Optional[Tuple[Optional[int], int]],
engine: BaseDatabaseEngine,
) -> str:
"""Creates an SQL expression to bound the columns by the pagination
tokens.
For example creates an SQL expression like:
(6, 7) >= (topological_ordering, stream_ordering)
AND (5, 3) < (topological_ordering, stream_ordering)
would be generated for dir=b, from_token=(6, 7) and to_token=(5, 3).
Note that tokens are considered to be after the row they are in, e.g. if
a row A has a token T, then we consider A to be before T. This convention
is important when figuring out inequalities for the generated SQL, and
produces the following result:
- If paginating forwards then we exclude any rows matching the from
token, but include those that match the to token.
- If paginating backwards then we include any rows matching the from
token, but exclude those that match the to token.
Args:
direction: Whether we're paginating backwards or forwards.
column_names: The column names to bound. Must *not* be user defined as
these get inserted directly into the SQL statement without escapes.
from_token: The start point for the pagination. This is an exclusive
minimum bound if direction is forwards, and an inclusive maximum bound if
direction is backwards.
to_token: The end point for the pagination. This is an inclusive
maximum bound if direction is forwards, and an exclusive minimum bound if
direction is backwards.
engine: The database engine to generate the clauses for
Returns:
The sql expression
"""
where_clause = []
if from_token:
where_clause.append(
_make_generic_sql_bound(
bound=">=" if direction == Direction.BACKWARDS else "<",
column_names=column_names,
values=from_token,
engine=engine,
)
)
if to_token:
where_clause.append(
_make_generic_sql_bound(
bound="<" if direction == Direction.BACKWARDS else ">=",
column_names=column_names,
values=to_token,
engine=engine,
)
)
return " AND ".join(where_clause) |
Generate a start and end point for this page of events.
Args:
direction: Whether pagination is going forwards or backwards.
from_token: The token to start pagination at, or None to start at the first value.
to_token: The token to end pagination at, or None to not limit the end point.
Returns:
A three tuple of:
ASC or DESC for sorting of the query.
The starting position as a tuple of ints representing
(topological position, stream position) or None if no from_token was
provided. The topological position may be None for live tokens.
The end position in the same format as the starting position, or None
if no to_token was provided. | def generate_pagination_bounds(
direction: Direction,
from_token: Optional[RoomStreamToken],
to_token: Optional[RoomStreamToken],
) -> Tuple[
str, Optional[Tuple[Optional[int], int]], Optional[Tuple[Optional[int], int]]
]:
"""
Generate a start and end point for this page of events.
Args:
direction: Whether pagination is going forwards or backwards.
from_token: The token to start pagination at, or None to start at the first value.
to_token: The token to end pagination at, or None to not limit the end point.
Returns:
A three tuple of:
ASC or DESC for sorting of the query.
The starting position as a tuple of ints representing
(topological position, stream position) or None if no from_token was
provided. The topological position may be None for live tokens.
The end position in the same format as the starting position, or None
if no to_token was provided.
"""
# Tokens really represent positions between elements, but we use
# the convention of pointing to the event before the gap. Hence
# we have a bit of asymmetry when it comes to equalities.
if direction == Direction.BACKWARDS:
order = "DESC"
else:
order = "ASC"
# The bounds for the stream tokens are complicated by the fact
# that we need to handle the instance_map part of the tokens. We do this
# by fetching all events between the min stream token and the maximum
# stream token (as returned by `RoomStreamToken.get_max_stream_pos`) and
# then filtering the results.
from_bound: Optional[Tuple[Optional[int], int]] = None
if from_token:
if from_token.topological is not None:
from_bound = from_token.as_historical_tuple()
elif direction == Direction.BACKWARDS:
from_bound = (
None,
from_token.get_max_stream_pos(),
)
else:
from_bound = (
None,
from_token.stream,
)
to_bound: Optional[Tuple[Optional[int], int]] = None
if to_token:
if to_token.topological is not None:
to_bound = to_token.as_historical_tuple()
elif direction == Direction.BACKWARDS:
to_bound = (
None,
to_token.stream,
)
else:
to_bound = (
None,
to_token.get_max_stream_pos(),
)
return order, from_bound, to_bound |
Generate the next room stream token based on the currently returned data.
Args:
direction: Whether pagination is going forwards or backwards.
last_topo_ordering: The last topological ordering being returned.
last_stream_ordering: The last stream ordering being returned.
Returns:
A new RoomStreamToken to return to the client. | def generate_next_token(
direction: Direction, last_topo_ordering: int, last_stream_ordering: int
) -> RoomStreamToken:
"""
Generate the next room stream token based on the currently returned data.
Args:
direction: Whether pagination is going forwards or backwards.
last_topo_ordering: The last topological ordering being returned.
last_stream_ordering: The last stream ordering being returned.
Returns:
A new RoomStreamToken to return to the client.
"""
if direction == Direction.BACKWARDS:
# Tokens are positions between events.
# This token points *after* the last event in the chunk.
# We need it to point to the event before it in the chunk
# when we are going backwards so we subtract one from the
# stream part.
last_stream_ordering -= 1
return RoomStreamToken(topological=last_topo_ordering, stream=last_stream_ordering) |
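For example, when paginating backwards the returned token is nudged back past the last event in the chunk:
generate_next_token(Direction.BACKWARDS, last_topo_ordering=10, last_stream_ordering=7)
# -> RoomStreamToken(topological=10, stream=6)
generate_next_token(Direction.FORWARDS, last_topo_ordering=10, last_stream_ordering=7)
# -> RoomStreamToken(topological=10, stream=7)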
Create an SQL expression that bounds the given column names by the
values, e.g. create the equivalent of `(1, 2) < (col1, col2)`.
Only works with two columns.
Older versions of SQLite don't support that syntax so we have to expand it
out manually.
Args:
bound: The comparison operator to use. One of ">", "<", ">=",
"<=", where the values are on the left and columns on the right.
column_names: The column names. Must *not* be user defined
as these get inserted directly into the SQL statement without
escapes.
values: The values to bound the columns by. If
the first value is None then only creates a bound on the second
column.
engine: The database engine to generate the SQL for
Returns:
The SQL statement | def _make_generic_sql_bound(
bound: str,
column_names: Tuple[str, str],
values: Tuple[Optional[int], int],
engine: BaseDatabaseEngine,
) -> str:
"""Create an SQL expression that bounds the given column names by the
values, e.g. create the equivalent of `(1, 2) < (col1, col2)`.
Only works with two columns.
Older versions of SQLite don't support that syntax so we have to expand it
out manually.
Args:
bound: The comparison operator to use. One of ">", "<", ">=",
"<=", where the values are on the left and columns on the right.
column_names: The column names. Must *not* be user defined
as these get inserted directly into the SQL statement without
escapes.
values: The values to bound the columns by. If
the first value is None then only creates a bound on the second
column.
engine: The database engine to generate the SQL for
Returns:
The SQL statement
"""
assert bound in (">", "<", ">=", "<=")
name1, name2 = column_names
val1, val2 = values
if val1 is None:
val2 = int(val2)
return "(%d %s %s)" % (val2, bound, name2)
val1 = int(val1)
val2 = int(val2)
if isinstance(engine, PostgresEngine):
# Postgres doesn't optimise ``(x < a) OR (x=a AND y<b)`` as well
# as it optimises ``(x,y) < (a,b)`` on multicolumn indexes. So we
# use the later form when running against postgres.
return "((%d,%d) %s (%s,%s))" % (val1, val2, bound, name1, name2)
# We want to generate queries of e.g. the form:
#
# (val1 < name1 OR (val1 = name1 AND val2 <= name2))
#
# which is equivalent to (val1, val2) < (name1, name2)
return """(
{val1:d} {strict_bound} {name1}
OR ({val1:d} = {name1} AND {val2:d} {bound} {name2})
)""".format(
name1=name1,
val1=val1,
name2=name2,
val2=val2,
strict_bound=bound[0],  # the first comparison must use the strict operator (no '=')
bound=bound,
) |
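For engines other than Postgres the tuple comparison is expanded by hand; for example (whitespace trimmed for readability, `sqlite_engine` standing in for a SQLite engine instance):
_make_generic_sql_bound(
    "<=", ("topological_ordering", "stream_ordering"), (5, 3), sqlite_engine
)
# -> (5 < topological_ordering
#      OR (5 = topological_ordering AND 3 <= stream_ordering))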
Returns True if the event persisted by the given instance at the given
topological/stream_ordering falls between the two tokens (taking a None
token to mean unbounded).
Used to filter results from fetching events in the DB against the given
tokens. This is necessary to handle the case where the tokens include
position maps, which we handle by fetching more than necessary from the DB
and then filtering (rather than attempting to construct a complicated SQL
query). | def _filter_results(
lower_token: Optional[RoomStreamToken],
upper_token: Optional[RoomStreamToken],
instance_name: str,
topological_ordering: int,
stream_ordering: int,
) -> bool:
"""Returns True if the event persisted by the given instance at the given
topological/stream_ordering falls between the two tokens (taking a None
token to mean unbounded).
Used to filter results from fetching events in the DB against the given
tokens. This is necessary to handle the case where the tokens include
position maps, which we handle by fetching more than necessary from the DB
and then filtering (rather than attempting to construct a complicated SQL
query).
"""
event_historical_tuple = (
topological_ordering,
stream_ordering,
)
if lower_token:
if lower_token.topological is not None:
# If these are historical tokens we compare the `(topological, stream)`
# tuples.
if event_historical_tuple <= lower_token.as_historical_tuple():
return False
else:
# If these are live tokens we compare the stream ordering against the
# writers stream position.
if stream_ordering <= lower_token.get_stream_pos_for_instance(
instance_name
):
return False
if upper_token:
if upper_token.topological is not None:
if upper_token.as_historical_tuple() < event_historical_tuple:
return False
else:
if upper_token.get_stream_pos_for_instance(instance_name) < stream_ordering:
return False
return True |
Transforms text before it is inserted into the user directory index, or searched
for in the user directory index.
Note that the user directory search table needs to be rebuilt whenever this function
changes. | def _filter_text_for_index(text: str) -> str:
"""Transforms text before it is inserted into the user directory index, or searched
for in the user directory index.
Note that the user directory search table needs to be rebuilt whenever this function
changes.
"""
# Lowercase the text, to make searches case-insensitive.
# This is necessary for both PostgreSQL and SQLite. PostgreSQL's
# `to_tsquery/to_tsvector` functions don't lowercase non-ASCII characters when using
# the "C" collation, while SQLite just doesn't lowercase non-ASCII characters at
# all.
text = text.lower()
# Normalize the text. NFKC normalization has two effects:
# 1. It canonicalizes the text, ie. maps all visually identical strings to the same
# string. For example, ["e", "◌́"] is mapped to ["é"].
# 2. It maps strings that are roughly equivalent to the same string.
# For example, ["dž"] is mapped to ["d", "ž"], ["①"] to ["1"] and ["i⁹"] to
# ["i", "9"].
text = unicodedata.normalize("NFKC", text)
# Note that nothing is done to make searches accent-insensitive.
# That could be achieved by converting to NFKD form instead (with combining accents
# split out) and filtering out combining accents using `unicodedata.combining(c)`.
# The downside of this may be noisier search results, since search terms with
# explicit accents will match characters with no accents, or completely different
# accents.
#
# text = unicodedata.normalize("NFKD", text)
# text = "".join([c for c in text if not unicodedata.combining(c)])
return text |
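For instance, lowercasing plus NFKC normalisation maps compatibility characters to their plain equivalents while leaving accents in place:
_filter_text_for_index("Café ①")
# -> "café 1"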
Takes a plain unicode string from the user and converts it into a form
that can be passed to the database.
We use this so that we can add prefix matching, which isn't something
that is supported by default.
We specifically add both a prefix and non prefix matching term so that
exact matches get ranked higher. | def _parse_query_sqlite(search_term: str) -> str:
"""Takes a plain unicode string from the user and converts it into a form
that can be passed to the database.
We use this so that we can add prefix matching, which isn't something
that is supported by default.
We specifically add both a prefix and non prefix matching term so that
exact matches get ranked higher.
"""
search_term = _filter_text_for_index(search_term)
# Pull out the individual words, discarding any non-word characters.
results = _parse_words(search_term)
return " & ".join("(%s* OR %s)" % (result, result) for result in results) |
Takes a plain unicode string from the user and converts it into a form
that can be passed to the database.
We use this so that we can add prefix matching, which isn't something
that is supported by default. | def _parse_query_postgres(search_term: str) -> Tuple[str, str, str]:
"""Takes a plain unicode string from the user and converts it into a form
that can be passed to the database.
We use this so that we can add prefix matching, which isn't something
that is supported by default.
"""
search_term = _filter_text_for_index(search_term)
escaped_words = []
for word in _parse_words(search_term):
# Postgres tsvector and tsquery quoting rules:
# words potentially containing punctuation should be quoted
# and then existing quotes and backslashes should be doubled
# See: https://www.postgresql.org/docs/current/datatype-textsearch.html#DATATYPE-TSQUERY
quoted_word = word.replace("'", "''").replace("\\", "\\\\")
escaped_words.append(f"'{quoted_word}'")
both = " & ".join("(%s:* | %s)" % (word, word) for word in escaped_words)
exact = " & ".join("%s" % (word,) for word in escaped_words)
prefix = " & ".join("%s:*" % (word,) for word in escaped_words)
return both, exact, prefix |
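And the corresponding Postgres tsquery fragments for the same input (again assuming regex word-splitting):
_parse_query_postgres("Alice Bob")
# -> ("('alice':* | 'alice') & ('bob':* | 'bob')",  # both
#     "'alice' & 'bob'",                            # exact
#     "'alice':* & 'bob':*")                        # prefix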
Split the provided search string into a list of its words.
If support for ICU (International Components for Unicode) is available, use it.
Otherwise, fall back to using a regex to detect word boundaries. This latter
solution works well enough for most latin-based languages, but doesn't work as well
with other languages.
Args:
search_term: The search string.
Returns:
A list of the words in the search string. | def _parse_words(search_term: str) -> List[str]:
"""Split the provided search string into a list of its words.
If support for ICU (International Components for Unicode) is available, use it.
Otherwise, fall back to using a regex to detect word boundaries. This latter
solution works well enough for most latin-based languages, but doesn't work as well
with other languages.
Args:
search_term: The search string.
Returns:
A list of the words in the search string.
"""
if USE_ICU:
return _parse_words_with_icu(search_term)
return _parse_words_with_regex(search_term) |
Break down search term into words, when we don't have ICU available.
See: `_parse_words` | def _parse_words_with_regex(search_term: str) -> List[str]:
"""
Break down search term into words, when we don't have ICU available.
See: `_parse_words`
"""
return re.findall(r"([\w\-]+)", search_term, re.UNICODE) |
Break down the provided search string into its individual words using ICU
(International Components for Unicode).
Args:
search_term: The search string.
Returns:
A list of the words in the search string. | def _parse_words_with_icu(search_term: str) -> List[str]:
"""Break down the provided search string into its individual words using ICU
(International Components for Unicode).
Args:
search_term: The search string.
Returns:
A list of the words in the search string.
"""
results = []
breaker = icu.BreakIterator.createWordInstance(icu.Locale.getDefault())
breaker.setText(search_term)
i = 0
while True:
j = breaker.nextBoundary()
if j < 0:
break
result = search_term[i:j]
# libicu considers spaces and punctuation between words as words, but we don't
# want to include those in results as they would result in syntax errors in SQL
# queries (e.g. "foo bar" would result in the search query including "foo & &
# bar").
if len(re.findall(r"([\w\-]+)", result, re.UNICODE)):
results.append(result)
i = j
return results |
Called before upgrading an existing database to check that it is broadly sane
compared with the configuration. | def check_database_before_upgrade(
cur: Cursor, database_engine: BaseDatabaseEngine, config: HomeServerConfig
) -> None:
"""Called before upgrading an existing database to check that it is broadly sane
compared with the configuration.
"""
logger.info("Checking database for consistency with configuration...")
# if there are any users in the database, check that the username matches our
# configured server name.
cur.execute("SELECT name FROM users LIMIT 1")
rows = cur.fetchall()
if not rows:
return
user_domain = get_domain_from_id(rows[0][0])
if user_domain == config.server.server_name:
return
raise Exception(
"Found users in database not native to %s!\n"
"You cannot change a synapse server_name after it's been configured"
% (config.server.server_name,)
) |
Handle match_info called w/default args 'pcx' - based on the example rank
function http://sqlite.org/fts3.html#appendix_a | def _rank(raw_match_info: bytes) -> float:
"""Handle match_info called w/default args 'pcx' - based on the example rank
function http://sqlite.org/fts3.html#appendix_a
"""
match_info = _parse_match_info(raw_match_info)
score = 0.0
p, c = match_info[:2]
for phrase_num in range(p):
phrase_info_idx = 2 + (phrase_num * c * 3)
for col_num in range(c):
col_idx = phrase_info_idx + (col_num * 3)
x1, x2 = match_info[col_idx : col_idx + 2]
if x1 > 0:
score += float(x1) / x2
return score |
Add a bg update to populate the `state_key` and `rejection_reason` columns of `events` | def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
"""Add a bg update to populate the `state_key` and `rejection_reason` columns of `events`"""
# we know that any new events will have the columns populated (and that has been
# the case since schema_version 68, so there is no chance of rolling back now).
#
# So, we only need to make sure that existing rows are updated. We read the
# current min and max stream orderings, since that is guaranteed to include all
# the events that were stored before the new columns were added.
cur.execute("SELECT MIN(stream_ordering), MAX(stream_ordering) FROM events")
row = cur.fetchone()
assert row is not None
(min_stream_ordering, max_stream_ordering) = row
if min_stream_ordering is None:
# no rows, nothing to do.
return
cur.execute(
"INSERT into background_updates (ordering, update_name, progress_json)"
" VALUES (7203, 'events_populate_state_key_rejections', ?)",
(
json.dumps(
{
"min_stream_ordering_exclusive": min_stream_ordering - 1,
"max_stream_ordering_inclusive": max_stream_ordering,
}
),
),
) |
Upgrade the event_search table to use the porter tokenizer if it isn't already
Applies only for sqlite. | def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
"""
Upgrade the event_search table to use the porter tokenizer if it isn't already
Applies only for sqlite.
"""
if not isinstance(database_engine, Sqlite3Engine):
return
# Rebuild the table event_search table with tokenize=porter configured.
cur.execute("DROP TABLE event_search")
cur.execute(
"""
CREATE VIRTUAL TABLE event_search
USING fts4 (tokenize=porter, event_id, room_id, sender, key, value )
"""
)
# Re-run the background job to re-populate the event_search table.
cur.execute("SELECT MIN(stream_ordering) FROM events")
row = cur.fetchone()
assert row is not None
min_stream_id = row[0]
# If there are not any events, nothing to do.
if min_stream_id is None:
return
cur.execute("SELECT MAX(stream_ordering) FROM events")
row = cur.fetchone()
assert row is not None
max_stream_id = row[0]
progress = {
"target_min_stream_id_inclusive": min_stream_id,
"max_stream_id_exclusive": max_stream_id + 1,
}
progress_json = json.dumps(progress)
sql = """
INSERT into background_updates (ordering, update_name, progress_json)
VALUES (?, ?, ?)
"""
cur.execute(sql, (7310, "event_search", progress_json)) |
Part 3 of a multi-step migration to drop the column `user_id` and replace it with
`full_user_id`. See the database schema docs for more information on the full
migration steps. | def run_upgrade(
cur: LoggingTransaction,
database_engine: BaseDatabaseEngine,
config: HomeServerConfig,
) -> None:
"""
Part 3 of a multi-step migration to drop the column `user_id` and replace it with
`full_user_id`. See the database schema docs for more information on the full
migration steps.
"""
hostname = config.server.server_name
if isinstance(database_engine, PostgresEngine):
# check if the constraint can be validated
check_sql = """
SELECT user_id from profiles WHERE full_user_id IS NULL
"""
cur.execute(check_sql)
res = cur.fetchall()
if res:
# there are rows the background job missed, finish them here before we validate the constraint
process_rows_sql = """
UPDATE profiles
SET full_user_id = '@' || user_id || ?
WHERE user_id IN (
SELECT user_id FROM profiles WHERE full_user_id IS NULL
)
"""
cur.execute(process_rows_sql, (f":{hostname}",))
# Now we can validate
validate_sql = """
ALTER TABLE profiles VALIDATE CONSTRAINT full_user_id_not_null
"""
cur.execute(validate_sql)
else:
# in SQLite we need to rewrite the table to add the constraint.
# First drop any temporary table that might be here from a previous failed migration.
cur.execute("DROP TABLE IF EXISTS temp_profiles")
create_sql = """
CREATE TABLE temp_profiles (
full_user_id text NOT NULL,
user_id text,
displayname text,
avatar_url text,
UNIQUE (full_user_id),
UNIQUE (user_id)
)
"""
cur.execute(create_sql)
copy_sql = """
INSERT INTO temp_profiles (
user_id,
displayname,
avatar_url,
full_user_id)
SELECT user_id, displayname, avatar_url, '@' || user_id || ':' || ? FROM profiles
"""
cur.execute(copy_sql, (f"{hostname}",))
drop_sql = """
DROP TABLE profiles
"""
cur.execute(drop_sql)
rename_sql = """
ALTER TABLE temp_profiles RENAME to profiles
"""
cur.execute(rename_sql) |
Part 3 of a multi-step migration to drop the column `user_id` and replace it with
`full_user_id`. See the database schema docs for more information on the full
migration steps. | def run_upgrade(
cur: LoggingTransaction,
database_engine: BaseDatabaseEngine,
config: HomeServerConfig,
) -> None:
"""
Part 3 of a multi-step migration to drop the column `user_id` and replace it with
`full_user_id`. See the database schema docs for more information on the full
migration steps.
"""
hostname = config.server.server_name
if isinstance(database_engine, PostgresEngine):
# check if the constraint can be validated
check_sql = """
SELECT user_id from user_filters WHERE full_user_id IS NULL
"""
cur.execute(check_sql)
res = cur.fetchall()
if res:
# there are rows the background job missed, finish them here before we validate the constraint
process_rows_sql = """
UPDATE user_filters
SET full_user_id = '@' || user_id || ?
WHERE user_id IN (
SELECT user_id FROM user_filters WHERE full_user_id IS NULL
)
"""
cur.execute(process_rows_sql, (f":{hostname}",))
# Now we can validate
validate_sql = """
ALTER TABLE user_filters VALIDATE CONSTRAINT full_user_id_not_null
"""
cur.execute(validate_sql)
else:
cur.execute("DROP TABLE IF EXISTS temp_user_filters")
create_sql = """
CREATE TABLE temp_user_filters (
full_user_id text NOT NULL,
user_id text NOT NULL,
filter_id bigint NOT NULL,
filter_json bytea NOT NULL
)
"""
cur.execute(create_sql)
index_sql = """
CREATE UNIQUE INDEX IF NOT EXISTS user_filters_unique ON
temp_user_filters (user_id, filter_id)
"""
cur.execute(index_sql)
copy_sql = """
INSERT INTO temp_user_filters (
user_id,
filter_id,
filter_json,
full_user_id)
SELECT user_id, filter_id, filter_json, '@' || user_id || ':' || ? FROM user_filters
"""
cur.execute(copy_sql, (f"{hostname}",))
drop_sql = """
DROP TABLE user_filters
"""
cur.execute(drop_sql)
rename_sql = """
ALTER TABLE temp_user_filters RENAME to user_filters
"""
cur.execute(rename_sql) |
Fix to drop unused indexes caused by incorrectly adding UNIQUE constraint to
columns `user_id` and `full_user_id` of table `user_filters` in previous migration. | def run_update(
cur: LoggingTransaction,
database_engine: BaseDatabaseEngine,
config: HomeServerConfig,
) -> None:
"""
Fix to drop unused indexes caused by incorrectly adding UNIQUE constraint to
columns `user_id` and `full_user_id` of table `user_filters` in previous migration.
"""
if isinstance(database_engine, Sqlite3Engine):
cur.execute("DROP TABLE IF EXISTS temp_user_filters")
create_sql = """
CREATE TABLE temp_user_filters (
full_user_id text NOT NULL,
user_id text NOT NULL,
filter_id bigint NOT NULL,
filter_json bytea NOT NULL
)
"""
cur.execute(create_sql)
copy_sql = """
INSERT INTO temp_user_filters (
user_id,
filter_id,
filter_json,
full_user_id)
SELECT user_id, filter_id, filter_json, full_user_id FROM user_filters
"""
cur.execute(copy_sql)
drop_sql = """
DROP TABLE user_filters
"""
cur.execute(drop_sql)
rename_sql = """
ALTER TABLE temp_user_filters RENAME to user_filters
"""
cur.execute(rename_sql)
index_sql = """
CREATE UNIQUE INDEX IF NOT EXISTS user_filters_unique ON
user_filters (user_id, filter_id)
"""
cur.execute(index_sql) |
An attempt to mitigate a painful race between foreground and background updates
touching the `stream_ordering` column of the events table. More info can be found
at https://github.com/matrix-org/synapse/issues/15677. | def run_create(
cur: LoggingTransaction,
database_engine: BaseDatabaseEngine,
) -> None:
"""
An attempt to mitigate a painful race between foreground and background updates
touching the `stream_ordering` column of the events table. More info can be found
at https://github.com/matrix-org/synapse/issues/15677.
"""
# technically the bg update we're concerned with below should only have been added in
# postgres but it doesn't hurt to be extra careful
if isinstance(database_engine, PostgresEngine):
select_sql = """
SELECT 1 FROM background_updates
WHERE update_name = 'replace_stream_ordering_column'
"""
cur.execute(select_sql)
res = cur.fetchone()
# if the background update `replace_stream_ordering_column` is still pending, we need
# to drop the indexes added in 7403, and re-add them to the column `stream_ordering2`
# with the idea that they will be preserved when the column is renamed `stream_ordering`
# after the background update has finished
if res:
drop_cse_sql = """
ALTER TABLE current_state_events DROP CONSTRAINT IF EXISTS event_stream_ordering_fkey
"""
cur.execute(drop_cse_sql)
drop_lcm_sql = """
ALTER TABLE local_current_membership DROP CONSTRAINT IF EXISTS event_stream_ordering_fkey
"""
cur.execute(drop_lcm_sql)
drop_rm_sql = """
ALTER TABLE room_memberships DROP CONSTRAINT IF EXISTS event_stream_ordering_fkey
"""
cur.execute(drop_rm_sql)
add_cse_sql = """
ALTER TABLE current_state_events ADD CONSTRAINT event_stream_ordering_fkey
FOREIGN KEY (event_stream_ordering) REFERENCES events(stream_ordering2) NOT VALID;
"""
cur.execute(add_cse_sql)
add_lcm_sql = """
ALTER TABLE local_current_membership ADD CONSTRAINT event_stream_ordering_fkey
FOREIGN KEY (event_stream_ordering) REFERENCES events(stream_ordering2) NOT VALID;
"""
cur.execute(add_lcm_sql)
add_rm_sql = """
ALTER TABLE room_memberships ADD CONSTRAINT event_stream_ordering_fkey
FOREIGN KEY (event_stream_ordering) REFERENCES events(stream_ordering2) NOT VALID;
"""
cur.execute(add_rm_sql) |
Get the best impl of SequenceGenerator available
This uses PostgresSequenceGenerator on postgres, and a locally-locked impl on
sqlite.
Args:
database_engine: the database engine we are connected to
get_first_callback: a callback which gets the next sequence ID. Used if
we're on sqlite.
sequence_name: the name of a postgres sequence to use.
table, id_column, stream_name, positive: If set then `check_consistency`
is called on the created sequence. See docstring for
`check_consistency` details. | def build_sequence_generator(
db_conn: "LoggingDatabaseConnection",
database_engine: BaseDatabaseEngine,
get_first_callback: GetFirstCallbackType,
sequence_name: str,
table: Optional[str],
id_column: Optional[str],
stream_name: Optional[str] = None,
positive: bool = True,
) -> SequenceGenerator:
"""Get the best impl of SequenceGenerator available
This uses PostgresSequenceGenerator on postgres, and a locally-locked impl on
sqlite.
Args:
database_engine: the database engine we are connected to
get_first_callback: a callback which gets the next sequence ID. Used if
we're on sqlite.
sequence_name: the name of a postgres sequence to use.
table, id_column, stream_name, positive: If set then `check_consistency`
is called on the created sequence. See docstring for
`check_consistency` details.
"""
if isinstance(database_engine, PostgresEngine):
seq: SequenceGenerator = PostgresSequenceGenerator(sequence_name)
else:
seq = LocalSequenceGenerator(get_first_callback)
if table:
assert id_column
seq.check_consistency(
db_conn=db_conn,
table=table,
id_column=id_column,
stream_name=stream_name,
positive=positive,
)
return seq |
Create a new ``Requester`` object
Args:
user_id: id of the user making the request
access_token_id: *ID* of the access token used for this
request, or None if it came via the appservice API or similar
is_guest: True if the user making this request is a guest user
scope: the scope of the access token used for this request, if any
shadow_banned: True if the user making this request is shadow-banned.
device_id: device_id which was set at authentication time
app_service: the AS requesting on behalf of the user
authenticated_entity: The entity that authenticated when making the request.
This is different to the user_id when an admin user or the server is
"puppeting" the user.
Returns:
Requester | def create_requester(
user_id: Union[str, "UserID"],
access_token_id: Optional[int] = None,
is_guest: bool = False,
scope: StrCollection = (),
shadow_banned: bool = False,
device_id: Optional[str] = None,
app_service: Optional["ApplicationService"] = None,
authenticated_entity: Optional[str] = None,
) -> Requester:
"""
Create a new ``Requester`` object
Args:
user_id: id of the user making the request
access_token_id: *ID* of the access token used for this
request, or None if it came via the appservice API or similar
is_guest: True if the user making this request is a guest user
scope: the scope of the access token used for this request, if any
shadow_banned: True if the user making this request is shadow-banned.
device_id: device_id which was set at authentication time
app_service: the AS requesting on behalf of the user
authenticated_entity: The entity that authenticated when making the request.
This is different to the user_id when an admin user or the server is
"puppeting" the user.
Returns:
Requester
"""
if not isinstance(user_id, UserID):
user_id = UserID.from_string(user_id)
if authenticated_entity is None:
authenticated_entity = user_id.to_string()
scope = set(scope)
return Requester(
user_id,
access_token_id,
is_guest,
scope,
shadow_banned,
device_id,
app_service,
authenticated_entity,
) |
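A minimal usage sketch (the device ID is illustrative):
requester = create_requester("@alice:example.org", device_id="ABCDEFGH")
# `authenticated_entity` defaults to the user's own MXID and `scope` to an empty set.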
Check for characters not allowed in an mxid or groupid localpart
Args:
localpart: the localpart to be checked
Returns:
True if there are any naughty characters | def contains_invalid_mxid_characters(localpart: str) -> bool:
"""Check for characters not allowed in an mxid or groupid localpart
Args:
localpart: the localpart to be checked
Returns:
True if there are any naughty characters
"""
return any(c not in MXID_LOCALPART_ALLOWED_CHARACTERS for c in localpart) |
Map a username onto a string suitable for a MXID
This follows the algorithm laid out at
https://matrix.org/docs/spec/appendices.html#mapping-from-other-character-sets.
Args:
username: username to be mapped
case_sensitive: true if TEST and test should be mapped
onto different mxids
Returns:
string suitable for a mxid localpart | def map_username_to_mxid_localpart(
username: Union[str, bytes], case_sensitive: bool = False
) -> str:
"""Map a username onto a string suitable for a MXID
This follows the algorithm laid out at
https://matrix.org/docs/spec/appendices.html#mapping-from-other-character-sets.
Args:
username: username to be mapped
case_sensitive: true if TEST and test should be mapped
onto different mxids
Returns:
string suitable for a mxid localpart
"""
if not isinstance(username, bytes):
username = username.encode("utf-8")
# first we sort out upper-case characters
if case_sensitive:
def f1(m: Match[bytes]) -> bytes:
return b"_" + m.group().lower()
username = UPPER_CASE_PATTERN.sub(f1, username)
else:
username = username.lower()
# then we sort out non-ascii characters by converting to the hex equivalent.
def f2(m: Match[bytes]) -> bytes:
return b"=%02x" % (m.group()[0],)
username = NON_MXID_CHARACTER_PATTERN.sub(f2, username)
# we also do the =-escaping to mxids starting with an underscore.
username = re.sub(b"^_", b"=5f", username)
# we should now only have ascii bytes left, so can decode back to a string.
return username.decode("ascii") |
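A few illustrative mappings produced by the escaping rules above:
map_username_to_mxid_localpart("[email protected]")  # -> "bob=40example.com"
map_username_to_mxid_localpart("_alice")  # -> "=5falice"
map_username_to_mxid_localpart("myTEST", case_sensitive=True)  # -> "my_t_e_s_t"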
Get the key ID and signedjson verify key from a cross-signing key dict
Args:
key_info: a cross-signing key dict, which must have a "keys"
property that has exactly one item in it
Returns:
the key ID and verify key for the cross-signing key | def get_verify_key_from_cross_signing_key(
key_info: Mapping[str, Any]
) -> Tuple[str, VerifyKey]:
"""Get the key ID and signedjson verify key from a cross-signing key dict
Args:
key_info: a cross-signing key dict, which must have a "keys"
property that has exactly one item in it
Returns:
the key ID and verify key for the cross-signing key
"""
# make sure that a `keys` field is provided
if "keys" not in key_info:
raise ValueError("Invalid key")
keys = key_info["keys"]
# and that it contains exactly one key
if len(keys) == 1:
key_id, key_data = next(iter(keys.items()))
return key_id, decode_verify_key_bytes(key_id, decode_base64(key_data))
else:
raise ValueError("Invalid key") |
Combines a tuple of `Deferred`s into a single `Deferred`.
Wraps `defer.gatherResults` to provide type annotations that support heterogeneous
lists of `Deferred`s. | def gather_results( # type: ignore[misc]
deferredList: Tuple["defer.Deferred[T1]", ...],
consumeErrors: bool = False,
) -> "defer.Deferred[Tuple[T1, ...]]":
"""Combines a tuple of `Deferred`s into a single `Deferred`.
Wraps `defer.gatherResults` to provide type annotations that support heterogeneous
lists of `Deferred`s.
"""
# The `type: ignore[misc]` above suppresses
# "Overloaded function implementation cannot produce return type of signature 1/2/3"
deferred = defer.gatherResults(deferredList, consumeErrors=consumeErrors)
return deferred.addCallback(tuple) |
The in built twisted `Deferred.addTimeout` fails to time out deferreds
that have a canceller that throws exceptions. This method creates a new
deferred that wraps and times out the given deferred, correctly handling
the case where the given deferred's canceller throws.
(See https://twistedmatrix.com/trac/ticket/9534)
NOTE: Unlike `Deferred.addTimeout`, this function returns a new deferred.
NOTE: the TimeoutError raised by the resultant deferred is
twisted.internet.defer.TimeoutError, which is *different* to the built-in
TimeoutError, as well as various other TimeoutErrors you might have imported.
Args:
deferred: The Deferred to potentially timeout.
timeout: Timeout in seconds
reactor: The twisted reactor to use
Returns:
A new Deferred, which will errback with defer.TimeoutError on timeout. | def timeout_deferred(
deferred: "defer.Deferred[_T]", timeout: float, reactor: IReactorTime
) -> "defer.Deferred[_T]":
"""The in built twisted `Deferred.addTimeout` fails to time out deferreds
that have a canceller that throws exceptions. This method creates a new
deferred that wraps and times out the given deferred, correctly handling
the case where the given deferred's canceller throws.
(See https://twistedmatrix.com/trac/ticket/9534)
NOTE: Unlike `Deferred.addTimeout`, this function returns a new deferred.
NOTE: the TimeoutError raised by the resultant deferred is
twisted.internet.defer.TimeoutError, which is *different* to the built-in
TimeoutError, as well as various other TimeoutErrors you might have imported.
Args:
deferred: The Deferred to potentially timeout.
timeout: Timeout in seconds
reactor: The twisted reactor to use
Returns:
A new Deferred, which will errback with defer.TimeoutError on timeout.
"""
new_d: "defer.Deferred[_T]" = defer.Deferred()
timed_out = [False]
def time_it_out() -> None:
timed_out[0] = True
try:
deferred.cancel()
except Exception: # if we throw any exception it'll break time outs
logger.exception("Canceller failed during timeout")
# the cancel() call should have set off a chain of errbacks which
# will have errbacked new_d, but in case it hasn't, errback it now.
if not new_d.called:
new_d.errback(defer.TimeoutError("Timed out after %gs" % (timeout,)))
delayed_call = reactor.callLater(timeout, time_it_out)
def convert_cancelled(value: Failure) -> Failure:
# if the original deferred was cancelled, and our timeout has fired, then
# the reason it was cancelled was due to our timeout. Turn the CancelledError
# into a TimeoutError.
if timed_out[0] and value.check(CancelledError):
raise defer.TimeoutError("Timed out after %gs" % (timeout,))
return value
deferred.addErrback(convert_cancelled)
def cancel_timeout(result: _T) -> _T:
# stop the pending call to cancel the deferred if it's been fired
if delayed_call.active():
delayed_call.cancel()
return result
deferred.addBoth(cancel_timeout)
def success_cb(val: _T) -> None:
if not new_d.called:
new_d.callback(val)
def failure_cb(val: Failure) -> None:
if not new_d.called:
new_d.errback(val)
deferred.addCallbacks(success_cb, failure_cb)
return new_d |
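A usage sketch (the deferred-producing call is illustrative; `reactor` is the running twisted reactor):
d = fetch_remote_keys()  # some Deferred that may never fire
d = timeout_deferred(d, timeout=30.0, reactor=reactor)
# `d` now errbacks with twisted.internet.defer.TimeoutError after 30 seconds.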
Convert a value to an awaitable if not already an awaitable. | def maybe_awaitable(value: Union[Awaitable[R], R]) -> Awaitable[R]:
"""Convert a value to an awaitable if not already an awaitable."""
if inspect.isawaitable(value):
return value
# For some reason mypy doesn't deduce that value is not Awaitable here, even though
# inspect.isawaitable returns a TypeGuard.
assert not isinstance(value, Awaitable)
return DoneAwaitable(value) |
Prevent a `Deferred` from being cancelled by wrapping it in another `Deferred`.
Args:
deferred: The `Deferred` to protect against cancellation. Must not follow the
Synapse logcontext rules.
Returns:
A new `Deferred`, which will contain the result of the original `Deferred`.
The new `Deferred` will not propagate cancellation through to the original.
When cancelled, the new `Deferred` will fail with a `CancelledError`.
The new `Deferred` will not follow the Synapse logcontext rules and should be
wrapped with `make_deferred_yieldable`. | def stop_cancellation(deferred: "defer.Deferred[T]") -> "defer.Deferred[T]":
"""Prevent a `Deferred` from being cancelled by wrapping it in another `Deferred`.
Args:
deferred: The `Deferred` to protect against cancellation. Must not follow the
Synapse logcontext rules.
Returns:
A new `Deferred`, which will contain the result of the original `Deferred`.
The new `Deferred` will not propagate cancellation through to the original.
When cancelled, the new `Deferred` will fail with a `CancelledError`.
The new `Deferred` will not follow the Synapse logcontext rules and should be
wrapped with `make_deferred_yieldable`.
"""
new_deferred: "defer.Deferred[T]" = defer.Deferred()
deferred.chainDeferred(new_deferred)
return new_deferred |
Delay cancellation of a coroutine or `Deferred` awaitable until it resolves.
Has the same effect as `stop_cancellation`, but the returned `Deferred` will not
resolve with a `CancelledError` until the original awaitable resolves.
Args:
deferred: The coroutine or `Deferred` to protect against cancellation. May
optionally follow the Synapse logcontext rules.
Returns:
A new `Deferred`, which will contain the result of the original coroutine or
`Deferred`. The new `Deferred` will not propagate cancellation through to the
original coroutine or `Deferred`.
When cancelled, the new `Deferred` will wait until the original coroutine or
`Deferred` resolves before failing with a `CancelledError`.
The new `Deferred` will follow the Synapse logcontext rules if `awaitable`
follows the Synapse logcontext rules. Otherwise the new `Deferred` should be
wrapped with `make_deferred_yieldable`. | def delay_cancellation(awaitable: Awaitable[T]) -> Awaitable[T]:
"""Delay cancellation of a coroutine or `Deferred` awaitable until it resolves.
Has the same effect as `stop_cancellation`, but the returned `Deferred` will not
resolve with a `CancelledError` until the original awaitable resolves.
Args:
deferred: The coroutine or `Deferred` to protect against cancellation. May
optionally follow the Synapse logcontext rules.
Returns:
A new `Deferred`, which will contain the result of the original coroutine or
`Deferred`. The new `Deferred` will not propagate cancellation through to the
original coroutine or `Deferred`.
When cancelled, the new `Deferred` will wait until the original coroutine or
`Deferred` resolves before failing with a `CancelledError`.
The new `Deferred` will follow the Synapse logcontext rules if `awaitable`
follows the Synapse logcontext rules. Otherwise the new `Deferred` should be
wrapped with `make_deferred_yieldable`.
"""
# First, convert the awaitable into a `Deferred`.
if isinstance(awaitable, defer.Deferred):
deferred = awaitable
elif asyncio.iscoroutine(awaitable):
# Ideally we'd use `Deferred.fromCoroutine()` here, to save on redundant
# type-checking, but we'd need Twisted >= 21.2.
deferred = defer.ensureDeferred(awaitable)
else:
# We have no idea what to do with this awaitable.
# We assume it's already resolved, such as `DoneAwaitable`s or `Future`s from
# `make_awaitable`, and let the caller `await` it normally.
return awaitable
def handle_cancel(new_deferred: "defer.Deferred[T]") -> None:
# before the new deferred is cancelled, we `pause` it to stop the cancellation
# propagating. we then `unpause` it once the wrapped deferred completes, to
# propagate the exception.
new_deferred.pause()
new_deferred.errback(Failure(CancelledError()))
deferred.addBoth(lambda _: new_deferred.unpause())
new_deferred: "defer.Deferred[T]" = defer.Deferred(handle_cancel)
deferred.chainDeferred(new_deferred)
return new_deferred |
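A sketch of the intended use: wrap shared work so that one cancelled caller does not abort it for everyone else (the coroutine name is illustrative):
# If this request is cancelled, `_fetch_and_cache_thing()` keeps running to
# completion; only this caller observes the resulting CancelledError.
result = await delay_cancellation(_fetch_and_cache_thing())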
Marks a function as cancellable.
Servlet methods with this decorator will be cancelled if the client disconnects before we
finish processing the request.
Although this annotation is particularly useful for servlet methods, it's also
useful for intermediate functions, where it documents the fact that the function has
been audited for cancellation safety and needs to preserve that.
This then simplifies auditing new functions that call those same intermediate
functions.
During cancellation, `Deferred.cancel()` will be invoked on the `Deferred` wrapping
the method. The `cancel()` call will propagate down to the `Deferred` that is
currently being waited on. That `Deferred` will raise a `CancelledError`, which will
propagate up, as per normal exception handling.
Before applying this decorator to a new function, you MUST recursively check
that all `await`s in the function are on `async` functions or `Deferred`s that
handle cancellation cleanly, otherwise a variety of bugs may occur, ranging from
premature logging context closure, to stuck requests, to database corruption.
See the documentation page on Cancellation for more information.
Usage:
class SomeServlet(RestServlet):
@cancellable
async def on_GET(self, request: SynapseRequest) -> ...:
... | def cancellable(function: F) -> F:
"""Marks a function as cancellable.
Servlet methods with this decorator will be cancelled if the client disconnects before we
finish processing the request.
Although this annotation is particularly useful for servlet methods, it's also
useful for intermediate functions, where it documents the fact that the function has
been audited for cancellation safety and needs to preserve that.
This then simplifies auditing new functions that call those same intermediate
functions.
During cancellation, `Deferred.cancel()` will be invoked on the `Deferred` wrapping
the method. The `cancel()` call will propagate down to the `Deferred` that is
currently being waited on. That `Deferred` will raise a `CancelledError`, which will
propagate up, as per normal exception handling.
Before applying this decorator to a new function, you MUST recursively check
that all `await`s in the function are on `async` functions or `Deferred`s that
handle cancellation cleanly, otherwise a variety of bugs may occur, ranging from
premature logging context closure, to stuck requests, to database corruption.
See the documentation page on Cancellation for more information.
Usage:
class SomeServlet(RestServlet):
@cancellable
async def on_GET(self, request: SynapseRequest) -> ...:
...
"""
function.cancellable = True # type: ignore[attr-defined]
return function |
Checks whether a servlet method has the `@cancellable` flag. | def is_function_cancellable(function: Callable[..., Any]) -> bool:
"""Checks whether a servlet method has the `@cancellable` flag."""
return getattr(function, "cancellable", False) |
Yield pairs (requirement, must_be_installed). | def _generic_dependencies() -> Iterable[Dependency]:
"""Yield pairs (requirement, must_be_installed)."""
requirements = metadata.requires(DISTRIBUTION_NAME)
assert requirements is not None
for raw_requirement in requirements:
req = Requirement(raw_requirement)
if _is_dev_dependency(req) or _should_ignore_runtime_requirement(req):
continue
# https://packaging.pypa.io/en/latest/markers.html#usage notes that
# > Evaluating an extra marker with no environment is an error
# so we pass in a dummy empty extra value here.
must_be_installed = req.marker is None or req.marker.evaluate({"extra": ""})
yield Dependency(req, must_be_installed) |
Yield additional dependencies needed for a given `extra`. | def _dependencies_for_extra(extra: str) -> Iterable[Dependency]:
"""Yield additional dependencies needed for a given `extra`."""
requirements = metadata.requires(DISTRIBUTION_NAME)
assert requirements is not None
for raw_requirement in requirements:
req = Requirement(raw_requirement)
if _is_dev_dependency(req):
continue
# Exclude mandatory deps by only selecting deps needed with this extra.
if (
req.marker is not None
and req.marker.evaluate({"extra": extra})
and not req.marker.evaluate({"extra": ""})
):
yield Dependency(req, True) |
Check Synapse's dependencies are present and correctly versioned.
If provided, `extra` must be the name of a packaging extra (e.g. "saml2" in
`pip install matrix-synapse[saml2]`).
If `extra` is None, this function checks that
- all mandatory dependencies are installed and correctly versioned, and
- each optional dependency that's installed is correctly versioned.
If `extra` is not None, this function checks that
- the dependencies needed for that extra are installed and correctly versioned.
:raises DependencyException: if a dependency is missing or incorrectly versioned.
:raises ValueError: if this extra does not exist. | def check_requirements(extra: Optional[str] = None) -> None:
"""Check Synapse's dependencies are present and correctly versioned.
If provided, `extra` must be the name of a packaging extra (e.g. "saml2" in
`pip install matrix-synapse[saml2]`).
If `extra` is None, this function checks that
- all mandatory dependencies are installed and correctly versioned, and
- each optional dependency that's installed is correctly versioned.
If `extra` is not None, this function checks that
- the dependencies needed for that extra are installed and correctly versioned.
:raises DependencyException: if a dependency is missing or incorrectly versioned.
:raises ValueError: if this extra does not exist.
"""
# First work out which dependencies are required, and which are optional.
if extra is None:
dependencies = _generic_dependencies()
elif extra in RUNTIME_EXTRAS:
dependencies = _dependencies_for_extra(extra)
else:
raise ValueError(f"Synapse {VERSION} does not provide the feature '{extra}'")
deps_unfulfilled = []
errors = []
for requirement, must_be_installed in dependencies:
try:
dist: metadata.Distribution = metadata.distribution(requirement.name)
except metadata.PackageNotFoundError:
if must_be_installed:
deps_unfulfilled.append(requirement.name)
errors.append(_not_installed(requirement, extra))
else:
if dist.version is None:
# This shouldn't happen---it suggests a borked virtualenv. (See
# https://github.com/matrix-org/synapse/issues/12223)
# Try to give a vaguely helpful error message anyway.
# Type-ignore: the annotations don't reflect reality: see
# https://github.com/python/typeshed/issues/7513
# https://bugs.python.org/issue47060
deps_unfulfilled.append(requirement.name) # type: ignore[unreachable]
errors.append(_no_reported_version(requirement, extra))
# We specify prereleases=True to allow prereleases such as RCs.
elif not requirement.specifier.contains(dist.version, prereleases=True):
deps_unfulfilled.append(requirement.name)
errors.append(_incorrect_version(requirement, dist.version, extra))
if deps_unfulfilled:
for err in errors:
logging.error(err)
raise DependencyException(deps_unfulfilled) |
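A hedged usage sketch — typically run once at startup; "saml2" below is only an example extra name:

try:
    check_requirements()         # mandatory dependencies
    check_requirements("saml2")  # plus one extra, if the deployment relies on it
except DependencyException as e:
    # The exception message lists the unfulfilled requirements.
    logging.error("Unfulfilled dependencies: %s", e)
    raise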
daemonize the current process
This calls fork(), and has the main process exit. When it returns we will be
running in the child process. | def daemonize_process(pid_file: str, logger: logging.Logger, chdir: str = "/") -> None:
"""daemonize the current process
This calls fork(), and has the main process exit. When it returns we will be
running in the child process.
"""
# If the pidfile already exists, read the old pid from it so we can write it
# back if locking fails below (opening the file for locking truncates its contents).
if os.path.isfile(pid_file):
with open(pid_file) as pid_fh:
old_pid = pid_fh.read()
# Create a lockfile so that only one instance of this daemon is running at any time.
try:
lock_fh = open(pid_file, "w")
except OSError:
print("Unable to create the pidfile.")
sys.exit(1)
try:
# Try to get an exclusive lock on the file. This will fail if another process
# has the file locked.
fcntl.flock(lock_fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
except OSError:
print("Unable to lock on the pidfile.")
# We need to overwrite the pidfile if we got here.
#
# XXX better to avoid overwriting it, surely. this looks racey as the pid file
# could be created between us trying to read it and us trying to lock it.
with open(pid_file, "w") as pid_fh:
pid_fh.write(old_pid)
sys.exit(1)
# Fork, creating a new process for the child.
process_id = os.fork()
if process_id != 0:
# parent process: exit.
# we use os._exit to avoid running the atexit handlers. In particular, that
# means we don't flush the logs. This is important because if we are using
# a MemoryHandler, we could have logs buffered which are now buffered in both
# the main and the child process, so if we let the main process flush the logs,
# we'll get two copies.
os._exit(0)
# This is the child process. Continue.
# Stop listening for signals that the parent process receives.
# This is done by getting a new process id.
# setpgrp() is an alternative to setsid().
# setsid puts the process in a new parent group and detaches its controlling
# terminal.
os.setsid()
# point stdin, stdout, stderr at /dev/null
devnull = "/dev/null"
if hasattr(os, "devnull"):
# Python has set os.devnull on this system, use it instead as it might be
# different than /dev/null.
devnull = os.devnull
devnull_fd = os.open(devnull, os.O_RDWR)
os.dup2(devnull_fd, 0)
os.dup2(devnull_fd, 1)
os.dup2(devnull_fd, 2)
os.close(devnull_fd)
# now that we have redirected stderr to /dev/null, any uncaught exceptions will
# get sent to /dev/null, so make sure we log them.
#
# (we don't normally expect reactor.run to raise any exceptions, but this will
# also catch any other uncaught exceptions before we get that far.)
def excepthook(
type_: Type[BaseException],
value: BaseException,
traceback: Optional[TracebackType],
) -> None:
logger.critical("Unhanded exception", exc_info=(type_, value, traceback))
sys.excepthook = excepthook
# Set umask to default to safe file permissions when running as a root daemon. 027
# is an octal number which we are typing as 0o27 for Python3 compatibility.
os.umask(0o27)
# Change to a known directory. If this isn't done, starting a daemon in a
# subdirectory that needs to be deleted results in "directory busy" errors.
os.chdir(chdir)
try:
lock_fh.write("%s" % (os.getpid()))
lock_fh.flush()
except OSError:
logger.error("Unable to write pid to the pidfile.")
print("Unable to write pid to the pidfile.")
sys.exit(1)
# write a log line on SIGTERM.
def sigterm(signum: int, frame: Optional[FrameType]) -> NoReturn:
logger.warning("Caught signal %s. Stopping daemon." % signum)
sys.exit(0)
signal.signal(signal.SIGTERM, sigterm)
# Cleanup pid file at exit.
def exit() -> None:
logger.warning("Stopping daemon.")
os.remove(pid_file)
sys.exit(0)
atexit.register(exit)
logger.warning("Starting daemon.") |
SHA256 hash an input string, encode the digest as url-safe base64, and
return the result
Args:
input_text: string to hash
Returns:
A sha256 hashed and url-safe base64 encoded digest | def sha256_and_url_safe_base64(input_text: str) -> str:
"""SHA256 hash an input string, encode the digest as url-safe base64, and
return
Args:
input_text: string to hash
Returns:
A sha256 hashed and url-safe base64 encoded digest
"""
digest = hashlib.sha256(input_text.encode()).digest()
return unpaddedbase64.encode_base64(digest, urlsafe=True) |
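For example — the digest itself is not spelled out here, but since a SHA-256 digest is 32 bytes, the unpadded url-safe base64 output is always 43 characters and contains no `+`, `/` or `=`:

token = sha256_and_url_safe_base64("alice@example.com lookup-pepper")
assert len(token) == 43
assert all(c not in token for c in "+/=")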
Create the resource tree for this homeserver.
This is unduly complicated because Twisted does not support putting
child resources more than 1 level deep at a time.
Args:
desired_tree: Dict from desired paths to desired resources.
root_resource: The root resource to add the tree to.
Returns:
The ``root_resource`` with a tree of child resources added to it. | def create_resource_tree(
desired_tree: Dict[str, Resource], root_resource: Resource
) -> Resource:
"""Create the resource tree for this homeserver.
This is unduly complicated because Twisted does not support putting
child resources more than 1 level deep at a time.
Args:
desired_tree: Dict from desired paths to desired resources.
root_resource: The root resource to add the tree to.
Returns:
The ``root_resource`` with a tree of child resources added to it.
"""
# ideally we'd just use getChild and putChild but getChild doesn't work
# unless you give it a Request object IN ADDITION to the name :/ So
# instead, we'll store a copy of this mapping so we can actually add
# extra resources to existing nodes. See self._resource_id for the key.
resource_mappings: Dict[str, Resource] = {}
for full_path_str, res in desired_tree.items():
# twisted requires all resources to be bytes
full_path = full_path_str.encode("utf-8")
logger.info("Attaching %s to path %s", res, full_path)
last_resource = root_resource
for path_seg in full_path.split(b"/")[1:-1]:
if path_seg not in last_resource.listNames():
# resource doesn't exist, so make a "dummy resource"
child_resource: Resource = UnrecognizedRequestResource()
last_resource.putChild(path_seg, child_resource)
res_id = _resource_id(last_resource, path_seg)
resource_mappings[res_id] = child_resource
last_resource = child_resource
else:
# we have an existing Resource, use that instead.
res_id = _resource_id(last_resource, path_seg)
last_resource = resource_mappings[res_id]
# ===========================
# now attach the actual desired resource
last_path_seg = full_path.split(b"/")[-1]
# if there is already a resource here, thieve its children and
# replace it
res_id = _resource_id(last_resource, last_path_seg)
if res_id in resource_mappings:
# there is a dummy resource at this path already, which needs
# to be replaced with the desired resource.
existing_dummy_resource = resource_mappings[res_id]
for child_name in existing_dummy_resource.listNames():
child_res_id = _resource_id(existing_dummy_resource, child_name)
child_resource = resource_mappings[child_res_id]
# steal the children
res.putChild(child_name, child_resource)
# finally, insert the desired resource in the right place
last_resource.putChild(last_path_seg, res)
res_id = _resource_id(last_resource, last_path_seg)
resource_mappings[res_id] = res
return root_resource |
Construct an arbitrary resource ID so you can retrieve the mapping
later.
If you want to represent resource A putChild resource B with path C,
the mapping should look like _resource_id(A,C) = B.
Args:
resource: The *parent* Resource
path_seg: The name of the child Resource to be attached.
Returns:
A unique string which can be a key to the child Resource. | def _resource_id(resource: Resource, path_seg: bytes) -> str:
"""Construct an arbitrary resource ID so you can retrieve the mapping
later.
If you want to represent resource A putChild resource B with path C,
the mapping should look like _resource_id(A,C) = B.
Args:
resource: The *parent* Resource
path_seg: The name of the child Resource to be attached.
Returns:
A unique string which can be a key to the child Resource.
"""
return "%s-%r" % (resource, path_seg) |
batch an iterable up into tuples with a maximum size
Args:
iterable: the iterable to slice
size: the maximum batch size
Returns:
an iterator over the chunks | def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T, ...]]:
"""batch an iterable up into tuples with a maximum size
Args:
iterable: the iterable to slice
size: the maximum batch size
Returns:
an iterator over the chunks
"""
# make sure we can deal with iterables like lists too
sourceiter = iter(iterable)
# call islice until it returns an empty tuple
return iter(lambda: tuple(islice(sourceiter, size)), ()) |
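A quick illustration of the batching behaviour — the final tuple may be shorter than the requested size:

assert list(batch_iter(range(5), 2)) == [(0, 1), (2, 3), (4,)]
assert list(batch_iter([], 3)) == []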
Split the given sequence into chunks of the given size
The last chunk may be shorter than the given size.
If the input is empty, no chunks are returned. | def chunk_seq(iseq: S, maxlen: int) -> Iterator[S]:
"""Split the given sequence into chunks of the given size
The last chunk may be shorter than the given size.
If the input is empty, no chunks are returned.
"""
return (iseq[i : i + maxlen] for i in range(0, len(iseq), maxlen)) |
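For example, chunking a string (any sliceable sequence behaves the same way):

assert list(chunk_seq("abcdefg", 3)) == ["abc", "def", "g"]
assert list(chunk_seq([], 3)) == []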
Separate a given iterable into two lists based on the result of a predicate function.
Args:
iterable: the iterable to partition (separate)
predicate: a function that takes an item from the iterable and returns a boolean
Returns:
A tuple of two lists, the first containing all items for which the predicate
returned True, the second containing all items for which the predicate returned
False | def partition(
iterable: Iterable[T], predicate: Callable[[T], bool]
) -> Tuple[List[T], List[T]]:
"""
Separate a given iterable into two lists based on the result of a predicate function.
Args:
iterable: the iterable to partition (separate)
predicate: a function that takes an item from the iterable and returns a boolean
Returns:
A tuple of two lists, the first containing all items for which the predicate
returned True, the second containing all items for which the predicate returned
False
"""
true_results = []
false_results = []
for item in iterable:
if predicate(item):
true_results.append(item)
else:
false_results.append(item)
return true_results, false_results |
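For example, splitting a list of integers into evens and odds:

evens, odds = partition([1, 2, 3, 4, 5], lambda n: n % 2 == 0)
assert evens == [2, 4]
assert odds == [1, 3, 5]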
Given a set of nodes and a graph, yield the nodes in topological order.
For example `sorted_topologically([1, 2], {1: [2]})` will yield `2, 1`. | def sorted_topologically(
nodes: Iterable[T],
graph: Mapping[T, Collection[T]],
) -> Generator[T, None, None]:
"""Given a set of nodes and a graph, yield the nodes in toplogical order.
For example `sorted_topologically([1, 2], {1: [2]})` will yield `2, 1`.
"""
# This is implemented by Kahn's algorithm.
degree_map = {node: 0 for node in nodes}
reverse_graph: Dict[T, Set[T]] = {}
for node, edges in graph.items():
if node not in degree_map:
continue
for edge in set(edges):
if edge in degree_map:
degree_map[node] += 1
reverse_graph.setdefault(edge, set()).add(node)
reverse_graph.setdefault(node, set())
zero_degree = [node for node, degree in degree_map.items() if degree == 0]
heapq.heapify(zero_degree)
while zero_degree:
node = heapq.heappop(zero_degree)
yield node
for edge in reverse_graph.get(node, []):
if edge in degree_map:
degree_map[edge] -= 1
if degree_map[edge] == 0:
heapq.heappush(zero_degree, edge) |
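The docstring's example spelled out, plus a slightly bigger graph; node 1 depends on node 2, so 2 is yielded first (ties are broken by the heap, i.e. smallest node first):

assert list(sorted_topologically([1, 2], {1: [2]})) == [2, 1]

# 3 depends on 1 and 2, which are independent of each other.
assert list(sorted_topologically([1, 2, 3], {3: [1, 2]})) == [1, 2, 3]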
Walk the graph topologically, returning batches of nodes where each node is
returned only after all of the nodes it references have been returned.
For example, given the following graph:
A
/ \
B C
\ /
D
This function will return: `[[A], [B, C], [D]]`.
This function is useful for e.g. batch persisting events in an auth chain,
where we can only persist an event if all its auth events have already been
persisted. | def sorted_topologically_batched(
nodes: Iterable[T],
graph: Mapping[T, Collection[T]],
) -> Generator[Collection[T], None, None]:
r"""Walk the graph topologically, returning batches of nodes where all nodes
that references it have been previously returned.
For example, given the following graph:
A
/ \
B C
\ /
D
This function will return: `[[A], [B, C], [D]]`.
This function is useful for e.g. batch persisting events in an auth chain,
where we can only persist an event if all its auth events have already been
persisted.
"""
degree_map = {node: 0 for node in nodes}
reverse_graph: Dict[T, Set[T]] = {}
for node, edges in graph.items():
if node not in degree_map:
continue
for edge in set(edges):
if edge in degree_map:
degree_map[node] += 1
reverse_graph.setdefault(edge, set()).add(node)
reverse_graph.setdefault(node, set())
zero_degree = [node for node, degree in degree_map.items() if degree == 0]
while zero_degree:
new_zero_degree = []
for node in zero_degree:
for edge in reverse_graph.get(node, []):
if edge in degree_map:
degree_map[edge] -= 1
if degree_map[edge] == 0:
new_zero_degree.append(edge)
yield zero_degree
zero_degree = new_zero_degree |
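The diamond graph from the docstring, written out as a runnable check; each batch is compared order-insensitively, since the order within a batch depends on set iteration order:

graph = {"B": ["A"], "C": ["A"], "D": ["B", "C"]}
batches = [
    sorted(batch)
    for batch in sorted_topologically_batched(["A", "B", "C", "D"], graph)
]
assert batches == [["A"], ["B", "C"], ["D"]]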
Extracts a caveat value from a macaroon token.
Checks that there is exactly one caveat of the form "key = <val>" in the macaroon,
and returns the extracted value.
Args:
macaroon: the token
key: the key of the caveat to extract
Returns:
The extracted value
Raises:
MacaroonVerificationFailedException: if there are conflicting values for the
caveat in the macaroon, or if the caveat was not found in the macaroon. | def get_value_from_macaroon(macaroon: pymacaroons.Macaroon, key: str) -> str:
"""Extracts a caveat value from a macaroon token.
Checks that there is exactly one caveat of the form "key = <val>" in the macaroon,
and returns the extracted value.
Args:
macaroon: the token
key: the key of the caveat to extract
Returns:
The extracted value
Raises:
MacaroonVerificationFailedException: if there are conflicting values for the
caveat in the macaroon, or if the caveat was not found in the macaroon.
"""
prefix = key + " = "
result: Optional[str] = None
for caveat in macaroon.caveats:
if not caveat.caveat_id.startswith(prefix):
continue
val = caveat.caveat_id[len(prefix) :]
if result is None:
# first time we found this caveat: record the value
result = val
elif val != result:
# on subsequent occurrences, raise if the value is different.
raise MacaroonVerificationFailedException(
"Conflicting values for caveat " + key
)
if result is not None:
return result
# If the caveat is not there, we raise a MacaroonVerificationFailedException.
# Note that it is insecure to generate a macaroon without all the caveats you
# might need (because there is nothing stopping people from adding extra caveats),
# so if the caveat isn't there, something odd must be going on.
raise MacaroonVerificationFailedException("No %s caveat in macaroon" % (key,)) |
Make a macaroon verifier which accepts 'time' caveats
Builds a caveat verifier which will accept unexpired 'time' caveats, and adds it to
the given macaroon verifier.
Args:
v: the macaroon verifier
get_time_ms: a callable which will return the timestamp after which the caveat
should be considered expired. Normally the current time. | def satisfy_expiry(v: pymacaroons.Verifier, get_time_ms: Callable[[], int]) -> None:
"""Make a macaroon verifier which accepts 'time' caveats
Builds a caveat verifier which will accept unexpired 'time' caveats, and adds it to
the given macaroon verifier.
Args:
v: the macaroon verifier
get_time_ms: a callable which will return the timestamp after which the caveat
should be considered expired. Normally the current time.
"""
def verify_expiry_caveat(caveat: str) -> bool:
time_msec = get_time_ms()
prefix = "time < "
if not caveat.startswith(prefix):
return False
expiry = int(caveat[len(prefix) :])
return time_msec < expiry
v.satisfy_general(verify_expiry_caveat) |
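A hedged sketch of pairing this with a pymacaroons Verifier (placeholder key; `verify` raises a pymacaroons exception if a caveat is unmet or the signature is wrong):

import time
import pymacaroons

macaroon = pymacaroons.Macaroon(
    location="example.com", identifier="key1", key="a secret"
)
# Expire one minute from now (caveat timestamps are in milliseconds).
macaroon.add_first_party_caveat("time < %d" % (int(time.time() * 1000) + 60_000,))

v = pymacaroons.Verifier()
satisfy_expiry(v, lambda: int(time.time() * 1000))
v.verify(macaroon, "a secret")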
Starts a ssh listener with password authentication using
the given username and password. Clients connecting to the ssh
listener will find themselves in a colored python shell with
the supplied globals.
Args:
settings: The manhole configuration, including the username and password
ssh clients should auth with and optional SSH host keys.
globals: The variables to expose in the shell.
Returns:
A factory to pass to ``listenTCP`` | def manhole(settings: ManholeConfig, globals: Dict[str, Any]) -> ServerFactory:
"""Starts a ssh listener with password authentication using
the given username and password. Clients connecting to the ssh
listener will find themselves in a colored python shell with
the supplied globals.
Args:
settings: The manhole configuration, including the username and password
ssh clients should auth with and optional SSH host keys.
globals: The variables to expose in the shell.
Returns:
A factory to pass to ``listenTCP``
"""
username = settings.username
password = settings.password.encode("ascii")
priv_key = settings.priv_key
if priv_key is None:
priv_key = Key.fromString(PRIVATE_KEY)
pub_key = settings.pub_key
if pub_key is None:
pub_key = Key.fromString(PUBLIC_KEY)
checker = checkers.InMemoryUsernamePasswordDatabaseDontUse(**{username: password})
rlm = manhole_ssh.TerminalRealm()
# mypy ignored here because:
# - can't deduce types of lambdas
# - variable is Type[ServerProtocol], expr is Callable[[], ServerProtocol]
rlm.chainedProtocolFactory = lambda: insults.ServerProtocol( # type: ignore[misc,assignment]
SynapseManhole, dict(globals, __name__="__console__")
)
# type-ignore: This is an error in Twisted's annotations. See
# https://github.com/twisted/twisted/issues/11812 and /11813 .
factory = manhole_ssh.ConchFactory(portal.Portal(rlm, [checker])) # type: ignore[arg-type]
# conch has the wrong type on these dicts (says bytes to bytes,
# should be bytes to Keys judging by how it's used).
factory.privateKeys[b"ssh-rsa"] = priv_key # type: ignore[assignment]
factory.publicKeys[b"ssh-rsa"] = pub_key # type: ignore[assignment]
# ConchFactory is a Factory, not a ServerFactory, but they are identical.
return factory |
Decorate an async method with a `Measure` context manager.
The Measure is created using `self.clock`; it should only be used to decorate
methods in classes defining an instance-level `clock` attribute.
Usage:
@measure_func()
async def foo(...):
...
Which is analogous to:
async def foo(...):
with Measure(...):
... | def measure_func(
name: Optional[str] = None,
) -> Callable[[Callable[P, Awaitable[R]]], Callable[P, Awaitable[R]]]:
"""Decorate an async method with a `Measure` context manager.
The Measure is created using `self.clock`; it should only be used to decorate
methods in classes defining an instance-level `clock` attribute.
Usage:
@measure_func()
async def foo(...):
...
Which is analogous to:
async def foo(...):
with Measure(...):
...
"""
def wrapper(
func: Callable[Concatenate[HasClock, P], Awaitable[R]]
) -> Callable[P, Awaitable[R]]:
block_name = func.__name__ if name is None else name
@wraps(func)
async def measured_func(self: HasClock, *args: P.args, **kwargs: P.kwargs) -> R:
with Measure(self.clock, block_name):
r = await func(self, *args, **kwargs)
return r
# There are some shenanigans here, because we're decorating a method but
# explicitly making use of the `self` parameter. The key thing here is that the
# return type within the return type for `measure_func` itself describes how the
# decorated function will be called.
return measured_func # type: ignore[return-value]
return wrapper |
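A sketch of the intended usage: the decorated coroutine must live on a class with a `clock` attribute (the class, method and block names below are illustrative):

class RoomSummarizer:
    def __init__(self, hs):
        self.clock = hs.get_clock()  # Measure needs a clock

    @measure_func("summarize_room")
    async def summarize_room(self, room_id: str) -> dict:
        # work done here is timed under the "summarize_room" block
        return {"room_id": room_id}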
Loads a synapse module with its config
Args:
provider: a dict with keys 'module' (the module name) and 'config'
(the config dict).
config_path: the path within the config file. This will be used as a basis
for any error message.
Returns:
Tuple of (provider class, parsed config object) | def load_module(provider: dict, config_path: StrSequence) -> Tuple[Type, Any]:
"""Loads a synapse module with its config
Args:
provider: a dict with keys 'module' (the module name) and 'config'
(the config dict).
config_path: the path within the config file. This will be used as a basis
for any error message.
Returns:
Tuple of (provider class, parsed config object)
"""
modulename = provider.get("module")
if not isinstance(modulename, str):
raise ConfigError("expected a string", path=tuple(config_path) + ("module",))
# We need to import the module, and then pick the class out of
# that, so we split based on the last dot.
module_name, clz = modulename.rsplit(".", 1)
module = importlib.import_module(module_name)
provider_class = getattr(module, clz)
# Load the module config. If None, pass an empty dictionary instead
module_config = provider.get("config") or {}
if hasattr(provider_class, "parse_config"):
try:
provider_config = provider_class.parse_config(module_config)
except jsonschema.ValidationError as e:
raise json_error_to_config_error(e, tuple(config_path) + ("config",))
except ConfigError as e:
raise _wrap_config_error(
"Failed to parse config for module %r" % (modulename,),
prefix=tuple(config_path) + ("config",),
e=e,
)
except Exception as e:
raise ConfigError(
"Failed to parse config for module %r" % (modulename,),
path=tuple(config_path) + ("config",),
) from e
else:
provider_config = module_config
return provider_class, provider_config |
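A hedged example of the expected input shape (the module path and config keys are hypothetical):

provider = {
    "module": "my_module.MyPasswordProvider",
    "config": {"endpoint": "https://auth.example.com"},
}
provider_class, parsed_config = load_module(
    provider, ("password_providers", "<item 0>")
)
# The caller then instantiates the class however the module expects, e.g.:
# instance = provider_class(parsed_config)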
Load a python module, and return a reference to its global namespace
Args:
location: path to the module
Returns:
python module object | def load_python_module(location: str) -> ModuleType:
"""Load a python module, and return a reference to its global namespace
Args:
location: path to the module
Returns:
python module object
"""
spec = importlib.util.spec_from_file_location(location, location)
if spec is None:
raise Exception("Unable to load module at %s" % (location,))
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod) # type: ignore
return mod |
Wrap a relative ConfigError with a new path
This is useful when we have a ConfigError with a relative path due to a problem
parsing part of the config, and we now need to set it in context. | def _wrap_config_error(msg: str, prefix: StrSequence, e: ConfigError) -> "ConfigError":
"""Wrap a relative ConfigError with a new path
This is useful when we have a ConfigError with a relative path due to a problem
parsing part of the config, and we now need to set it in context.
"""
path = prefix
if e.path:
path = tuple(prefix) + tuple(e.path)
e1 = ConfigError(msg, path)
# ideally we would set the 'cause' of the new exception to the original exception;
# however now that we have merged the path into our own, the stringification of
# e will be incorrect, so instead we create a new exception with just the "msg"
# part.
e1.__cause__ = Exception(e.msg)
e1.__cause__.__cause__ = e.__cause__
return e1 |
Takes an ISO-3166-1 2 letter country code and phone number and
returns an msisdn representing the canonical version of that
phone number.
As an example, if `country` is "GB" and `number` is "7470674927", this
function will return "447470674927".
Args:
country: ISO-3166-1 2 letter country code
number: Phone number in a national or international format
Returns:
The canonical form of the phone number, as an msisdn.
Raises:
SynapseError if the number could not be parsed. | def phone_number_to_msisdn(country: str, number: str) -> str:
"""
Takes an ISO-3166-1 2 letter country code and phone number and
returns an msisdn representing the canonical version of that
phone number.
As an example, if `country` is "GB" and `number` is "7470674927", this
function will return "447470674927".
Args:
country: ISO-3166-1 2 letter country code
number: Phone number in a national or international format
Returns:
The canonical form of the phone number, as an msisdn.
Raises:
SynapseError if the number could not be parsed.
"""
try:
phoneNumber = phonenumbers.parse(number, country)
except phonenumbers.NumberParseException:
raise SynapseError(400, "Unable to parse phone number")
return phonenumbers.format_number(phoneNumber, phonenumbers.PhoneNumberFormat.E164)[
1:
] |
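The docstring's example as executable checks (a national and an international form of the same number normalise to the same msisdn):

assert phone_number_to_msisdn("GB", "7470674927") == "447470674927"
assert phone_number_to_msisdn("GB", "+44 7470 674927") == "447470674927"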
Patch defer.inlineCallbacks so that it checks the state of the logcontext on exit | def do_patch() -> None:
"""
Patch defer.inlineCallbacks so that it checks the state of the logcontext on exit
"""
from synapse.logging.context import current_context
global _already_patched
orig_inline_callbacks = defer.inlineCallbacks
if _already_patched:
return
def new_inline_callbacks(
f: Callable[P, Generator["Deferred[object]", object, T]]
) -> Callable[P, "Deferred[T]"]:
@functools.wraps(f)
def wrapped(*args: P.args, **kwargs: P.kwargs) -> "Deferred[T]":
start_context = current_context()
changes: List[str] = []
orig: Callable[P, "Deferred[T]"] = orig_inline_callbacks(
_check_yield_points(f, changes)
)
try:
res: "Deferred[T]" = orig(*args, **kwargs)
except Exception:
if current_context() != start_context:
for err in changes:
print(err, file=sys.stderr)
err = "%s changed context from %s to %s on exception" % (
f,
start_context,
current_context(),
)
print(err, file=sys.stderr)
raise Exception(err)
raise
if not isinstance(res, Deferred) or res.called:
if current_context() != start_context:
for err in changes:
print(err, file=sys.stderr)
err = "Completed %s changed context from %s to %s" % (
f,
start_context,
current_context(),
)
# print the error to stderr because otherwise all we
# see in travis-ci is the 500 error
print(err, file=sys.stderr)
raise Exception(err)
return res
if current_context():
err = (
"%s returned incomplete deferred in non-sentinel context "
"%s (start was %s)"
) % (f, current_context(), start_context)
print(err, file=sys.stderr)
raise Exception(err)
def check_ctx(r: T) -> T:
if current_context() != start_context:
for err in changes:
print(err, file=sys.stderr)
err = "%s completion of %s changed context from %s to %s" % (
"Failure" if isinstance(r, Failure) else "Success",
f,
start_context,
current_context(),
)
print(err, file=sys.stderr)
raise Exception(err)
return r
res.addBoth(check_ctx)
return res
return wrapped
defer.inlineCallbacks = new_inline_callbacks
_already_patched = True |
Wraps a generator that is about to be passed to defer.inlineCallbacks
checking that after every yield the log contexts are correct.
It's perfectly valid for log contexts to change within a function, e.g. due
to new Measure blocks, so such changes are added to the given `changes`
list instead of triggering an exception.
Args:
f: generator function to wrap
changes: A list of strings detailing how the contexts
changed within a function.
Returns:
function | def _check_yield_points(
f: Callable[P, Generator["Deferred[object]", object, T]],
changes: List[str],
) -> Callable:
"""Wraps a generator that is about to be passed to defer.inlineCallbacks
checking that after every yield the log contexts are correct.
It's perfectly valid for log contexts to change within a function, e.g. due
to new Measure blocks, so such changes are added to the given `changes`
list instead of triggering an exception.
Args:
f: generator function to wrap
changes: A list of strings detailing how the contexts
changed within a function.
Returns:
function
"""
from synapse.logging.context import current_context
@functools.wraps(f)
def check_yield_points_inner(
*args: P.args, **kwargs: P.kwargs
) -> Generator["Deferred[object]", object, T]:
gen = f(*args, **kwargs)
last_yield_line_no = gen.gi_frame.f_lineno
result: Any = None
while True:
expected_context = current_context()
try:
isFailure = isinstance(result, Failure)
if isFailure:
d = result.throwExceptionIntoGenerator(gen)
else:
d = gen.send(result)
except (StopIteration, defer._DefGen_Return) as e:
if current_context() != expected_context:
# This happens when the context is lost sometime *after* the
# final yield and returning. E.g. we forgot to yield on a
# function that returns a deferred.
#
# We don't raise here as it's perfectly valid for contexts to
# change in a function, as long as it sets the correct context
# on resolving (which is checked separately).
err = (
"Function %r returned and changed context from %s to %s,"
" in %s between %d and end of func"
% (
f.__qualname__,
expected_context,
current_context(),
f.__code__.co_filename,
last_yield_line_no,
)
)
changes.append(err)
# The `StopIteration` or `_DefGen_Return` contains the return value from the
# generator.
return cast(T, e.value)
frame = gen.gi_frame
if isinstance(d, defer.Deferred) and not d.called:
# This happens if we yield on a deferred that doesn't follow
# the log context rules without wrapping in a `make_deferred_yieldable`.
# We raise here as this should never happen.
if current_context():
err = (
"%s yielded with context %s rather than sentinel,"
" yielded on line %d in %s"
% (
frame.f_code.co_name,
current_context(),
frame.f_lineno,
frame.f_code.co_filename,
)
)
raise Exception(err)
# the wrapped function yielded a Deferred: yield it back up to the parent
# inlineCallbacks().
try:
result = yield d
except Exception:
# this will fish an earlier Failure out of the stack where possible, and
# thus is preferable to passing in an exception to the Failure
# constructor, since it results in less stack-mangling.
result = Failure()
if current_context() != expected_context:
# This happens because the context is lost sometime *after* the
# previous yield and *after* the current yield. E.g. the
# deferred we waited on didn't follow the rules, or we forgot to
# yield on a function between the two yield points.
#
# We don't raise here as it's perfectly valid for contexts to
# change in a function, as long as it sets the correct context
# on resolving (which is checked separately).
err = (
"%s changed context from %s to %s, happened between lines %d and %d in %s"
% (
frame.f_code.co_name,
expected_context,
current_context(),
last_yield_line_no,
frame.f_lineno,
frame.f_code.co_filename,
)
)
changes.append(err)
last_yield_line_no = frame.f_lineno
return check_yield_points_inner |
Returns a count of something (slept/rejected hosts) by (metrics_name) | def _get_counts_from_rate_limiter_instance(
count_func: Callable[["FederationRateLimiter"], int]
) -> Mapping[Tuple[str, ...], int]:
"""Returns a count of something (slept/rejected hosts) by (metrics_name)"""
# Cast to a list to prevent it changing while the Prometheus
# thread is collecting metrics
with _rate_limiter_instances_lock:
rate_limiter_instances = list(_rate_limiter_instances)
# Map from (metrics_name,) -> int, the number of something like slept hosts
# or rejected hosts. The key type is Tuple[str], but we leave the length
# unspecified for compatibility with LaterGauge's annotations.
counts: Dict[Tuple[str, ...], int] = {}
for rate_limiter_instance in rate_limiter_instances:
# Only track metrics if they provided a `metrics_name` to
# differentiate this instance of the rate limiter.
if rate_limiter_instance.metrics_name:
key = (rate_limiter_instance.metrics_name,)
counts[key] = count_func(rate_limiter_instance)
return counts |
For editable installs check if the rust library is outdated and needs to
be rebuilt. | def check_rust_lib_up_to_date() -> None:
"""For editable installs check if the rust library is outdated and needs to
be rebuilt.
"""
if not _dist_is_editable():
return
synapse_dir = os.path.dirname(synapse.__file__)
synapse_root = os.path.abspath(os.path.join(synapse_dir, ".."))
# Double check we've not gone into site-packages...
if os.path.basename(synapse_root) == "site-packages":
return
# ... and it looks like the root of a python project.
if not os.path.exists("pyproject.toml"):
return
# Get the hash of all Rust source files
hash = _hash_rust_files_in_directory(os.path.join(synapse_root, "rust", "src"))
if hash != get_rust_file_digest():
raise Exception("Rust module outdated. Please rebuild using `poetry install`") |
Get the hash of all files in a directory (recursively) | def _hash_rust_files_in_directory(directory: str) -> str:
"""Get the hash of all files in a directory (recursively)"""
directory = os.path.abspath(directory)
paths = []
dirs = [directory]
while dirs:
dir = dirs.pop()
with os.scandir(dir) as d:
for entry in d:
if entry.is_dir():
dirs.append(entry.path)
else:
paths.append(entry.path)
# We sort to make sure that we get a consistent and well-defined ordering.
paths.sort()
hasher = blake2b()
for path in paths:
with open(os.path.join(directory, path), "rb") as f:
hasher.update(f.read())
return hasher.hexdigest() |
Is distribution an editable install? | def _dist_is_editable() -> bool:
"""Is distribution an editable install?"""
for path_item in sys.path:
egg_link = os.path.join(path_item, "matrix-synapse.egg-link")
if os.path.isfile(egg_link):
return True
return False |
Generate a cryptographically secure string of random letters.
Drawn from the characters: `a-z` and `A-Z` | def random_string(length: int) -> str:
"""Generate a cryptographically secure string of random letters.
Drawn from the characters: `a-z` and `A-Z`
"""
return "".join(secrets.choice(string.ascii_letters) for _ in range(length)) |
Generate a cryptographically secure string of random letters/numbers/symbols.
Drawn from the characters: `a-z`, `A-Z`, `0-9`, and `.,;:^&*-_+=#~@` | def random_string_with_symbols(length: int) -> str:
"""Generate a cryptographically secure string of random letters/numbers/symbols.
Drawn from the characters: `a-z`, `A-Z`, `0-9`, and `.,;:^&*-_+=#~@`
"""
return "".join(secrets.choice(_string_with_symbols) for _ in range(length)) |