def generate_ip_set(
    ip_addresses: Optional[Iterable[str]],
    extra_addresses: Optional[Iterable[str]] = None,
    config_path: Optional[StrSequence] = None,
) -> IPSet:
    """
    Generate an IPSet from a list of IP addresses or CIDRs.

    Additionally, for each IPv4 network in the list of IP addresses, also
    includes the corresponding IPv6 networks. This includes:

    * IPv4-Compatible IPv6 Address (see RFC 4291, section 2.5.5.1)
    * IPv4-Mapped IPv6 Address (see RFC 4291, section 2.5.5.2)
    * 6to4 Address (see RFC 3056, section 2)

    Args:
        ip_addresses: An iterable of IP addresses or CIDRs.
        extra_addresses: An iterable of IP addresses or CIDRs.
        config_path: The path in the configuration for error messages.

    Returns:
        A new IP set.
    """
    result = IPSet()
    for ip in itertools.chain(ip_addresses or (), extra_addresses or ()):
        try:
            network = IPNetwork(ip)
        except AddrFormatError as e:
            raise ConfigError(
                "Invalid IP range provided: %s." % (ip,), config_path
            ) from e
        result.add(network)

        # It is possible that these already exist in the set, but that's OK.
        if ":" not in str(network):
            result.add(IPNetwork(network).ipv6(ipv4_compatible=True))
            result.add(IPNetwork(network).ipv6(ipv4_compatible=False))
            result.add(_6to4(network))

    return result
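For illustration, a minimal sketch of the IPv4-to-IPv6 expansion this performs, assuming the netaddr package (the source of IPSet/IPNetwork here) is available; the addresses are made up:

import netaddr
from netaddr import IPAddress, IPNetwork, IPSet

network = IPNetwork("192.168.0.0/16")
companions = IPSet([network])
# The same expansion generate_ip_set() performs for an IPv4 network:
companions.add(network.ipv6(ipv4_compatible=True))   # IPv4-compatible form, ::a.b.c.d
companions.add(network.ipv6(ipv4_compatible=False))  # IPv4-mapped form, ::ffff:a.b.c.d

# A blocklist built this way also catches the IPv4-mapped spelling:
assert IPAddress("::ffff:192.168.1.1") in companions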
def is_threepid_reserved(
    reserved_threepids: List[JsonDict], threepid: JsonDict
) -> bool:
    """Check the threepid against the reserved threepid config

    Args:
        reserved_threepids: List of reserved threepids
        threepid: The threepid to test for

    Returns:
        Whether the threepid under test is reserved
    """
    for tp in reserved_threepids:
        if threepid["medium"] == tp["medium"] and threepid["address"] == tp["address"]:
            return True
    return False
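A brief usage sketch; the reserved list mirrors the shape of the `mau_limits_reserved_threepids` config option, and the addresses are made up:

reserved = [{"medium": "email", "address": "admin@example.com"}]

assert is_threepid_reserved(reserved, {"medium": "email", "address": "admin@example.com"})
assert not is_threepid_reserved(reserved, {"medium": "email", "address": "user@example.com"})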
def read_gc_thresholds(
    thresholds: Optional[List[Any]],
) -> Optional[Tuple[int, int, int]]:
    """Reads the three integer thresholds for garbage collection.
    Ensures that the thresholds are integers if thresholds are supplied.
    """
    if thresholds is None:
        return None
    try:
        assert len(thresholds) == 3
        return int(thresholds[0]), int(thresholds[1]), int(thresholds[2])
    except Exception:
        raise ConfigError(
            "Value of `gc_threshold` must be a list of three integers if set"
        )
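A usage sketch showing how the parsed tuple would typically be handed to CPython's collector; the wiring here is illustrative:

import gc

thresholds = read_gc_thresholds([700, 10, 10])  # -> (700, 10, 10)
if thresholds is not None:
    gc.set_threshold(*thresholds)

# Invalid input raises ConfigError:
# read_gc_thresholds([700, 10])        -> ConfigError (wrong length)
# read_gc_thresholds(["a", "b", "c"])  -> ConfigError (not integers)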
def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
    """parse a listener config from the config file"""
    if not isinstance(listener, dict):
        raise ConfigError("Expected a dictionary", ("listeners", str(num)))

    listener_type = listener["type"]
    # Raise a helpful error if direct TCP replication is still configured.
    if listener_type == "replication":
        raise ConfigError(DIRECT_TCP_ERROR, ("listeners", str(num), "type"))

    port = listener.get("port")
    socket_path = listener.get("path")
    # Either a port or a path should be declared at a minimum. Using both would be bad.
    if port is not None and not isinstance(port, int):
        raise ConfigError("Listener configuration is lacking a valid 'port' option")
    if socket_path is not None and not isinstance(socket_path, str):
        raise ConfigError("Listener configuration is lacking a valid 'path' option")
    if port and socket_path:
        raise ConfigError(
            "Can not have both a UNIX socket and an IP/port declared for the same "
            "resource!"
        )
    if port is None and socket_path is None:
        raise ConfigError(
            "Must have either a UNIX socket or an IP/port declared for a given "
            "resource!"
        )

    tls = listener.get("tls", False)

    http_config = None
    if listener_type == "http":
        try:
            resources = [
                HttpResourceConfig(**res) for res in listener.get("resources", [])
            ]
        except ValueError as e:
            raise ConfigError("Unknown listener resource") from e

        # For a unix socket, default x_forwarded to True, as this is the only way of
        # getting a client IP.
        # Note: a reverse proxy is required anyway, as there is no way of exposing a
        # unix socket to the internet.
        http_config = HttpListenerConfig(
            x_forwarded=listener.get("x_forwarded", (True if socket_path else False)),
            resources=resources,
            additional_resources=listener.get("additional_resources", {}),
            tag=listener.get("tag"),
            request_id_header=listener.get("request_id_header"),
            experimental_cors_msc3886=listener.get("experimental_cors_msc3886", False),
        )

    if socket_path:
        # TODO: Add in path validation, like if the directory exists and is writable?
        # Set a default for the permission, in case it's left out
        socket_mode = listener.get("mode", 0o666)

        return UnixListenerConfig(socket_path, socket_mode, listener_type, http_config)
    else:
        assert port is not None
        bind_addresses = listener.get("bind_addresses", [])
        bind_address = listener.get("bind_address")
        # if bind_address was specified, add it to the list of addresses
        if bind_address:
            bind_addresses.append(bind_address)

        # if we still have an empty list of addresses, use the default list
        if not bind_addresses:
            if listener_type == "metrics":
                # the metrics listener doesn't support IPv6
                bind_addresses.append("0.0.0.0")
            else:
                bind_addresses.extend(DEFAULT_BIND_ADDRESSES)

        return TCPListenerConfig(port, bind_addresses, listener_type, tls, http_config)
def _instance_to_list_converter(obj: Union[str, List[str]]) -> List[str]:
    """Helper allowing a config option that expects a list of strings to also
    accept a single string, which is wrapped in a one-element list.
    """
    if isinstance(obj, str):
        return [obj]
    return obj
def format_config_error(e: ConfigError) -> Iterator[str]:
    """
    Formats a config error neatly

    The idea is to format the immediate error, plus the "causes" of those errors,
    hopefully in a way that makes sense to the user. For example:

        Error in configuration at 'oidc_config.user_mapping_provider.config.display_name_template':
          Failed to parse config for module 'JinjaOidcMappingProvider':
            invalid jinja template:
              unexpected end of template, expected 'end of print statement'.

    Args:
        e: the error to be formatted

    Returns: An iterator which yields string fragments to be formatted
    """
    yield "Error in configuration"

    if e.path:
        yield " at '%s'" % (".".join(e.path),)

    yield ":\n  %s" % (e.msg,)

    parent_e = e.__cause__
    indent = 1
    while parent_e:
        indent += 1
        yield ":\n%s%s" % ("  " * indent, str(parent_e))
        parent_e = parent_e.__cause__
def path_exists(file_path: str) -> bool:
    """Check if a file exists

    Unlike os.path.exists, this throws an exception if there is an error
    checking if the file exists (for example, if there is a perms error on
    the parent dir).

    Returns:
        True if the file exists; False if not.
    """
    try:
        os.stat(file_path)
        return True
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise e
        return False
def read_config_files(config_files: Iterable[str]) -> Dict[str, Any]:
    """Read the config files into a dict

    Args:
        config_files: A list of the config files to read

    Returns:
        The configuration dictionary.
    """
    specified_config = {}
    for config_file in config_files:
        with open(config_file) as file_stream:
            yaml_config = yaml.safe_load(file_stream)

        if not isinstance(yaml_config, dict):
            err = "File %r is empty or doesn't parse into a key-value map. IGNORING."
            print(err % (config_file,))
            continue

        specified_config.update(yaml_config)

    if "server_name" not in specified_config:
        raise ConfigError(MISSING_SERVER_NAME)

    if "report_stats" not in specified_config:
        raise ConfigError(
            MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + "\n" + MISSING_REPORT_STATS_SPIEL
        )

    return specified_config
def find_config_files(search_paths: List[str]) -> List[str]:
    """Finds config files using a list of search paths. If a path is a file
    then that file path is added to the list. If a search path is a directory
    then all the "*.yaml" files in that directory are added to the list in
    sorted order.

    Args:
        search_paths: A list of paths to search.

    Returns:
        A list of file paths.
    """
    config_files = []
    if search_paths:
        for config_path in search_paths:
            if os.path.isdir(config_path):
                # We accept specifying directories as config paths, we search
                # inside that directory for all files matching *.yaml, and then
                # we apply them in *sorted* order.
                files = []
                for entry in os.listdir(config_path):
                    entry_path = os.path.join(config_path, entry)
                    if not os.path.isfile(entry_path):
                        err = "Found subdirectory in config directory: %r. IGNORING."
                        print(err % (entry_path,))
                        continue

                    if not entry.endswith(".yaml"):
                        err = (
                            "Found file in config directory that does not end in "
                            "'.yaml': %r. IGNORING."
                        )
                        print(err % (entry_path,))
                        continue

                    files.append(entry_path)

                config_files.extend(sorted(files))
            else:
                config_files.append(config_path)
    return config_files
def read_file(file_path: Any, config_path: Iterable[str]) -> str:
    """Check the given file exists, and read it into a string

    If it does not, emit an error indicating the problem

    Args:
        file_path: the file to be read
        config_path: where in the configuration file_path came from, so that a useful
            error can be emitted if it does not exist.

    Returns:
        content of the file.

    Raises:
        ConfigError if there is a problem reading the file.
    """
    if not isinstance(file_path, str):
        raise ConfigError("%r is not a string" % (file_path,), config_path)

    try:
        os.stat(file_path)
        with open(file_path) as file_stream:
            return file_stream.read()
    except OSError as e:
        raise ConfigError("Error accessing file %r" % (file_path,), config_path) from e
def validate_config(
    json_schema: JsonDict, config: Any, config_path: StrSequence
) -> None:
    """Validates a config setting against a JsonSchema definition

    This can be used to validate a section of the config file against a schema
    definition. If the validation fails, a ConfigError is raised with a textual
    description of the problem.

    Args:
        json_schema: the schema to validate against
        config: the configuration value to be validated
        config_path: the path within the config file. This will be used as a basis
            for the error message.

    Raises:
        ConfigError, if validation fails.
    """
    try:
        jsonschema.validate(config, json_schema)
    except jsonschema.ValidationError as e:
        raise json_error_to_config_error(e, config_path)
def json_error_to_config_error(
    e: jsonschema.ValidationError, config_path: StrSequence
) -> ConfigError:
    """Converts a json validation error to a user-readable ConfigError

    Args:
        e: the exception to be converted
        config_path: the path within the config file. This will be used as a basis
            for the error message.

    Returns:
        a ConfigError
    """
    # copy `config_path` before modifying it.
    path = list(config_path)
    for p in list(e.absolute_path):
        if isinstance(p, int):
            path.append("<item %i>" % p)
        else:
            path.append(str(p))
    return ConfigError(e.message, path)
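A sketch of these two functions working together; the schema and config are made up, and the message text comes from jsonschema:

import jsonschema

schema = {
    "type": "object",
    "properties": {"listeners": {"type": "array", "items": {"type": "object"}}},
}
bad_config = {"listeners": ["not-a-dict"]}

try:
    jsonschema.validate(bad_config, schema)
except jsonschema.ValidationError as e:
    err = json_error_to_config_error(e, ("my_section",))
    # err.path == ["my_section", "listeners", "<item 0>"], giving the user a
    # dotted path to the offending entry once formatted.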
def parse_and_validate_mapping(
    config: Any,
    model_type: Type[Model],
) -> Dict[str, Model]:
    """Parse `config` as a mapping from strings to a given `Model` type.

    Args:
        config: The configuration data to check
        model_type: The BaseModel to validate and parse against.

    Returns:
        Fully validated and parsed Dict[str, Model].

    Raises:
        ConfigError, if given improper input.
    """
    try:
        # type-ignore: mypy doesn't like constructing `Dict[str, model_type]` because
        # `model_type` is a runtime variable. Pydantic is fine with this.
        instances = parse_obj_as(Dict[str, model_type], config)  # type: ignore[valid-type]
    except ValidationError as e:
        raise ConfigError(str(e)) from e
    return instances
def _context_info_cb(ssl_connection: SSL.Connection, where: int, ret: int) -> None:
    """The 'information callback' for our openssl context objects.

    Note: Once this is set as the info callback on a Context object, the Context
    should only be used with the SSLClientConnectionCreator.
    """
    # we assume that the app_data on the connection object has been set to
    # a TLSMemoryBIOProtocol object. (This is done by SSLClientConnectionCreator)
    tls_protocol = ssl_connection.get_app_data()
    try:
        # ... we further assume that SSLClientConnectionCreator has set the
        # '_synapse_tls_verifier' attribute to a ConnectionVerifier object.
        tls_protocol._synapse_tls_verifier.verify_context_info_cb(ssl_connection, where)
    except BaseException:  # taken from the twisted implementation
        logger.exception("Error during info_callback")
        f = Failure()
        tls_protocol.failVerification(f)
def check_event_content_hash(
    event: EventBase, hash_algorithm: Hasher = hashlib.sha256
) -> bool:
    """Check whether the hash for this PDU matches the contents"""
    name, expected_hash = compute_content_hash(event.get_pdu_json(), hash_algorithm)
    logger.debug(
        "Verifying content hash on %s (expecting: %s)",
        event.event_id,
        encode_base64(expected_hash),
    )

    # some malformed events lack a 'hashes'. Protect against it being missing
    # or a weird type by basically treating it the same as an unhashed event.
    hashes = event.get("hashes")
    # nb it might be an immutabledict or a dict
    if not isinstance(hashes, collections.abc.Mapping):
        raise SynapseError(
            400, "Malformed 'hashes': %s" % (type(hashes),), Codes.UNAUTHORIZED
        )

    if name not in hashes:
        raise SynapseError(
            400,
            "Algorithm %s not in hashes %s" % (name, list(hashes)),
            Codes.UNAUTHORIZED,
        )

    message_hash_base64 = hashes[name]
    try:
        message_hash_bytes = decode_base64(message_hash_base64)
    except Exception:
        raise SynapseError(
            400, "Invalid base64: %s" % (message_hash_base64,), Codes.UNAUTHORIZED
        )
    return message_hash_bytes == expected_hash
def compute_content_hash(
    event_dict: Dict[str, Any], hash_algorithm: Hasher
) -> Tuple[str, bytes]:
    """Compute the content hash of an event, which is the hash of the
    unredacted event.

    Args:
        event_dict: The unredacted event as a dict
        hash_algorithm: A hasher from `hashlib`, e.g. hashlib.sha256, to use
            to hash the event

    Returns:
        A tuple of the name of hash and the hash as raw bytes.
    """
    event_dict = dict(event_dict)
    event_dict.pop("age_ts", None)
    event_dict.pop("unsigned", None)
    event_dict.pop("signatures", None)
    event_dict.pop("hashes", None)
    event_dict.pop("outlier", None)
    event_dict.pop("destinations", None)

    event_json_bytes = encode_canonical_json(event_dict)

    hashed = hash_algorithm(event_json_bytes)
    return hashed.name, hashed.digest()
def compute_event_reference_hash(
    event: EventBase, hash_algorithm: Hasher = hashlib.sha256
) -> Tuple[str, bytes]:
    """Computes the event reference hash. This is the hash of the redacted
    event.

    Args:
        event
        hash_algorithm: A hasher from `hashlib`, e.g. hashlib.sha256, to use
            to hash the event

    Returns:
        A tuple of the name of hash and the hash as raw bytes.
    """
    tmp_event = prune_event(event)
    event_dict = tmp_event.get_pdu_json()
    event_dict.pop("signatures", None)
    event_dict.pop("age_ts", None)
    event_dict.pop("unsigned", None)
    event_json_bytes = encode_canonical_json(event_dict)
    hashed = hash_algorithm(event_json_bytes)
    return hashed.name, hashed.digest()
def compute_event_signature(
    room_version: RoomVersion,
    event_dict: JsonDict,
    signature_name: str,
    signing_key: SigningKey,
) -> Dict[str, Dict[str, str]]:
    """Compute the signature of the event for the given name and key.

    Args:
        room_version: the version of the room that this event is in.
            (the room version determines the redaction algorithm and hence the
            json to be signed)
        event_dict: The event as a dict
        signature_name: The name of the entity signing the event
            (typically the server's hostname).
        signing_key: The key to sign with

    Returns:
        a dictionary in the same format as an event's signatures field.
    """
    redact_json = prune_event_dict(room_version, event_dict)
    redact_json.pop("age_ts", None)
    redact_json.pop("unsigned", None)
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug("Signing event: %s", encode_canonical_json(redact_json))
    redact_json = sign_json(redact_json, signature_name, signing_key)
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug("Signed event: %s", encode_canonical_json(redact_json))
    return redact_json["signatures"]
def add_hashes_and_signatures(
    room_version: RoomVersion,
    event_dict: JsonDict,
    signature_name: str,
    signing_key: SigningKey,
) -> None:
    """Add content hash and sign the event

    Args:
        room_version: the version of the room this event is in
        event_dict: The event to add hashes to and sign
        signature_name: The name of the entity signing the event
            (typically the server's hostname).
        signing_key: The key to sign with
    """
    name, digest = compute_content_hash(event_dict, hash_algorithm=hashlib.sha256)

    event_dict.setdefault("hashes", {})[name] = encode_base64(digest)

    event_dict["signatures"] = compute_event_signature(
        room_version, event_dict, signature_name=signature_name, signing_key=signing_key
    )
def create_local_event_from_event_dict(
    clock: Clock,
    hostname: str,
    signing_key: SigningKey,
    room_version: RoomVersion,
    event_dict: JsonDict,
    internal_metadata_dict: Optional[JsonDict] = None,
) -> EventBase:
    """Takes a fully formed event dict, ensuring that fields like
    `origin` and `origin_server_ts` have correct values for a locally
    produced event, then signs and hashes it.
    """
    format_version = room_version.event_format
    if format_version not in KNOWN_EVENT_FORMAT_VERSIONS:
        raise Exception("No event format defined for version %r" % (format_version,))

    if internal_metadata_dict is None:
        internal_metadata_dict = {}

    time_now = int(clock.time_msec())

    if format_version == EventFormatVersions.ROOM_V1_V2:
        event_dict["event_id"] = _create_event_id(clock, hostname)

    event_dict["origin"] = hostname
    event_dict.setdefault("origin_server_ts", time_now)

    event_dict.setdefault("unsigned", {})
    age = event_dict["unsigned"].pop("age", 0)
    event_dict["unsigned"].setdefault("age_ts", time_now - age)

    event_dict.setdefault("signatures", {})

    add_hashes_and_signatures(room_version, event_dict, hostname, signing_key)
    return make_event_from_dict(
        event_dict, room_version, internal_metadata_dict=internal_metadata_dict
    )
def _create_event_id(clock: Clock, hostname: str) -> str:
    """Create a new event ID

    Args:
        clock
        hostname: The server name for the event ID

    Returns:
        The new event ID
    """
    global _event_id_counter

    i = str(_event_id_counter)
    _event_id_counter += 1

    local_part = str(int(clock.time())) + i + random_string(5)

    e_id = EventID(local_part, hostname)

    return e_id.to_string()
def load_legacy_presence_router(hs: "HomeServer") -> None:
    """Wrapper that loads a presence router module configured using the old
    configuration, and registers the hooks it implements.
    """

    if hs.config.server.presence_router_module_class is None:
        return

    module = hs.config.server.presence_router_module_class
    config = hs.config.server.presence_router_config
    api = hs.get_module_api()

    presence_router = module(config=config, module_api=api)

    # The known hooks. If a module implements a method whose name appears in this set,
    # we'll want to register it.
    presence_router_methods = {
        "get_users_for_states",
        "get_interested_users",
    }

    # All methods that the module provides should be async, but this wasn't enforced
    # in the old module system, so we wrap them if needed
    def async_wrapper(
        f: Optional[Callable[P, R]]
    ) -> Optional[Callable[P, Awaitable[R]]]:
        # f might be None if the callback isn't implemented by the module. In this
        # case we don't want to register a callback at all so we return None.
        if f is None:
            return None

        def run(*args: P.args, **kwargs: P.kwargs) -> Awaitable[R]:
            # Assertion required because mypy can't prove we won't change `f`
            # back to `None`. See
            # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
            assert f is not None

            return maybe_awaitable(f(*args, **kwargs))

        return run

    # Register the hooks through the module API.
    hooks: Dict[str, Optional[Callable[..., Any]]] = {
        hook: async_wrapper(getattr(presence_router, hook, None))
        for hook in presence_router_methods
    }

    api.register_presence_router_callbacks(**hooks)
def _encode_state_dict(
    state_dict: Optional[StateMap[str]],
) -> Optional[List[Tuple[str, str, str]]]:
    """Since dicts of (type, state_key) -> event_id cannot be serialized in
    JSON we need to convert them to a form that can.
    """
    if state_dict is None:
        return None

    return [(etype, state_key, v) for (etype, state_key), v in state_dict.items()]
def _decode_state_dict(
    input: Optional[List[Tuple[str, str, str]]]
) -> Optional[StateMap[str]]:
    """Decodes a state dict encoded using `_encode_state_dict` above"""
    if input is None:
        return None

    return immutabledict({(etype, state_key): v for etype, state_key, v in input})
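A round-trip sketch, assuming the `immutabledict` package (which is what these frozen state maps use); the IDs are made up:

from immutabledict import immutabledict

state = {("m.room.member", "@alice:example.com"): "$join_event_id"}

encoded = _encode_state_dict(state)
# encoded == [("m.room.member", "@alice:example.com", "$join_event_id")]
# and is now JSON-serializable (a list of 3-element arrays).

decoded = _decode_state_dict(encoded)
assert decoded == immutabledict(state)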
def prune_event(event: EventBase) -> EventBase:
    """Returns a pruned version of the given event, which removes all keys we
    don't know about or think could potentially be dodgy.

    This is used when we "redact" an event. We want to remove all fields that
    the user has specified, but we do want to keep necessary information like
    type, state_key etc.
    """
    pruned_event_dict = prune_event_dict(event.room_version, event.get_dict())

    from . import make_event_from_dict

    pruned_event = make_event_from_dict(
        pruned_event_dict, event.room_version, event.internal_metadata.get_dict()
    )

    # copy the internal fields
    pruned_event.internal_metadata.stream_ordering = (
        event.internal_metadata.stream_ordering
    )

    pruned_event.internal_metadata.outlier = event.internal_metadata.outlier

    # Mark the event as redacted
    pruned_event.internal_metadata.redacted = True

    return pruned_event
def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDict:
    """Redacts the event_dict in the same way as `prune_event`, except it
    operates on dicts rather than event objects

    Returns:
        A copy of the pruned event dict
    """

    allowed_keys = [
        "event_id",
        "sender",
        "room_id",
        "hashes",
        "signatures",
        "content",
        "type",
        "state_key",
        "depth",
        "prev_events",
        "auth_events",
        "origin_server_ts",
    ]

    # Earlier room versions had additional allowed keys.
    if not room_version.updated_redaction_rules:
        allowed_keys.extend(["prev_state", "membership", "origin"])

    event_type = event_dict["type"]

    new_content = {}

    def add_fields(*fields: str) -> None:
        for field in fields:
            if field in event_dict["content"]:
                new_content[field] = event_dict["content"][field]

    if event_type == EventTypes.Member:
        add_fields("membership")

        if room_version.restricted_join_rule_fix:
            add_fields(EventContentFields.AUTHORISING_USER)

        if room_version.updated_redaction_rules:
            # Preserve the signed field under third_party_invite.
            third_party_invite = event_dict["content"].get("third_party_invite")
            if isinstance(third_party_invite, collections.abc.Mapping):
                new_content["third_party_invite"] = {}
                if "signed" in third_party_invite:
                    new_content["third_party_invite"]["signed"] = third_party_invite[
                        "signed"
                    ]

    elif event_type == EventTypes.Create:
        if room_version.updated_redaction_rules:
            # MSC2176 rules state that create events cannot have their `content` redacted.
            new_content = event_dict["content"]
        elif not room_version.implicit_room_creator:
            # Some room versions give meaning to `creator`
            add_fields("creator")

    elif event_type == EventTypes.JoinRules:
        add_fields("join_rule")
        if room_version.restricted_join_rule:
            add_fields("allow")

    elif event_type == EventTypes.PowerLevels:
        add_fields(
            "users",
            "users_default",
            "events",
            "events_default",
            "state_default",
            "ban",
            "kick",
            "redact",
        )

        if room_version.updated_redaction_rules:
            add_fields("invite")

    elif event_type == EventTypes.Aliases and room_version.special_case_aliases_auth:
        add_fields("aliases")

    elif event_type == EventTypes.RoomHistoryVisibility:
        add_fields("history_visibility")

    elif event_type == EventTypes.Redaction and room_version.updated_redaction_rules:
        add_fields("redacts")

    # Protect the rel_type and event_id fields under the m.relates_to field.
    if room_version.msc3389_relation_redactions:
        relates_to = event_dict["content"].get("m.relates_to")
        if isinstance(relates_to, collections.abc.Mapping):
            new_relates_to = {}
            for field in ("rel_type", "event_id"):
                if field in relates_to:
                    new_relates_to[field] = relates_to[field]

            # Only include a non-empty relates_to field.
            if new_relates_to:
                new_content["m.relates_to"] = new_relates_to

    allowed_fields = {k: v for k, v in event_dict.items() if k in allowed_keys}

    allowed_fields["content"] = new_content

    unsigned: JsonDict = {}
    allowed_fields["unsigned"] = unsigned

    event_unsigned = event_dict.get("unsigned", {})

    if "age_ts" in event_unsigned:
        unsigned["age_ts"] = event_unsigned["age_ts"]
    if "replaces_state" in event_unsigned:
        unsigned["replaces_state"] = event_unsigned["replaces_state"]

    return allowed_fields
def _copy_field(src: JsonDict, dst: JsonDict, field: List[str]) -> None:
    """Copy the field in 'src' to 'dst'.

    For example, if src={"foo":{"bar":5}} and dst={}, and field=["foo","bar"]
    then dst={"foo":{"bar":5}}.

    Args:
        src: The dict to read from.
        dst: The dict to modify.
        field: List of keys to drill down to in 'src'.
    """
    if len(field) == 0:  # this should be impossible
        return
    if len(field) == 1:  # common case e.g. 'origin_server_ts'
        if field[0] in src:
            dst[field[0]] = src[field[0]]
        return

    # Else is a nested field e.g. 'content.body'
    # Pop the last field as that's the key to move across and we need the
    # parent dict in order to access the data. Drill down to the right dict.
    key_to_move = field.pop(-1)
    sub_dict = src
    for sub_field in field:  # e.g. sub_field => "content"
        if sub_field in sub_dict and isinstance(
            sub_dict[sub_field], collections.abc.Mapping
        ):
            sub_dict = sub_dict[sub_field]
        else:
            return

    if key_to_move not in sub_dict:
        return

    # Insert the key into the output dictionary, creating nested objects
    # as required. We couldn't do this any earlier or else we'd need to delete
    # the empty objects if the key didn't exist.
    sub_out_dict = dst
    for sub_field in field:
        sub_out_dict = sub_out_dict.setdefault(sub_field, {})
    sub_out_dict[key_to_move] = sub_dict[key_to_move]
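A short sketch of the nested-copy behaviour (note that `_copy_field` pops from the `field` list it is given, so pass a fresh list each call):

src = {"content": {"body": "hello", "formatted_body": "<b>hello</b>"}}
dst = {}

_copy_field(src, dst, ["content", "body"])
# dst == {"content": {"body": "hello"}}; "formatted_body" is not copied.

_copy_field(src, dst, ["content", "missing"])
# dst is unchanged: absent keys are skipped rather than raising.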
def _escape_slash(m: Match[str]) -> str:
    """
    Replacement function; replace a backslash-backslash or backslash-dot with the
    second character. Leaves any other string alone.
    """
    if m.group(1) in ("\\", "."):
        return m.group(1)
    return m.group(0)
def _split_field(field: str) -> List[str]:
    """
    Splits strings on unescaped dots and removes escaping.

    Args:
        field: A string representing a path to a field.

    Returns:
        A list of nested fields to traverse.
    """

    # Convert the field and remove escaping:
    #
    # 1. "content.body.thing\.with\.dots"
    # 2. ["content", "body", "thing\.with\.dots"]
    # 3. ["content", "body", "thing.with.dots"]

    # Find all dots (and their preceding backslashes). If the dot is unescaped
    # then emit a new field part.
    result = []
    prev_start = 0
    for match in SPLIT_FIELD_REGEX.finditer(field):
        # If the match is an *even* number of characters then the dot was escaped.
        if len(match.group()) % 2 == 0:
            continue

        # Add a new part (up to the dot, exclusive) after escaping.
        result.append(
            ESCAPE_SEQUENCE_PATTERN.sub(
                _escape_slash, field[prev_start : match.end() - 1]
            )
        )
        prev_start = match.end()

    # Add any part of the field after the last unescaped dot. (Note that if the
    # character is a dot this correctly adds a blank string.)
    result.append(re.sub(r"\\(.)", _escape_slash, field[prev_start:]))

    return result
def only_fields(dictionary: JsonDict, fields: List[str]) -> JsonDict:
    """Return a new dict with only the fields in 'dictionary' which are present
    in 'fields'.

    If there are no event fields specified then all fields are included.
    The entries may include '.' characters to indicate sub-fields.
    So ['content.body'] will include the 'body' field of the 'content' object.
    A literal '.' or '\\' character in a field name may be escaped using a '\\'.

    Args:
        dictionary: The dictionary to read from.
        fields: A list of fields to copy over. Only shallow refs are taken.

    Returns:
        A new dictionary with only the given fields. If fields was empty,
        the same dictionary is returned.
    """
    if len(fields) == 0:
        return dictionary

    # for each field, convert it:
    # ["content.body.thing\.with\.dots"] => [["content", "body", "thing\.with\.dots"]]
    split_fields = [_split_field(f) for f in fields]

    output: JsonDict = {}

    for field_array in split_fields:
        _copy_field(dictionary, output, field_array)

    return output
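A brief usage sketch combining `_split_field` and `only_fields`; the event body is made up:

event = {
    "type": "m.room.message",
    "content": {"body": "hi", "msgtype": "m.text"},
    "origin_server_ts": 1234,
}

only_fields(event, ["content.body", "type"])
# -> {"content": {"body": "hi"}, "type": "m.room.message"}

_split_field(r"content.thing\.with\.dots")
# -> ["content", "thing.with.dots"]  (escaped dots do not split)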
def serialize_event(
    e: Union[JsonDict, EventBase],
    time_now_ms: int,
    *,
    config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG,
) -> JsonDict:
    """Serialize event for clients

    Args:
        e
        time_now_ms
        config: Event serialization config

    Returns:
        The serialized event dictionary.
    """

    # FIXME(erikj): To handle the case of presence events and the like
    if not isinstance(e, EventBase):
        return e

    time_now_ms = int(time_now_ms)

    # Should this strip out None's?
    d = dict(e.get_dict().items())

    d["event_id"] = e.event_id

    if "age_ts" in d["unsigned"]:
        d["unsigned"]["age"] = time_now_ms - d["unsigned"]["age_ts"]
        del d["unsigned"]["age_ts"]

    if "redacted_because" in e.unsigned:
        d["unsigned"]["redacted_because"] = serialize_event(
            e.unsigned["redacted_because"],
            time_now_ms,
            config=config,
        )

    # If we have a txn_id saved in the internal_metadata, we should include it in the
    # unsigned section of the event if it was sent by the same session as the one
    # requesting the event.
    txn_id: Optional[str] = getattr(e.internal_metadata, "txn_id", None)
    if (
        txn_id is not None
        and config.requester is not None
        and config.requester.user.to_string() == e.sender
    ):
        # Some events do not have the device ID stored in the internal metadata,
        # this includes old events as well as those created by appservice, guests,
        # or with tokens minted with the admin API. For those events, fallback
        # to using the access token instead.
        event_device_id: Optional[str] = getattr(e.internal_metadata, "device_id", None)
        if event_device_id is not None:
            if event_device_id == config.requester.device_id:
                d["unsigned"]["transaction_id"] = txn_id
        else:
            # Fallback behaviour: only include the transaction ID if the event
            # was sent from the same access token.
            #
            # For regular users, the access token ID can be used to determine this.
            # This includes access tokens minted with the admin API.
            #
            # For guests and appservice users, we can't check the access token ID
            # so assume it is the same session.
            event_token_id: Optional[int] = getattr(
                e.internal_metadata, "token_id", None
            )
            if (
                (
                    event_token_id is not None
                    and config.requester.access_token_id is not None
                    and event_token_id == config.requester.access_token_id
                )
                or config.requester.is_guest
                or config.requester.app_service
            ):
                d["unsigned"]["transaction_id"] = txn_id

    # invite_room_state and knock_room_state are a list of stripped room state events
    # that are meant to provide metadata about a room to an invitee/knocker. They are
    # intended to only be included in specific circumstances, such as down sync, and
    # should not be included in any other case.
    if not config.include_stripped_room_state:
        d["unsigned"].pop("invite_room_state", None)
        d["unsigned"].pop("knock_room_state", None)

    if config.as_client_event:
        d = config.event_format(d)

    # If the event is a redaction, the field with the redacted event ID appears
    # in a different location depending on the room version. e.redacts handles
    # fetching from the proper location; copy it to the other location for forwards-
    # and backwards-compatibility with clients.
    if e.type == EventTypes.Redaction and e.redacts is not None:
        if e.room_version.updated_redaction_rules:
            d["redacts"] = e.redacts
        else:
            d["content"] = dict(d["content"])
            d["content"]["redacts"] = e.redacts

    only_event_fields = config.only_event_fields
    if only_event_fields:
        if not isinstance(only_event_fields, list) or not all(
            isinstance(f, str) for f in only_event_fields
        ):
            raise TypeError("only_event_fields must be a list of strings")
        d = only_fields(d, only_event_fields)

    return d
def copy_and_fixup_power_levels_contents(
    old_power_levels: PowerLevelsContent,
) -> Dict[str, Union[int, Dict[str, int]]]:
    """Copy the content of a power_levels event, unfreezing immutabledicts along
    the way.

    We accept as input power level values which are strings, provided they
    represent an integer, e.g. `"100"` instead of 100. Such strings are converted
    to integers in the returned dictionary (hence "fixup" in the function name).

    Note that future room versions will outlaw such stringy power levels (see
    https://github.com/matrix-org/matrix-spec/issues/853).

    Raises:
        TypeError if the input does not look like a valid power levels event content
    """
    if not isinstance(old_power_levels, collections.abc.Mapping):
        raise TypeError("Not a valid power-levels content: %r" % (old_power_levels,))

    power_levels: Dict[str, Union[int, Dict[str, int]]] = {}

    for k, v in old_power_levels.items():
        if isinstance(v, collections.abc.Mapping):
            h: Dict[str, int] = {}
            power_levels[k] = h
            for k1, v1 in v.items():
                _copy_power_level_value_as_integer(v1, h, k1)
        else:
            _copy_power_level_value_as_integer(v, power_levels, k)

    return power_levels
def _copy_power_level_value_as_integer(
    old_value: object,
    power_levels: MutableMapping[str, Any],
    key: str,
) -> None:
    """Set `power_levels[key]` to the integer represented by `old_value`.

    :raises TypeError: if `old_value` is neither an integer nor a base-10 string
        representation of an integer.
    """
    if type(old_value) is int:  # noqa: E721
        power_levels[key] = old_value
        return

    if isinstance(old_value, str):
        try:
            parsed_value = int(old_value, base=10)
        except ValueError:
            # Fall through to the final TypeError.
            pass
        else:
            power_levels[key] = parsed_value
            return

    raise TypeError(f"Invalid power_levels value for {key}: {old_value}")
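A quick sketch of the fixup behaviour on made-up content:

fixed = copy_and_fixup_power_levels_contents(
    {"ban": "50", "users": {"@mod:example.com": "100"}, "events_default": 0}
)
# fixed == {"ban": 50, "users": {"@mod:example.com": 100}, "events_default": 0}

# Values that don't represent a base-10 integer are rejected:
# copy_and_fixup_power_levels_contents({"ban": 50.0})  -> TypeError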
def validate_canonicaljson(value: Any) -> None:
    """
    Ensure that the JSON object is valid according to the rules of canonical JSON.

    See the appendix section 3.1: Canonical JSON.

    This rejects JSON that has:
    * An integer outside the range of [-2 ^ 53 + 1, 2 ^ 53 - 1]
    * Floats
    * NaN, Infinity, -Infinity
    """
    if type(value) is int:  # noqa: E721
        if value < CANONICALJSON_MIN_INT or CANONICALJSON_MAX_INT < value:
            raise SynapseError(400, "JSON integer out of range", Codes.BAD_JSON)

    elif isinstance(value, float):
        # Note that Infinity, -Infinity, and NaN are also considered floats.
        raise SynapseError(400, "Bad JSON value: float", Codes.BAD_JSON)

    elif isinstance(value, collections.abc.Mapping):
        for v in value.values():
            validate_canonicaljson(v)

    elif isinstance(value, (list, tuple)):
        for i in value:
            validate_canonicaljson(i)

    elif not isinstance(value, (bool, str)) and value is not None:
        # Other potential JSON values (bool, None, str) are safe.
        raise SynapseError(400, "Unknown JSON value", Codes.BAD_JSON)
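Illustrative calls; the bounds come from the docstring above (CANONICALJSON_MAX_INT is 2**53 - 1):

validate_canonicaljson({"a": [1, 2, {"b": "ok"}], "c": None})  # passes silently

# Each of these raises SynapseError(400, ..., Codes.BAD_JSON):
# validate_canonicaljson(2**53)         # integer out of range
# validate_canonicaljson({"pi": 3.14})  # floats are rejected
# validate_canonicaljson(object())      # unknown JSON value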
def maybe_upsert_event_field(
    event: EventBase, container: JsonDict, key: str, value: object
) -> bool:
    """Upsert an event field, but only if this doesn't make the event too large.

    Returns true iff the upsert took place.
    """
    if key in container:
        old_value: object = container[key]
        container[key] = value
        # NB: here and below, we assume that passing a non-None `time_now` argument to
        # get_pdu_json doesn't increase the size of the encoded result.
        upsert_okay = len(encode_canonical_json(event.get_pdu_json())) <= MAX_PDU_SIZE
        if not upsert_okay:
            container[key] = old_value
    else:
        container[key] = value
        upsert_okay = len(encode_canonical_json(event.get_pdu_json())) <= MAX_PDU_SIZE
        if not upsert_okay:
            del container[key]

    return upsert_okay
def _event_type_from_format_version(
    format_version: int,
) -> Type[Union[FrozenEvent, FrozenEventV2, FrozenEventV3]]:
    """Returns the python type to use to construct an Event object for the
    given event format version.

    Args:
        format_version: The event format version

    Returns:
        A type that can be initialized as per the initializer of `FrozenEvent`
    """

    if format_version == EventFormatVersions.ROOM_V1_V2:
        return FrozenEvent
    elif format_version == EventFormatVersions.ROOM_V3:
        return FrozenEventV2
    elif format_version == EventFormatVersions.ROOM_V4_PLUS:
        return FrozenEventV3
    else:
        raise Exception("No event format %r" % (format_version,))
def make_event_from_dict(
    event_dict: JsonDict,
    room_version: RoomVersion = RoomVersions.V1,
    internal_metadata_dict: Optional[JsonDict] = None,
    rejected_reason: Optional[str] = None,
) -> EventBase:
    """Construct an EventBase from the given event dict"""
    event_type = _event_type_from_format_version(room_version.event_format)
    return event_type(
        event_dict, room_version, internal_metadata_dict or {}, rejected_reason
    )
def relation_from_event(event: EventBase) -> Optional[_EventRelation]:
    """
    Attempt to parse relation information from an event.

    Returns:
        The event relation information, if it is valid. None, otherwise.
    """
    relation = event.content.get("m.relates_to")
    if not relation or not isinstance(relation, collections.abc.Mapping):
        # No relation information.
        return None

    # Relations must have a type and parent event ID.
    rel_type = relation.get("rel_type")
    if not isinstance(rel_type, str):
        return None

    parent_id = relation.get("event_id")
    if not isinstance(parent_id, str):
        return None

    # Annotations have a key field.
    aggregation_key = None
    if rel_type == RelationTypes.ANNOTATION:
        aggregation_key = relation.get("key")
        if not isinstance(aggregation_key, str):
            aggregation_key = None

    return _EventRelation(parent_id, rel_type, aggregation_key)
def event_from_pdu_json(pdu_json: JsonDict, room_version: RoomVersion) -> EventBase:
    """Construct an EventBase from an event json received over federation

    Args:
        pdu_json: pdu as received over federation
        room_version: The version of the room this event belongs to

    Raises:
        SynapseError: if the pdu is missing required fields or is otherwise
            not a valid matrix event
    """
    # we could probably enforce a bunch of other fields here (room_id, sender,
    # origin, etc etc)
    assert_params_in_dict(pdu_json, ("type", "depth"))

    # Strip any unauthorized values from "unsigned" if they exist
    if "unsigned" in pdu_json:
        _strip_unsigned_values(pdu_json)

    depth = pdu_json["depth"]
    if type(depth) is not int:  # noqa: E721
        raise SynapseError(400, "Depth %r not an integer" % (depth,), Codes.BAD_JSON)

    if depth < 0:
        raise SynapseError(400, "Depth too small", Codes.BAD_JSON)
    elif depth > MAX_DEPTH:
        raise SynapseError(400, "Depth too large", Codes.BAD_JSON)

    # Validate that the JSON conforms to the specification.
    if room_version.strict_canonicaljson:
        validate_canonicaljson(pdu_json)

    event = make_event_from_dict(pdu_json, room_version)
    return event
def _strip_unsigned_values(pdu_dict: JsonDict) -> None:
    """
    Strip any unsigned values unless specifically allowed, as defined by the whitelist.

    pdu: the json dict to strip values from. Note that the dict is mutated by this
    function
    """
    unsigned = pdu_dict["unsigned"]
    if not isinstance(unsigned, dict):
        # Nothing salvageable: replace the malformed value and stop, since the
        # filtering below requires a dict.
        pdu_dict["unsigned"] = {}
        return

    if pdu_dict["type"] == "m.room.member":
        whitelist = ["knock_room_state", "invite_room_state", "age"]
    else:
        whitelist = ["age"]

    filtered_unsigned = {k: v for k, v in unsigned.items() if k in whitelist}
    pdu_dict["unsigned"] = filtered_unsigned
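A small sketch of the whitelist in action on a made-up PDU (for a non-member event only "age" survives):

pdu = {
    "type": "m.room.message",
    "unsigned": {"age": 42, "invite_room_state": [], "internal_thing": True},
}
_strip_unsigned_values(pdu)
assert pdu["unsigned"] == {"age": 42}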
def _validate_hierarchy_event(d: JsonDict) -> None:
    """Validate an event within the result of a /hierarchy request

    Args:
        d: json object to be parsed

    Raises:
        ValueError if d is not a valid event
    """
    event_type = d.get("type")
    if not isinstance(event_type, str):
        raise ValueError("Invalid event: 'type' must be a str")

    state_key = d.get("state_key")
    if not isinstance(state_key, str):
        raise ValueError("Invalid event: 'state_key' must be a str")

    content = d.get("content")
    if not isinstance(content, dict):
        raise ValueError("Invalid event: 'content' must be a dict")

    via = content.get("via")
    if not isinstance(via, list):
        raise ValueError("Invalid event: 'via' must be a list")
    if any(not isinstance(v, str) for v in via):
        raise ValueError("Invalid event: 'via' must be a list of strings")
def _get_event_ids_for_partial_state_join(
    join_event: EventBase,
    prev_state_ids: StateMap[str],
    summary: Mapping[str, MemberSummary],
) -> Collection[str]:
    """Calculate state to be returned in a partial_state send_join

    Args:
        join_event: the join event being send_joined
        prev_state_ids: the event ids of the state before the join
        summary: a room summary, as returned by `get_room_summary`, used to
            pick out the room's "hero" members

    Returns:
        the event ids to be returned
    """

    # return all non-member events
    state_event_ids = {
        event_id
        for (event_type, state_key), event_id in prev_state_ids.items()
        if event_type != EventTypes.Member
    }

    # we also need the current state of the current user (it's going to
    # be an auth event for the new join, so we may as well return it)
    current_membership_event_id = prev_state_ids.get(
        (EventTypes.Member, join_event.state_key)
    )
    if current_membership_event_id is not None:
        state_event_ids.add(current_membership_event_id)

    name_id = prev_state_ids.get((EventTypes.Name, ""))
    canonical_alias_id = prev_state_ids.get((EventTypes.CanonicalAlias, ""))
    if not name_id and not canonical_alias_id:
        # Also include the hero members of the room (for DM rooms without a title).
        # To do this properly, we should select the correct subset of membership events
        # from `prev_state_ids`. Instead, we are lazier and use the (cached)
        # `get_room_summary` function, which is based on the current state of the room.
        # This introduces races; we choose to ignore them because a) they should be rare
        # and b) even if it's wrong, joining servers will get the full state eventually.
        heroes = extract_heroes_from_room_summary(summary, join_event.state_key)
        for hero in heroes:
            membership_event_id = prev_state_ids.get((EventTypes.Member, hero))
            if membership_event_id:
                state_event_ids.add(membership_event_id)

    return state_event_ids
def _create_path(federation_prefix: str, path: str, *args: str) -> str:
    """
    Ensures that all args are url encoded.
    """
    return federation_prefix + path % tuple(urllib.parse.quote(arg, "") for arg in args)
def _create_v1_path(path: str, *args: str) -> str:
    """Creates a path against V1 federation API from the path template and args.
    Ensures that all args are url encoded.

    Example:
        _create_v1_path("/event/%s", event_id)

    Args:
        path: String template for the path
        args: Args to insert into path. Each arg will be url encoded
    """
    return _create_path(FEDERATION_V1_PREFIX, path, *args)
def _create_v2_path(path: str, *args: str) -> str:
    """Creates a path against V2 federation API from the path template and args.
    Ensures that all args are url encoded.

    Example:
        _create_v2_path("/event/%s", event_id)

    Args:
        path: String template for the path
        args: Args to insert into path. Each arg will be url encoded
    """
    return _create_path(FEDERATION_V2_PREFIX, path, *args)
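Illustrative output, assuming the usual "/_matrix/federation/v1" value for FEDERATION_V1_PREFIX; note that `urllib.parse.quote(arg, "")` leaves no characters in the safe set, so "/" and ":" are percent-encoded and identifiers cannot smuggle path separators:

_create_v1_path("/event/%s", "$abc:example.com")
# -> "/_matrix/federation/v1/event/%24abc%3Aexample.com"

_create_v1_path("/event/%s", "$abc/../admin:example.com")
# The "/" characters become %2F, defusing path traversal.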
def _event_parser(event_dict: JsonDict) -> Generator[None, Tuple[str, Any], None]:
    """Helper function for use with `ijson.kvitems_coro` to parse key-value pairs
    to add them to a given dictionary.
    """

    while True:
        key, value = yield
        event_dict[key] = value
def _event_list_parser(
    room_version: RoomVersion, events: List[EventBase]
) -> Generator[None, JsonDict, None]:
    """Helper function for use with `ijson.items_coro` to parse an array of
    events and add them to the given list.
    """

    while True:
        obj = yield
        event = make_event_from_dict(obj, room_version)
        events.append(event)
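A sketch of how such a coroutine is driven, mirroring the `ijson.items_coro` usage in this codebase; the JSON chunks are made up and the event body is illustrative (a real PDU carries more fields):

import ijson

events: List[EventBase] = []
# Each completed element of the top-level "state" array is sent into the
# coroutine, which builds an EventBase and appends it to `events`.
parser = ijson.items_coro(_event_list_parser(RoomVersions.V1, events), "state.item")
parser.send(b'{"state": [{"type": "m.room.crea')  # partial chunks are fine
parser.send(b'te", "content": {}, "depth": 1}]}')
parser.close()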
def _members_omitted_parser(response: SendJoinResponse) -> Generator[None, Any, None]:
    """Helper function for use with `ijson.items_coro`

    Parses the members_omitted field in send_join responses
    """
    while True:
        val = yield
        if not isinstance(val, bool):
            raise TypeError("members_omitted must be a boolean")
        response.members_omitted = val
def _servers_in_room_parser(response: SendJoinResponse) -> Generator[None, Any, None]:
    """Helper function for use with `ijson.items_coro`

    Parses the servers_in_room field in send_join responses
    """
    while True:
        val = yield
        if not isinstance(val, list) or any(not isinstance(x, str) for x in val):
            raise TypeError("servers_in_room must be a list of strings")
        response.servers_in_room = val
def _close_coros(coros: Iterable[Generator[None, bytes, None]]) -> None:
    """Close each of the given coroutines.

    Always calls .close() on each coroutine, even if doing so raises an exception.
    Any exceptions raised are aggregated into an ExceptionBundle.

    :raises ExceptionBundle: if at least one coroutine fails to close.
    """
    exceptions = []
    for c in coros:
        try:
            c.close()
        except Exception as e:
            exceptions.append(e)

    if exceptions:
        # raise from the first exception so that the traceback has slightly more context
        raise ExceptionBundle(
            f"There were {len(exceptions)} errors closing coroutines", exceptions
        ) from exceptions[0]
def _parse_auth_header(header_bytes: bytes) -> Tuple[str, str, str, Optional[str]]:
    """Parse an X-Matrix auth header

    Args:
        header_bytes: header value

    Returns:
        origin, key id, signature, destination.

    Raises:
        AuthenticationError if the header could not be parsed
    """
    try:
        header_str = header_bytes.decode("utf-8")
        params = re.split(" +", header_str)[1].split(",")
        param_dict: Dict[str, str] = {
            k.lower(): v for k, v in [param.split("=", maxsplit=1) for param in params]
        }

        def strip_quotes(value: str) -> str:
            if value.startswith('"'):
                return re.sub(
                    "\\\\(.)", lambda matchobj: matchobj.group(1), value[1:-1]
                )
            else:
                return value

        origin = strip_quotes(param_dict["origin"])

        # ensure that the origin is a valid server name
        parse_and_validate_server_name(origin)

        key = strip_quotes(param_dict["key"])
        sig = strip_quotes(param_dict["sig"])

        # get the destination server_name from the auth header if it exists
        destination = param_dict.get("destination")
        if destination is not None:
            destination = strip_quotes(destination)
        else:
            destination = None

        return origin, key, sig, destination
    except Exception as e:
        logger.warning(
            "Error parsing auth header '%s': %s",
            header_bytes.decode("ascii", "replace"),
            e,
        )
        raise AuthenticationError(
            HTTPStatus.BAD_REQUEST, "Malformed Authorization header", Codes.UNAUTHORIZED
        )
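An illustrative parse; the signature value is a placeholder, not a real base64 signature:

header = (
    b'X-Matrix origin=origin.example.com,key="ed25519:key1",'
    b'sig="c2lnbmF0dXJl",destination="dest.example.com"'
)
origin, key, sig, destination = _parse_auth_header(header)
# origin == "origin.example.com", key == "ed25519:key1",
# sig == "c2lnbmF0dXJl", destination == "dest.example.com"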
def register_servlets(
    hs: "HomeServer",
    resource: HttpServer,
    authenticator: Authenticator,
    ratelimiter: FederationRateLimiter,
    servlet_groups: Optional[Iterable[str]] = None,
) -> None:
    """Initialize and register servlet classes.

    Will by default register all servlets. For custom behaviour, pass in
    a list of servlet_groups to register.

    Args:
        hs: homeserver
        resource: resource class to register to
        authenticator: authenticator to use
        ratelimiter: ratelimiter to use
        servlet_groups: List of servlet groups to register.
            Defaults to all groups in ``SERVLET_GROUPS``.
    """
    if not servlet_groups:
        servlet_groups = SERVLET_GROUPS.keys()

    for servlet_group in servlet_groups:
        # Fail loudly on unknown servlet groups.
        if servlet_group not in SERVLET_GROUPS:
            raise RuntimeError(
                f"Attempting to register unknown federation servlet: '{servlet_group}'"
            )

        for servletclass in SERVLET_GROUPS[servlet_group]:
            # Only allow the `/account_status` servlet if msc3720 is enabled
            if (
                servletclass == FederationAccountStatusServlet
                and not hs.config.experimental.msc3720_enabled
            ):
                continue
            if (
                servletclass == FederationUnstableClientKeysClaimServlet
                and not hs.config.experimental.msc3983_appservice_otk_claims
            ):
                continue

            servletclass(
                hs=hs,
                authenticator=authenticator,
                ratelimiter=ratelimiter,
                server_name=hs.hostname,
            ).register(resource)
Convert a legacy-formatted login submission to an identifier dict. Legacy login submissions (used in both login and user-interactive authentication) provide user-identifying information at the top-level instead. These are now deprecated and replaced with identifiers: https://matrix.org/docs/spec/client_server/r0.6.1#identifier-types Args: submission: The client dict to convert Returns: The matching identifier dict Raises: SynapseError: If the format of the client dict is invalid
def convert_client_dict_legacy_fields_to_identifier( submission: JsonDict, ) -> Dict[str, str]: """ Convert a legacy-formatted login submission to an identifier dict. Legacy login submissions (used in both login and user-interactive authentication) provide user-identifying information at the top-level instead. These are now deprecated and replaced with identifiers: https://matrix.org/docs/spec/client_server/r0.6.1#identifier-types Args: submission: The client dict to convert Returns: The matching identifier dict Raises: SynapseError: If the format of the client dict is invalid """ identifier = submission.get("identifier", {}) # Generate an m.id.user identifier if "user" parameter is present user = submission.get("user") if user: identifier = {"type": "m.id.user", "user": user} # Generate an m.id.thirdparty identifier if "medium" and "address" parameters are present medium = submission.get("medium") address = submission.get("address") if medium and address: identifier = { "type": "m.id.thirdparty", "medium": medium, "address": address, } # We've converted valid, legacy login submissions to an identifier. If the # submission still doesn't have an identifier, it's invalid if not identifier: raise SynapseError(400, "Invalid login submission", Codes.INVALID_PARAM) # Ensure the identifier has a type if "type" not in identifier: raise SynapseError( 400, "'identifier' dict has no key 'type'", errcode=Codes.MISSING_PARAM, ) return identifier
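Two illustrative conversions (assuming the function is in scope):

# Legacy "user" field -> m.id.user identifier.
print(convert_client_dict_legacy_fields_to_identifier({"user": "@alice:example.com"}))
# {'type': 'm.id.user', 'user': '@alice:example.com'}

# Legacy "medium"/"address" fields -> m.id.thirdparty identifier.
print(
    convert_client_dict_legacy_fields_to_identifier(
        {"medium": "email", "address": "alice@example.com"}
    )
)
# {'type': 'm.id.thirdparty', 'medium': 'email', 'address': 'alice@example.com'}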
Convert a phone login identifier type to a generic threepid identifier. Args: identifier: Login identifier dict of type 'm.id.phone' Returns: An equivalent m.id.thirdparty identifier dict
def login_id_phone_to_thirdparty(identifier: JsonDict) -> Dict[str, str]: """ Convert a phone login identifier type to a generic threepid identifier. Args: identifier: Login identifier dict of type 'm.id.phone' Returns: An equivalent m.id.thirdparty identifier dict """ if "country" not in identifier or ( # The specification requires a "phone" field, while Synapse used to require a "number" # field. Accept both for backwards compatibility. "phone" not in identifier and "number" not in identifier ): raise SynapseError( 400, "Invalid phone-type identifier", errcode=Codes.INVALID_PARAM ) # Accept both "phone" and "number" as valid keys in m.id.phone. # Look the fallback up lazily: `identifier.get("phone", identifier["number"])` # would evaluate the default eagerly and raise a KeyError for submissions # that only contain "phone". if "phone" in identifier: phone_number = identifier["phone"] else: phone_number = identifier["number"] # Convert user-provided phone number to a consistent representation msisdn = phone_number_to_msisdn(identifier["country"], phone_number) return { "type": "m.id.thirdparty", "medium": "msisdn", "address": msisdn, }
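For example (a sketch; the exact MSISDN string comes from `phone_number_to_msisdn`, so the address shown is indicative):

login_id_phone_to_thirdparty(
    {"type": "m.id.phone", "country": "GB", "phone": "07700 900123"}
)
# -> {"type": "m.id.thirdparty", "medium": "msisdn", "address": "447700900123"}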
Check a cross-signing key uploaded by a user. Performs some basic sanity checking, and ensures that it is signed, if a signature is required. Args: key: the key data to verify user_id: the user whose key is being checked key_type: the type of key that the key should be signing_key: the signing key that the key should be signed with. If omitted, signatures will not be checked.
def _check_cross_signing_key( key: JsonDict, user_id: str, key_type: str, signing_key: Optional[VerifyKey] = None ) -> None: """Check a cross-signing key uploaded by a user. Performs some basic sanity checking, and ensures that it is signed, if a signature is required. Args: key: the key data to verify user_id: the user whose key is being checked key_type: the type of key that the key should be signing_key: the signing key that the key should be signed with. If omitted, signatures will not be checked. """ if ( key.get("user_id") != user_id or key_type not in key.get("usage", []) or len(key.get("keys", {})) != 1 ): raise SynapseError(400, ("Invalid %s key" % (key_type,)), Codes.INVALID_PARAM) if signing_key: try: verify_signed_json(key, user_id, signing_key) except SignatureVerifyException: raise SynapseError( 400, ("Invalid signature on %s key" % key_type), Codes.INVALID_SIGNATURE )
Check that a signature on a device or cross-signing key is correct and matches the copy of the device/key that we have stored. Throws an exception if an error is detected. Args: user_id: the user ID whose signature is being checked verify_key: the key to verify the device with signed_device: the uploaded signed device data stored_device: our previously stored copy of the device Raises: SynapseError: if the signature was invalid or the sent device is not the same as the stored device
def _check_device_signature( user_id: str, verify_key: VerifyKey, signed_device: JsonDict, stored_device: JsonMapping, ) -> None: """Check that a signature on a device or cross-signing key is correct and matches the copy of the device/key that we have stored. Throws an exception if an error is detected. Args: user_id: the user ID whose signature is being checked verify_key: the key to verify the device with signed_device: the uploaded signed device data stored_device: our previously stored copy of the device Raises: SynapseError: if the signature was invalid or the sent device is not the same as the stored device """ # make sure that the device submitted matches what we have stored stripped_signed_device = { k: v for k, v in signed_device.items() if k not in ["signatures", "unsigned"] } stripped_stored_device = { k: v for k, v in stored_device.items() if k not in ["signatures", "unsigned"] } if stripped_signed_device != stripped_stored_device: logger.debug( "upload signatures: key does not match %s vs %s", signed_device, stored_device, ) raise SynapseError(400, "Key does not match") try: verify_signed_json(signed_device, user_id, verify_key) except SignatureVerifyException: logger.debug("invalid signature on key") raise SynapseError(400, "Invalid signature", Codes.INVALID_SIGNATURE)
Work out the order in which we should ask servers to resync events. If an `initial_destination` is given, it takes top priority. Otherwise all servers are treated equally. :raises ValueError: if no destination is provided at all.
def _prioritise_destinations_for_partial_state_resync( initial_destination: Optional[str], other_destinations: AbstractSet[str], room_id: str, ) -> StrCollection: """Work out the order in which we should ask servers to resync events. If an `initial_destination` is given, it takes top priority. Otherwise all servers are treated equally. :raises ValueError: if no destination is provided at all. """ if initial_destination is None and len(other_destinations) == 0: raise ValueError(f"Cannot resync state of {room_id}: no destinations provided") if initial_destination is None: return other_destinations # Move `initial_destination` to the front of the list. destinations = list(other_destinations) if initial_destination in destinations: destinations.remove(initial_destination) destinations = [initial_destination] + destinations return destinations
Create an Authorization header for passing to SimpleHttpClient as the header value of an HTTP request.

Args: id_access_token: An identity server access token.

Returns: The bearer token wrapped in a list, as that is how SimpleHttpClient takes header values.
def create_id_access_token_header(id_access_token: str) -> List[str]: """Create an Authorization header for passing to SimpleHttpClient as the header value of an HTTP request. Args: id_access_token: An identity server access token. Returns: The bearer token wrapped in a list, as that is how SimpleHttpClient takes header values. """ # Prefix with Bearer bearer_token = "Bearer %s" % id_access_token # Check that the token is valid ascii: .encode() raises UnicodeEncodeError # otherwise. The encoded result itself is deliberately discarded. bearer_token.encode("ascii") # Return as a list as that's how SimpleHttpClient takes header values return [bearer_token]
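A sketch of typical consumption (the header name shown is the conventional one; actual call sites may differ):

headers = {b"Authorization": create_id_access_token_header("abc123")}
# headers[b"Authorization"] == ["Bearer abc123"]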
Decides if a presence state change should be sent to interested parties.
def should_notify( old_state: UserPresenceState, new_state: UserPresenceState, is_mine: bool ) -> bool: """Decides if a presence state change should be sent to interested parties.""" user_location = "remote" if is_mine: user_location = "local" if old_state == new_state: return False if old_state.status_msg != new_state.status_msg: notify_reason_counter.labels(user_location, "status_msg_change").inc() return True if old_state.state != new_state.state: notify_reason_counter.labels(user_location, "state_change").inc() state_transition_counter.labels( user_location, old_state.state, new_state.state ).inc() return True if old_state.state == PresenceState.ONLINE: if new_state.currently_active != old_state.currently_active: notify_reason_counter.labels(user_location, "current_active_change").inc() return True if ( new_state.last_active_ts - old_state.last_active_ts > LAST_ACTIVE_GRANULARITY ): # Only notify about last active bumps if we're not currently active if not new_state.currently_active: notify_reason_counter.labels( user_location, "last_active_change_online" ).inc() return True elif new_state.last_active_ts - old_state.last_active_ts > LAST_ACTIVE_GRANULARITY: # Always notify for a transition where last active gets bumped. notify_reason_counter.labels( user_location, "last_active_change_not_online" ).inc() return True return False
Convert UserPresenceState to a JSON format that can be sent down to clients and to other servers.

Args: state: The user presence state to format. now: The current timestamp since the epoch in ms. include_user_id: Whether to include `user_id` in the returned dictionary. As this function can be used both to format presence updates for client /sync responses and for federation /send requests, only the latter needs to include the `user_id` field.

Returns: A JSON dictionary with the following keys: * presence: The presence state as a str. * user_id: Optional. Included if `include_user_id` is truthy. The canonical Matrix ID of the user. * last_active_ago: Optional. Included if `last_active_ts` is set on `state`. The timestamp that the user was last active. * status_msg: Optional. Included if `status_msg` is set on `state`. The user's status. * currently_active: Optional. Included only if `state.state` is "online".

Example: { "presence": "online", "user_id": "@alice:example.com", "last_active_ago": 16783813918, "status_msg": "Hello world!", "currently_active": True }
def format_user_presence_state( state: UserPresenceState, now: int, include_user_id: bool = True ) -> JsonDict: """Convert UserPresenceState to a JSON format that can be sent down to clients and to other servers. Args: state: The user presence state to format. now: The current timestamp since the epoch in ms. include_user_id: Whether to include `user_id` in the returned dictionary. As this function can be used both to format presence updates for client /sync responses and for federation /send requests, only the latter needs to include the `user_id` field. Returns: A JSON dictionary with the following keys: * presence: The presence state as a str. * user_id: Optional. Included if `include_user_id` is truthy. The canonical Matrix ID of the user. * last_active_ago: Optional. Included if `last_active_ts` is set on `state`. The timestamp that the user was last active. * status_msg: Optional. Included if `status_msg` is set on `state`. The user's status. * currently_active: Optional. Included only if `state.state` is "online". Example: { "presence": "online", "user_id": "@alice:example.com", "last_active_ago": 16783813918, "status_msg": "Hello world!", "currently_active": True } """ content: JsonDict = {"presence": state.state} if include_user_id: content["user_id"] = state.user_id if state.last_active_ts: content["last_active_ago"] = now - state.last_active_ts if state.status_msg: content["status_msg"] = state.status_msg if state.state == PresenceState.ONLINE: content["currently_active"] = state.currently_active return content
Checks the presence of users that have timed out and updates as appropriate.

Args: user_states: List of UserPresenceState's to check. is_mine_fn: Function that returns if a user_id is ours syncing_user_devices: A set of (user ID, device ID) tuples with active syncs. user_to_devices: A map of user ID to device ID to UserDevicePresenceState. now: Current time in ms.

Returns: List of UserPresenceState updates
def handle_timeouts( user_states: List[UserPresenceState], is_mine_fn: Callable[[str], bool], syncing_user_devices: AbstractSet[Tuple[str, Optional[str]]], user_to_devices: Dict[str, Dict[Optional[str], UserDevicePresenceState]], now: int, ) -> List[UserPresenceState]: """Checks the presence of users that have timed out and updates as appropriate. Args: user_states: List of UserPresenceState's to check. is_mine_fn: Function that returns if a user_id is ours syncing_user_devices: A set of (user ID, device ID) tuples with active syncs. user_to_devices: A map of user ID to device ID to UserDevicePresenceState. now: Current time in ms. Returns: List of UserPresenceState updates """ changes = {} # Actual changes we need to notify people about for state in user_states: user_id = state.user_id is_mine = is_mine_fn(user_id) new_state = handle_timeout( state, is_mine, syncing_user_devices, user_to_devices.get(user_id, {}), now, ) if new_state: changes[state.user_id] = new_state return list(changes.values())
Checks the presence of the user to see if any of the timers have elapsed

Args: state: UserPresenceState to check. is_mine: Whether the user is ours syncing_device_ids: A set of (user ID, device ID) tuples with active syncs. user_devices: A map of device ID to UserDevicePresenceState. now: Current time in ms.

Returns: A UserPresenceState update or None if no update.
def handle_timeout( state: UserPresenceState, is_mine: bool, syncing_device_ids: AbstractSet[Tuple[str, Optional[str]]], user_devices: Dict[Optional[str], UserDevicePresenceState], now: int, ) -> Optional[UserPresenceState]: """Checks the presence of the user to see if any of the timers have elapsed Args: state: UserPresenceState to check. is_mine: Whether the user is ours syncing_device_ids: A set of (user ID, device ID) tuples with active syncs. user_devices: A map of device ID to UserDevicePresenceState. now: Current time in ms. Returns: A UserPresenceState update or None if no update. """ if state.state == PresenceState.OFFLINE: # No timeouts are associated with offline states. return None changed = False if is_mine: # Check per-device whether the device should be considered idle or offline # due to timeouts. device_changed = False offline_devices = [] for device_id, device_state in user_devices.items(): if device_state.state == PresenceState.ONLINE: if now - device_state.last_active_ts > IDLE_TIMER: # Currently online, but last activity ages ago so auto # idle device_state.state = PresenceState.UNAVAILABLE device_changed = True # If there have been no syncs for a while (and none are ongoing), # set presence to offline. if (state.user_id, device_id) not in syncing_device_ids: # If the user has done something recently but hasn't synced, # don't set them as offline. sync_or_active = max( device_state.last_sync_ts, device_state.last_active_ts ) # Implementations aren't meant to timeout a device with a busy # state, but it needs to timeout *eventually* or else the user # will be stuck in that state. online_timeout = ( BUSY_ONLINE_TIMEOUT if device_state.state == PresenceState.BUSY else SYNC_ONLINE_TIMEOUT ) if now - sync_or_active > online_timeout: # Mark the device as going offline. offline_devices.append(device_id) device_changed = True # Offline devices are not needed and do not add information. for device_id in offline_devices: user_devices.pop(device_id) # If the presence state of the devices changed, then (maybe) update # the user's overall presence state. if device_changed: new_presence = _combine_device_states(user_devices.values()) if new_presence != state.state: state = state.copy_and_replace(state=new_presence) changed = True if now - state.last_active_ts > LAST_ACTIVE_GRANULARITY: # So that we send down a notification that we've # stopped updating. changed = True if now - state.last_federation_update_ts > FEDERATION_PING_INTERVAL: # Need to send ping to other servers to ensure they don't # timeout and set us to offline changed = True else: # We expect to be poked occasionally by the other side. # This is to protect against forgetful/buggy servers, so that # no one gets stuck online forever. if now - state.last_federation_update_ts > FEDERATION_TIMEOUT: # The other side seems to have disappeared. state = state.copy_and_replace(state=PresenceState.OFFLINE) changed = True return state if changed else None
Given a presence update: 1. Add any appropriate timers. 2. Check if we should notify anyone. Args: prev_state new_state is_mine: Whether the user is ours wheel_timer now: Time now in ms persist: True if this state should persist until another update occurs. Skips insertion into wheel timers. Returns: 3-tuple: `(new_state, persist_and_notify, federation_ping)` where: - new_state: is the state to actually persist - persist_and_notify: whether to persist and notify people - federation_ping: whether we should send a ping over federation
def handle_update( prev_state: UserPresenceState, new_state: UserPresenceState, is_mine: bool, wheel_timer: WheelTimer, now: int, persist: bool, ) -> Tuple[UserPresenceState, bool, bool]: """Given a presence update: 1. Add any appropriate timers. 2. Check if we should notify anyone. Args: prev_state new_state is_mine: Whether the user is ours wheel_timer now: Time now in ms persist: True if this state should persist until another update occurs. Skips insertion into wheel timers. Returns: 3-tuple: `(new_state, persist_and_notify, federation_ping)` where: - new_state: is the state to actually persist - persist_and_notify: whether to persist and notify people - federation_ping: whether we should send a ping over federation """ user_id = new_state.user_id persist_and_notify = False federation_ping = False # If the users are ours then we want to set up a bunch of timers # to time things out. if is_mine: if new_state.state == PresenceState.ONLINE: # Idle timer if not persist: wheel_timer.insert( now=now, obj=user_id, then=new_state.last_active_ts + IDLE_TIMER ) active = now - new_state.last_active_ts < LAST_ACTIVE_GRANULARITY new_state = new_state.copy_and_replace(currently_active=active) if active and not persist: wheel_timer.insert( now=now, obj=user_id, then=new_state.last_active_ts + LAST_ACTIVE_GRANULARITY, ) if new_state.state != PresenceState.OFFLINE: # User has stopped syncing if not persist: wheel_timer.insert( now=now, obj=user_id, then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT, ) last_federate = new_state.last_federation_update_ts if now - last_federate > FEDERATION_PING_INTERVAL: # Been a while since we've poked remote servers new_state = new_state.copy_and_replace(last_federation_update_ts=now) federation_ping = True if new_state.state == PresenceState.BUSY and not persist: wheel_timer.insert( now=now, obj=user_id, then=new_state.last_user_sync_ts + BUSY_ONLINE_TIMEOUT, ) else: # An update for a remote user was received. if not persist: wheel_timer.insert( now=now, obj=user_id, then=new_state.last_federation_update_ts + FEDERATION_TIMEOUT, ) # Check whether the change was something worth notifying about if should_notify(prev_state, new_state, is_mine): new_state = new_state.copy_and_replace(last_federation_update_ts=now) persist_and_notify = True return new_state, persist_and_notify, federation_ping
Find the device to use presence information from.

Orders devices by priority, then last_active_ts.

Args: device_states: An iterable of device presence states

Returns: The combined presence state.
def _combine_device_states( device_states: Iterable[UserDevicePresenceState], ) -> str: """ Find the device to use presence information from. Orders devices by priority, then last_active_ts. Args: device_states: An iterable of device presence states Returns: The combined presence state. """ # Based on (all) the user's devices calculate the new presence state. presence = PresenceState.OFFLINE last_active_ts = -1 # Find the device to use the presence state of based on the presence priority, # but tie-break with how recently the device has been seen. for device_state in device_states: if (PRESENCE_BY_PRIORITY[device_state.state], device_state.last_active_ts) > ( PRESENCE_BY_PRIORITY[presence], last_active_ts, ): presence = device_state.state last_active_ts = device_state.last_active_ts return presence
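Under the hood this is a max over `(priority, last_active_ts)` tuples compared lexicographically. A standalone sketch with an assumed priority map (the real `PRESENCE_BY_PRIORITY` mapping is defined elsewhere in the presence module):

# Assumed ordering for illustration: busy > online > unavailable > offline.
PRIORITY = {"busy": 3, "online": 2, "unavailable": 1, "offline": 0}

# (state, last_active_ts) pairs standing in for UserDevicePresenceState.
devices = [("online", 1_000), ("unavailable", 9_000), ("online", 5_000)]

state, _ = max(devices, key=lambda d: (PRIORITY[d[0]], d[1]))
print(state)  # "online": highest priority wins; recency only breaks ties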
Check if the given actions are spec compliant. Args: actions: the actions to check. Raises: InvalidRuleException if the rules aren't compliant with the spec.
def check_actions(actions: List[Union[str, JsonDict]]) -> None: """Check if the given actions are spec compliant. Args: actions: the actions to check. Raises: InvalidRuleException if the rules aren't compliant with the spec. """ if not isinstance(actions, list): raise InvalidRuleException("No actions found") for a in actions: # "dont_notify" and "coalesce" are legacy actions. They are allowed, but # ignored (resulting in no action from the pusher). if a in ["notify", "dont_notify", "coalesce"]: pass elif isinstance(a, dict) and "set_tweak" in a: pass else: raise InvalidRuleException("Unrecognised action %s" % a)
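Some illustrative calls (assuming `check_actions` and `InvalidRuleException` are in scope):

# Valid: a notify action plus a tweak.
check_actions(["notify", {"set_tweak": "sound", "value": "default"}])

# Legacy actions are accepted, but the pusher treats them as no-ops.
check_actions(["dont_notify", "coalesce"])

# Anything unrecognised raises.
try:
    check_actions(["frobnicate"])
except InvalidRuleException:
    pass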
Ensure the prometheus counters for the given auth provider are initialised This fixes a problem where the counters are not reported for a given auth provider until the user first logs in/registers.
def init_counters_for_auth_provider(auth_provider_id: str) -> None: """Ensure the prometheus counters for the given auth provider are initialised This fixes a problem where the counters are not reported for a given auth provider until the user first logs in/registers. """ for is_guest in (True, False): login_counter.labels(guest=is_guest, auth_provider=auth_provider_id) for shadow_banned in (True, False): registration_counter.labels( guest=is_guest, shadow_banned=shadow_banned, auth_provider=auth_provider_id, )
Determines whether the given search filter matches a room entry returned over federation. Only used if the remote server does not support MSC2197 remote-filtered search, and hence does not support MSC3827 filtering of `/publicRooms` by room type either. In this case, we cannot apply the `room_type` filter since no `room_type` field is returned.
def _matches_room_entry(room_entry: JsonDict, search_filter: dict) -> bool: """Determines whether the given search filter matches a room entry returned over federation. Only used if the remote server does not support MSC2197 remote-filtered search, and hence does not support MSC3827 filtering of `/publicRooms` by room type either. In this case, we cannot apply the `room_type` filter since no `room_type` field is returned. """ if search_filter and search_filter.get( PublicRoomsFilterFields.GENERIC_SEARCH_TERM, None ): generic_search_term = search_filter[ PublicRoomsFilterFields.GENERIC_SEARCH_TERM ].upper() if generic_search_term in room_entry.get("name", "").upper(): return True elif generic_search_term in room_entry.get("topic", "").upper(): return True elif generic_search_term in room_entry.get("canonical_alias", "").upper(): return True else: return True return False
Return the list of users which can issue invites.

This is done by exploring the joined users and comparing their power levels to the necessary power level to issue an invite.

Args: auth_events: state in force at this point in the room

Returns: The users which can issue invites.
def get_users_which_can_issue_invite(auth_events: StateMap[EventBase]) -> List[str]: """ Return the list of users which can issue invites. This is done by exploring the joined users and comparing their power levels to the necessary power level to issue an invite. Args: auth_events: state in force at this point in the room Returns: The users which can issue invites. """ invite_level = get_named_level(auth_events, "invite", 0) users_default_level = get_named_level(auth_events, "users_default", 0) power_level_event = get_power_level_event(auth_events) # Custom power-levels for users. if power_level_event: users = power_level_event.content.get("users", {}) else: users = {} result = [] # Check which members are able to invite by ensuring they're joined and have # the necessary power level. for (event_type, state_key), event in auth_events.items(): if event_type != EventTypes.Member: continue if event.membership != Membership.JOIN: continue # Check if the user has a custom power level. if users.get(state_key, users_default_level) >= invite_level: result.append(state_key) return result
Resolve a list of users into their servers. Args: users: A list of users. Returns: A set of servers.
def get_servers_from_users(users: List[str]) -> Set[str]: """ Resolve a list of users into their servers. Args: users: A list of users. Returns: A set of servers. """ servers = set() for user in users: try: servers.add(get_domain_from_id(user)) except SynapseError: pass return servers
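For example (a sketch, assuming `get_domain_from_id` raises `SynapseError` on malformed IDs, which is why they are silently skipped):

get_servers_from_users(
    ["@alice:example.com", "@bob:chat.example.org", "not-a-user-id"]
)
# -> {"example.com", "chat.example.org"}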
Generate a value for comparing two child events for ordering. The rules for ordering are: 1. The 'order' key, if it is valid. 2. The 'origin_server_ts' of the 'm.space.child' event. 3. The 'room_id'. Args: child: The event for generating a comparison key. Returns: The comparison key as a tuple of: False if the ordering is valid. The 'order' field or None if it is not given or invalid. The 'origin_server_ts' field. The room ID.
def _child_events_comparison_key( child: EventBase, ) -> Tuple[bool, Optional[str], int, str]: """ Generate a value for comparing two child events for ordering. The rules for ordering are: 1. The 'order' key, if it is valid. 2. The 'origin_server_ts' of the 'm.space.child' event. 3. The 'room_id'. Args: child: The event for generating a comparison key. Returns: The comparison key as a tuple of: False if the ordering is valid. The 'order' field or None if it is not given or invalid. The 'origin_server_ts' field. The room ID. """ order = child.content.get("order") # If order is not a string or doesn't meet the requirements, ignore it. if not isinstance(order, str): order = None elif len(order) > 50 or _INVALID_ORDER_CHARS_RE.search(order): order = None # Items without an order come last. return order is None, order, child.origin_server_ts, child.room_id
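Because Python compares tuples lexicographically and `False < True`, children with a valid `order` always sort before those without, as a quick sketch shows:

# Keys as produced by _child_events_comparison_key: valid orders sort first.
assert (False, "aaa", 0, "!a:example.com") < (True, None, 0, "!b:example.com")

# Typical use, assuming `children` is a list of m.space.child events:
# children.sort(key=_child_events_comparison_key)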
Replace any characters which are not allowed in Matrix IDs with a dot.
def dot_replace_for_mxid(username: str) -> str: """Replace any characters which are not allowed in Matrix IDs with a dot.""" username = username.lower() username = DOT_REPLACE_PATTERN.sub(".", username) # regular mxids aren't allowed to start with an underscore either username = re.sub("^_", "", username) return username
Extract the session ID from the cookie Raises a SynapseError if the cookie isn't found
def get_username_mapping_session_cookie_from_request(request: IRequest) -> str: """Extract the session ID from the cookie Raises a SynapseError if the cookie isn't found """ session_id = request.getCookie(USERNAME_MAPPING_SESSION_COOKIE_NAME) if not session_id: raise SynapseError(code=400, msg="missing session_id") return session_id.decode("ascii", errors="replace")
Check if SSO attributes meet the proper requirements.

Args: attributes: A mapping of attributes to an iterable of one or more values. req: The configured requirement to check.

Returns: True if the required attribute was found and had a proper value.
def _check_attribute_requirement( attributes: Mapping[str, List[Any]], req: SsoAttributeRequirement ) -> bool: """Check if SSO attributes meet the proper requirements. Args: attributes: A mapping of attributes to an iterable of one or more values. req: The configured requirement to check. Returns: True if the required attribute was found and had a proper value. """ if req.attribute not in attributes: logger.info("SSO attribute missing: %s", req.attribute) return False # If the requirement is None, the attribute existing is enough. if req.value is None: return True values = attributes[req.attribute] if req.value in values: return True logger.info( "SSO attribute %s did not match required value '%s' (was '%s')", req.attribute, req.value, values, ) return False
Works out what state to include in a sync response. Args: timeline_contains: state in the timeline timeline_start: state at the start of the timeline timeline_end: state at the end of the timeline previous_timeline_end: state at the end of the previous sync (or empty dict if this is an initial sync) lazy_load_members: whether to return members from timeline_start or not. assumes that timeline_start has already been filtered to include only the members the client needs to know about.
def _calculate_state( timeline_contains: StateMap[str], timeline_start: StateMap[str], timeline_end: StateMap[str], previous_timeline_end: StateMap[str], lazy_load_members: bool, ) -> StateMap[str]: """Works out what state to include in a sync response. Args: timeline_contains: state in the timeline timeline_start: state at the start of the timeline timeline_end: state at the end of the timeline previous_timeline_end: state at the end of the previous sync (or empty dict if this is an initial sync) lazy_load_members: whether to return members from timeline_start or not. assumes that timeline_start has already been filtered to include only the members the client needs to know about. """ event_id_to_state_key = { event_id: state_key for state_key, event_id in itertools.chain( timeline_contains.items(), timeline_start.items(), timeline_end.items(), previous_timeline_end.items(), ) } timeline_end_ids = set(timeline_end.values()) timeline_start_ids = set(timeline_start.values()) previous_timeline_end_ids = set(previous_timeline_end.values()) timeline_contains_ids = set(timeline_contains.values()) # If we are lazyloading room members, we explicitly add the membership events # for the senders in the timeline into the state block returned by /sync, # as we may not have sent them to the client before. We find these membership # events by filtering them out of timeline_start, which has already been filtered # to only include membership events for the senders in the timeline. # In practice, we can do this by removing them from the previous_timeline_end_ids # list, which is the list of relevant state we know we have already sent to the # client. # see https://github.com/matrix-org/synapse/pull/2970/files/efcdacad7d1b7f52f879179701c7e0d9b763511f#r204732809 if lazy_load_members: previous_timeline_end_ids.difference_update( e for t, e in timeline_start.items() if t[0] == EventTypes.Member ) state_ids = ( (timeline_end_ids | timeline_start_ids) - previous_timeline_end_ids - timeline_contains_ids ) return {event_id_to_state_key[e]: e for e in state_ids}
Calculates the time of a next retry given `now_ts` in ms and the number of failures encountered thus far. Currently the sequence goes: 1 min, 5 min, 25 min, 2 hour, 10 hour, 52 hour, 10 day, 7.75 week
def calculate_time_of_next_retry(now_ts: int, retry_count: int) -> int: """ Calculates the time of a next retry given `now_ts` in ms and the number of failures encountered thus far. Currently the sequence goes: 1 min, 5 min, 25 min, 2 hour, 10 hour, 52 hour, 10 day, 7.75 week """ return now_ts + 60_000 * (5 ** min(retry_count, 7))
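A quick way to sanity-check the documented sequence is to print the delays directly:

for retry_count in range(9):
    minutes = 5 ** min(retry_count, 7)
    print(retry_count, minutes, "min")
# 0 -> 1 min, 1 -> 5 min, 2 -> 25 min, 3 -> 125 min (~2 h), 4 -> 625 min (~10 h),
# 5 -> 3125 min (~52 h), 6 -> 15625 min (~10.9 days), 7+ -> 78125 min (~7.75 weeks)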
Compares an IP address to allowed and disallowed IP sets. Args: ip_address: The IP address to check allowlist: Allowed IP addresses. blocklist: Disallowed IP addresses. Returns: True if the IP address is in the blocklist and not in the allowlist.
def _is_ip_blocked( ip_address: IPAddress, allowlist: Optional[IPSet], blocklist: IPSet ) -> bool: """ Compares an IP address to allowed and disallowed IP sets. Args: ip_address: The IP address to check allowlist: Allowed IP addresses. blocklist: Disallowed IP addresses. Returns: True if the IP address is in the blocklist and not in the allowlist. """ if ip_address in blocklist: if allowlist is None or ip_address not in allowlist: return True return False
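For example, using netaddr (the CIDRs here are made up):

from netaddr import IPAddress, IPSet

blocklist = IPSet(["10.0.0.0/8", "192.168.0.0/16"])
allowlist = IPSet(["10.1.2.0/24"])

print(_is_ip_blocked(IPAddress("10.5.5.5"), allowlist, blocklist))  # True: blocked
print(_is_ip_blocked(IPAddress("10.1.2.3"), allowlist, blocklist))  # False: allowlisted
print(_is_ip_blocked(IPAddress("8.8.8.8"), allowlist, blocklist))   # False: not in blocklist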
Makes a scheduler suitable for a Cooperator using the given reactor.

(This is effectively just a copy from `twisted.internet.task`)
def _make_scheduler( reactor: IReactorTime, ) -> Callable[[Callable[[], object]], IDelayedCall]: """Makes a scheduler suitable for a Cooperator using the given reactor. (This is effectively just a copy from `twisted.internet.task`) """ def _scheduler(x: Callable[[], object]) -> IDelayedCall: return reactor.callLater(_EPSILON, x) return _scheduler
Read an HTTP response body into a file-object, optionally enforcing a maximum file size.

If the maximum file size is reached, the returned Deferred will resolve to a Failure with a BodyExceededMaxSize exception.

Args: response: The HTTP response to read from. stream: The file-object to write to. max_size: The maximum file size to allow.

Returns: A Deferred which resolves to the length of the read body.
def read_body_with_max_size( response: IResponse, stream: ByteWriteable, max_size: Optional[int] ) -> "defer.Deferred[int]": """ Read an HTTP response body into a file-object, optionally enforcing a maximum file size. If the maximum file size is reached, the returned Deferred will resolve to a Failure with a BodyExceededMaxSize exception. Args: response: The HTTP response to read from. stream: The file-object to write to. max_size: The maximum file size to allow. Returns: A Deferred which resolves to the length of the read body. """ d: "defer.Deferred[int]" = defer.Deferred() # If the Content-Length header gives a size larger than the maximum allowed # size, do not bother downloading the body. if max_size is not None and response.length != UNKNOWN_LENGTH: if response.length > max_size: response.deliverBody(_DiscardBodyWithMaxSizeProtocol(d)) return d response.deliverBody(_ReadBodyWithMaxSizeProtocol(stream, d, max_size)) return d
Encodes a map of query arguments to bytes which can be appended to a URL. Args: args: The query arguments, a mapping of string to string or list of strings. Returns: The query arguments encoded as bytes.
def encode_query_args(args: Optional[QueryParams]) -> bytes: """ Encodes a map of query arguments to bytes which can be appended to a URL. Args: args: The query arguments, a mapping of string to string or list of strings. Returns: The query arguments encoded as bytes. """ if args is None: return b"" query_str = urllib.parse.urlencode(args, True) return query_str.encode("utf8")
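For example (`urlencode` is called with `doseq=True`, so list values expand into repeated keys):

print(encode_query_args({"limit": "10", "dir": ["b", "f"]}))
# b'limit=10&dir=b&dir=f'
print(encode_query_args(None))
# b''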
Returns true if the response was due to an endpoint being unimplemented. Args: e: The error response received from the remote server. synapse_error: The above error converted to a SynapseError. This is automatically generated if not provided.
def is_unknown_endpoint( e: HttpResponseException, synapse_error: Optional[SynapseError] = None ) -> bool: """ Returns true if the response was due to an endpoint being unimplemented. Args: e: The error response received from the remote server. synapse_error: The above error converted to a SynapseError. This is automatically generated if not provided. """ if synapse_error is None: synapse_error = e.to_synapse_error() # Matrix v1.6 specifies that servers should return a 404 or 405 with an errcode # of M_UNRECOGNIZED when they receive a request to an unknown endpoint or # to an unknown method, respectively. # # Older versions of servers don't return proper errors, so be graceful. But, # also handle that some endpoints truly do return 404 errors. return ( # 404 is an unknown endpoint, 405 is a known endpoint, but unknown method. (e.code == 404 or e.code == 405) and ( # Consider empty body or non-JSON bodies to be unrecognised (matches # older Dendrites & Conduits). not e.response or not e.response.startswith(b"{") # The proper response JSON with M_UNRECOGNIZED errcode. or synapse_error.errcode == Codes.UNRECOGNIZED ) ) or ( # Older Synapses returned a 400 error. e.code == 400 and synapse_error.errcode == Codes.UNRECOGNIZED )
Check that a set of HTTP headers have a Content-Type header, and that it is the expected value.

Args: headers: headers to check

Raises: RequestSendFailed: if the Content-Type header is missing or doesn't match
def check_content_type_is(headers: Headers, expected_content_type: str) -> None: """ Check that a set of HTTP headers have a Content-Type header, and that it is the expected value. Args: headers: headers to check Raises: RequestSendFailed: if the Content-Type header is missing or doesn't match """ content_type_headers = headers.getRawHeaders(b"Content-Type") if content_type_headers is None: raise RequestSendFailed( RuntimeError("No Content-Type header received from remote server"), can_retry=False, ) c_type = content_type_headers[0].decode("ascii") # only the first header val, options = cgi.parse_header(c_type) if val != expected_content_type: raise RequestSendFailed( RuntimeError( f"Remote server sent Content-Type header of '{c_type}', not '{expected_content_type}'", ), can_retry=False, )
Parse the `Connection` header to determine which headers should not be copied over from the remote response.

As defined by RFC2616 section 14.10 and RFC9110 section 7.6.1

Example: `Connection: close, X-Foo, X-Bar` will return `{"Close", "X-Foo", "X-Bar"}`

Even though "close" is a special directive, let's just treat it as another header for simplicity. If people want to check for this directive, they can simply check for `"Close" in headers`.

Args: connection_header_value: The value of the `Connection` header.

Returns: The set of header names that should not be copied over from the remote response. The keys are capitalized in canonical capitalization.
def parse_connection_header_value( connection_header_value: Optional[bytes], ) -> Set[str]: """ Parse the `Connection` header to determine which headers should not be copied over from the remote response. As defined by RFC2616 section 14.10 and RFC9110 section 7.6.1 Example: `Connection: close, X-Foo, X-Bar` will return `{"Close", "X-Foo", "X-Bar"}` Even though "close" is a special directive, let's just treat it as another header for simplicity. If people want to check for this directive, they can simply check for `"Close" in headers`. Args: connection_header_value: The value of the `Connection` header. Returns: The set of header names that should not be copied over from the remote response. The keys are capitalized in canonical capitalization. """ headers = Headers() extra_headers_to_remove: Set[str] = set() if connection_header_value: extra_headers_to_remove = { headers._canonicalNameCaps(connection_option.strip()).decode("ascii") for connection_option in connection_header_value.split(b",") } return extra_headers_to_remove
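For example (the canonical capitalization comes from Twisted's private `Headers._canonicalNameCaps` helper, which roughly title-cases each hyphen-separated token):

print(parse_connection_header_value(b"close, X-Foo, x-bar"))
# {'Close', 'X-Foo', 'X-Bar'}
print(parse_connection_header_value(None))
# set()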
Parses an http proxy setting and returns an endpoint for the proxy

Args: proxy: the proxy setting in the form: [scheme://][<username>:<password>@]<host>[:<port>] This currently supports http:// and https:// proxies. A hostname without scheme is assumed to be http.

reactor: reactor to be used to connect to the proxy

tls_options_factory: the TLS options to use when connecting through a https proxy

kwargs: other args to be passed to HostnameEndpoint

Returns: a tuple of: the endpoint to use to connect to the proxy, or None if no proxy was given; and the ProxyCredentials to use, or None if no credentials were found.

Raises: ValueError if proxy has no hostname or unsupported scheme. RuntimeError if no tls_options_factory is given for a https connection
def http_proxy_endpoint( proxy: Optional[bytes], reactor: IReactorCore, tls_options_factory: Optional[IPolicyForHTTPS], **kwargs: object, ) -> Tuple[Optional[IStreamClientEndpoint], Optional[ProxyCredentials]]: """Parses an http proxy setting and returns an endpoint for the proxy Args: proxy: the proxy setting in the form: [scheme://][<username>:<password>@]<host>[:<port>] This currently supports http:// and https:// proxies. A hostname without scheme is assumed to be http. reactor: reactor to be used to connect to the proxy tls_options_factory: the TLS options to use when connecting through a https proxy kwargs: other args to be passed to HostnameEndpoint Returns: a tuple of: the endpoint to use to connect to the proxy, or None if no proxy was given; and the ProxyCredentials to use, or None if no credentials were found. Raises: ValueError if proxy has no hostname or unsupported scheme. RuntimeError if no tls_options_factory is given for a https connection """ if proxy is None: return None, None # Note: urlsplit/urlparse cannot be used here as that does not work (for Python # 3.9+) on scheme-less proxies, e.g. host:port. scheme, host, port, credentials = parse_proxy(proxy) proxy_endpoint = HostnameEndpoint(reactor, host, port, **kwargs) if scheme == b"https": if tls_options_factory: tls_options = tls_options_factory.creatorForNetloc(host, port) proxy_endpoint = wrapClientTLS(tls_options, proxy_endpoint) else: raise RuntimeError( f"No TLS options for a https connection via proxy {proxy!s}" ) return proxy_endpoint, credentials
Parse a proxy connection string.

Given an HTTP proxy URL, breaks it down into components and checks that it has a hostname (otherwise it is not useful to us when trying to find a proxy) and asserts that the URL has a scheme we support.

Args: proxy: The proxy connection string. Must be in the form '[scheme://][<username>:<password>@]host[:port]'. default_scheme: The default scheme to return if one is not found in `proxy`. Defaults to http default_port: The default port to return if one is not found in `proxy`. Defaults to 1080

Returns: A tuple containing the scheme, hostname, port and ProxyCredentials. If no credentials were found, the ProxyCredentials instance is replaced with None.

Raises: ValueError if proxy has no hostname or unsupported scheme.
def parse_proxy( proxy: bytes, default_scheme: bytes = b"http", default_port: int = 1080 ) -> Tuple[bytes, bytes, int, Optional[ProxyCredentials]]: """ Parse a proxy connection string. Given an HTTP proxy URL, breaks it down into components and checks that it has a hostname (otherwise it is not useful to us when trying to find a proxy) and asserts that the URL has a scheme we support. Args: proxy: The proxy connection string. Must be in the form '[scheme://][<username>:<password>@]host[:port]'. default_scheme: The default scheme to return if one is not found in `proxy`. Defaults to http default_port: The default port to return if one is not found in `proxy`. Defaults to 1080 Returns: A tuple containing the scheme, hostname, port and ProxyCredentials. If no credentials were found, the ProxyCredentials instance is replaced with None. Raises: ValueError if proxy has no hostname or unsupported scheme. """ # First check if we have a scheme present # Note: urlsplit/urlparse cannot be used here as they do not work (for Python # 3.9+) on scheme-less proxies, e.g. host:port. if b"://" not in proxy: proxy = b"".join([default_scheme, b"://", proxy]) url = urlparse(proxy) if not url.hostname: raise ValueError("Proxy URL did not contain a hostname! Please specify one.") if url.scheme not in (b"http", b"https"): raise ValueError( f"Unknown proxy scheme {url.scheme!s}; only 'http' and 'https' are supported." ) credentials = None if url.username and url.password: credentials = BasicProxyCredentials( b"".join([url.username, b":", url.password]) ) return url.scheme, url.hostname, url.port or default_port, credentials
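Some illustrative parses (hostnames are made up; note that `urlparse` returns bytes components here):

print(parse_proxy(b"http://user:pass@proxy.example.com:8888"))
# (b'http', b'proxy.example.com', 8888, <credentials wrapping b'user:pass'>)

print(parse_proxy(b"proxy.example.com"))
# (b'http', b'proxy.example.com', 1080, None) - default scheme and port applied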
Returns a count of all in flight requests by (method, server_name)
def _get_in_flight_counts() -> Mapping[Tuple[str, ...], int]: """Returns a count of all in flight requests by (method, server_name)""" # Cast to a list to prevent it changing while the Prometheus # thread is collecting metrics with _in_flight_requests_lock: reqs = list(_in_flight_requests) for rm in reqs: rm.update_metrics() # Map from (method, name) -> int, the number of in flight requests of that # type. The key type is Tuple[str, str], but we leave the length unspecified # for compatibility with LaterGauge's annotations. counts: Dict[Tuple[str, ...], int] = {} for rm in reqs: key = (rm.method, rm.name) counts[key] = counts.get(key, 0) + 1 return counts
Sends a JSON error response to clients.
def return_json_error( f: failure.Failure, request: "SynapseRequest", config: Optional[HomeServerConfig] ) -> None: """Sends a JSON error response to clients.""" if f.check(SynapseError): # mypy doesn't understand that f.check asserts the type. exc: SynapseError = f.value error_code = exc.code error_dict = exc.error_dict(config) if exc.headers is not None: for header, value in exc.headers.items(): request.setHeader(header, value) error_ctx = exc.debug_context if error_ctx: logger.info( "%s SynapseError: %s - %s (%s)", request, error_code, exc.msg, error_ctx ) else: logger.info("%s SynapseError: %s - %s", request, error_code, exc.msg) elif f.check(CancelledError): error_code = HTTP_STATUS_REQUEST_CANCELLED error_dict = {"error": "Request cancelled", "errcode": Codes.UNKNOWN} if not request._disconnected: logger.error( "Got cancellation before client disconnection from %r: %r", request.request_metrics.name, request, exc_info=(f.type, f.value, f.getTracebackObject()), ) else: error_code = 500 error_dict = {"error": "Internal server error", "errcode": Codes.UNKNOWN} logger.error( "Failed to handle request via %r: %r", request.request_metrics.name, request, exc_info=(f.type, f.value, f.getTracebackObject()), ) # Only respond with an error response if we haven't already started writing, # otherwise lets just kill the connection if request.startedWriting: if request.transport: try: request.transport.abortConnection() except Exception: # abortConnection throws if the connection is already closed pass else: respond_with_json( request, error_code, error_dict, send_cors=True, )
Sends an HTML error page corresponding to the given failure. Handles RedirectException and other CodeMessageExceptions (such as SynapseError) Args: f: the error to report request: the failing request error_template: the HTML template. Can be either a string (with `{code}`, `{msg}` placeholders), or a jinja2 template
def return_html_error( f: failure.Failure, request: Request, error_template: Union[str, jinja2.Template], ) -> None: """Sends an HTML error page corresponding to the given failure. Handles RedirectException and other CodeMessageExceptions (such as SynapseError) Args: f: the error to report request: the failing request error_template: the HTML template. Can be either a string (with `{code}`, `{msg}` placeholders), or a jinja2 template """ if f.check(CodeMessageException): # mypy doesn't understand that f.check asserts the type. cme: CodeMessageException = f.value code = cme.code msg = cme.msg if cme.headers is not None: for header, value in cme.headers.items(): request.setHeader(header, value) if isinstance(cme, RedirectException): logger.info("%s redirect to %s", request, cme.location) request.setHeader(b"location", cme.location) request.cookies.extend(cme.cookies) elif isinstance(cme, SynapseError): logger.info("%s SynapseError: %s - %s", request, code, msg) else: logger.error( "Failed to handle request %r", request, exc_info=(f.type, f.value, f.getTracebackObject()), ) elif f.check(CancelledError): code = HTTP_STATUS_REQUEST_CANCELLED msg = "Request cancelled" if not request._disconnected: logger.error( "Got cancellation before client disconnection when handling request %r", request, exc_info=(f.type, f.value, f.getTracebackObject()), ) else: code = HTTPStatus.INTERNAL_SERVER_ERROR msg = "Internal server error" logger.error( "Failed to handle request %r", request, exc_info=(f.type, f.value, f.getTracebackObject()), ) if isinstance(error_template, str): body = error_template.format(code=code, msg=html.escape(msg)) else: body = error_template.render(code=code, msg=msg) respond_with_html(request, code, body)
Wraps an async request handler so that it calls request.processing. This helps ensure that work done by the request handler after the request is completed is correctly recorded against the request metrics/logs. The handler method must have a signature of "handle_foo(self, request)", where "request" must be a SynapseRequest. The handler may return a deferred, in which case the completion of the request isn't logged until the deferred completes.
def wrap_async_request_handler( h: Callable[["_AsyncResource", "SynapseRequest"], Awaitable[None]] ) -> Callable[["_AsyncResource", "SynapseRequest"], "defer.Deferred[None]"]: """Wraps an async request handler so that it calls request.processing. This helps ensure that work done by the request handler after the request is completed is correctly recorded against the request metrics/logs. The handler method must have a signature of "handle_foo(self, request)", where "request" must be a SynapseRequest. The handler may return a deferred, in which case the completion of the request isn't logged until the deferred completes. """ async def wrapped_async_request_handler( self: "_AsyncResource", request: "SynapseRequest" ) -> None: with request.processing(): await h(self, request) # we need to preserve_fn here, because the synchronous render method won't yield for # us (obviously) return preserve_fn(wrapped_async_request_handler)
Encode an object into JSON. Returns the encoded bytes.
def _encode_json_bytes(json_object: object) -> bytes: """ Encode an object into JSON. Returns the encoded bytes. """ return json_encoder.encode(json_object).encode("utf-8")
Sends encoded JSON in response to the given request. Args: request: The http request to respond to. code: The HTTP response code. json_object: The object to serialize to JSON. send_cors: Whether to send Cross-Origin Resource Sharing headers https://fetch.spec.whatwg.org/#http-cors-protocol canonical_json: Whether to use the canonicaljson algorithm when encoding the JSON bytes. Returns: twisted.web.server.NOT_DONE_YET if the request is still active.
def respond_with_json( request: "SynapseRequest", code: int, json_object: Any, send_cors: bool = False, canonical_json: bool = True, ) -> Optional[int]: """Sends encoded JSON in response to the given request. Args: request: The http request to respond to. code: The HTTP response code. json_object: The object to serialize to JSON. send_cors: Whether to send Cross-Origin Resource Sharing headers https://fetch.spec.whatwg.org/#http-cors-protocol canonical_json: Whether to use the canonicaljson algorithm when encoding the JSON bytes. Returns: twisted.web.server.NOT_DONE_YET if the request is still active. """ # The response code must always be set, for logging purposes. request.setResponseCode(code) # could alternatively use request.notifyFinish() and flip a flag when # the Deferred fires, but since the flag is RIGHT THERE it seems like # a waste. if request._disconnected: logger.warning( "Not sending response to request %s, already disconnected.", request ) return None if canonical_json: encoder: Callable[[object], bytes] = encode_canonical_json else: encoder = _encode_json_bytes request.setHeader(b"Content-Type", b"application/json") request.setHeader(b"Cache-Control", b"no-cache, no-store, must-revalidate") if send_cors: set_cors_headers(request) run_in_background( _async_write_json_to_request_in_thread, request, encoder, json_object ) return NOT_DONE_YET
Sends encoded JSON in response to the given request. Args: request: The http request to respond to. code: The HTTP response code. json_bytes: The json bytes to use as the response body. send_cors: Whether to send Cross-Origin Resource Sharing headers https://fetch.spec.whatwg.org/#http-cors-protocol Returns: twisted.web.server.NOT_DONE_YET if the request is still active.
def respond_with_json_bytes( request: "SynapseRequest", code: int, json_bytes: bytes, send_cors: bool = False, ) -> Optional[int]: """Sends encoded JSON in response to the given request. Args: request: The http request to respond to. code: The HTTP response code. json_bytes: The json bytes to use as the response body. send_cors: Whether to send Cross-Origin Resource Sharing headers https://fetch.spec.whatwg.org/#http-cors-protocol Returns: twisted.web.server.NOT_DONE_YET if the request is still active. """ # The response code must always be set, for logging purposes. request.setResponseCode(code) if request._disconnected: logger.warning( "Not sending response to request %s, already disconnected.", request ) return None request.setHeader(b"Content-Type", b"application/json") request.setHeader(b"Content-Length", b"%d" % (len(json_bytes),)) request.setHeader(b"Cache-Control", b"no-cache, no-store, must-revalidate") if send_cors: set_cors_headers(request) _write_bytes_to_request(request, json_bytes) return NOT_DONE_YET
Writes the bytes to the request using an appropriate producer. Note: This should be used instead of `Request.write` to correctly handle large response bodies.
def _write_bytes_to_request(request: Request, bytes_to_write: bytes) -> None: """Writes the bytes to the request using an appropriate producer. Note: This should be used instead of `Request.write` to correctly handle large response bodies. """ # The problem with dumping all of the response into the `Request` object at # once (via `Request.write`) is that doing so starts the timeout for the # next request to be received: so if it takes longer than 60s to stream back # the response to the client, the client never gets it. # # The correct solution is to use a Producer; then the timeout is only # started once all of the content is sent over the TCP connection. # To make sure we don't write all of the bytes at once we split it up into # chunks. chunk_size = 4096 bytes_generator = chunk_seq(bytes_to_write, chunk_size) # We use a `_ByteProducer` here rather than `NoRangeStaticProducer` as the # unit tests can't cope with being given a pull producer. _ByteProducer(request, bytes_generator)
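`chunk_seq` is a small helper defined elsewhere in Synapse; conceptually it is just a strided slice over the sequence, along the lines of this sketch:

from typing import Iterator, Sequence, TypeVar

S = TypeVar("S", bound=Sequence)

def chunk_seq(iseq: S, maxlen: int) -> Iterator[S]:
    # Yield successive slices of at most maxlen items.
    return (iseq[i : i + maxlen] for i in range(0, len(iseq), maxlen))

print(list(chunk_seq(b"abcdefgh", 3)))  # [b'abc', b'def', b'gh']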
Set the CORS headers so that JavaScript running in a web browser can use this API

Args: request: The http request to add CORS to.
def set_cors_headers(request: "SynapseRequest") -> None: """Set the CORS headers so that JavaScript running in a web browser can use this API Args: request: The http request to add CORS to. """ request.setHeader(b"Access-Control-Allow-Origin", b"*") request.setHeader( b"Access-Control-Allow-Methods", b"GET, HEAD, POST, PUT, DELETE, OPTIONS" ) if request.experimental_cors_msc3886: request.setHeader( b"Access-Control-Allow-Headers", b"X-Requested-With, Content-Type, Authorization, Date, If-Match, If-None-Match", ) request.setHeader( b"Access-Control-Expose-Headers", b"ETag, Location, X-Max-Bytes", ) else: request.setHeader( b"Access-Control-Allow-Headers", b"X-Requested-With, Content-Type, Authorization, Date", ) request.setHeader( b"Access-Control-Expose-Headers", b"Synapse-Trace-Id, Server", )
Set the CORP headers so that JavaScript running in a web browser can embed the resource returned from this request when the client requires the `Cross-Origin-Embedder-Policy: require-corp` header.

Args: request: The http request to add the CORP header to.
def set_corp_headers(request: Request) -> None: """Set the CORP headers so that JavaScript running in a web browser can embed the resource returned from this request when the client requires the `Cross-Origin-Embedder-Policy: require-corp` header. Args: request: The http request to add the CORP header to. """ request.setHeader(b"Cross-Origin-Resource-Policy", b"cross-origin")
Wraps `respond_with_html_bytes` by first encoding HTML from a str to UTF-8 bytes.
def respond_with_html(request: Request, code: int, html: str) -> None: """ Wraps `respond_with_html_bytes` by first encoding HTML from a str to UTF-8 bytes. """ respond_with_html_bytes(request, code, html.encode("utf-8"))
Sends HTML (encoded as UTF-8 bytes) as the response to the given request. Note that this adds clickjacking protection headers and finishes the request. Args: request: The http request to respond to. code: The HTTP response code. html_bytes: The HTML bytes to use as the response body.
def respond_with_html_bytes(request: Request, code: int, html_bytes: bytes) -> None: """ Sends HTML (encoded as UTF-8 bytes) as the response to the given request. Note that this adds clickjacking protection headers and finishes the request. Args: request: The http request to respond to. code: The HTTP response code. html_bytes: The HTML bytes to use as the response body. """ # The response code must always be set, for logging purposes. request.setResponseCode(code) # could alternatively use request.notifyFinish() and flip a flag when # the Deferred fires, but since the flag is RIGHT THERE it seems like # a waste. if request._disconnected: logger.warning( "Not sending response to request %s, already disconnected.", request ) return None request.setHeader(b"Content-Type", b"text/html; charset=utf-8") request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),)) # Ensure this content cannot be embedded. set_clickjacking_protection_headers(request) request.write(html_bytes) finish_request(request)
Set headers to guard against clickjacking of embedded content.

This sets the X-Frame-Options and Content-Security-Policy headers, which
instruct browsers not to allow the HTML of the response to be embedded in
another page.

Args:
    request: The http request to add the headers to.
def set_clickjacking_protection_headers(request: Request) -> None:
    """
    Set headers to guard against clickjacking of embedded content.

    This sets the X-Frame-Options and Content-Security-Policy headers, which
    instruct browsers not to allow the HTML of the response to be embedded in
    another page.

    Args:
        request: The http request to add the headers to.
    """
    request.setHeader(b"X-Frame-Options", b"DENY")
    request.setHeader(b"Content-Security-Policy", b"frame-ancestors 'none';")
Write a 302 (or other specified status code) response to the request, if it is still alive. Args: request: The http request to respond to. url: The URL to redirect to. statusCode: The HTTP status code to use for the redirect (defaults to 302). cors: Whether to set CORS headers on the response.
def respond_with_redirect( request: "SynapseRequest", url: bytes, statusCode: int = FOUND, cors: bool = False ) -> None: """ Write a 302 (or other specified status code) response to the request, if it is still alive. Args: request: The http request to respond to. url: The URL to redirect to. statusCode: The HTTP status code to use for the redirect (defaults to 302). cors: Whether to set CORS headers on the response. """ logger.debug("Redirect to %s", url.decode("utf-8")) if cors: set_cors_headers(request) request.setResponseCode(statusCode) request.setHeader(b"location", url) finish_request(request)
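An illustrative call (the destination URL is a placeholder): redirecting the browser at the end of a login-style flow, with CORS headers so a web client is allowed to observe the response:

def _redirect_to_landing(request: "SynapseRequest") -> None:
    # Hypothetical usage; 302 Found is the default status code.
    respond_with_redirect(request, b"https://example.com/landing", cors=True)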
Finish writing the response to the request.

Twisted raises a RuntimeError if the connection was closed before the response
was written, but doesn't provide a convenient or reliable way to determine in
advance whether the connection was closed. So we catch and log the
RuntimeError.

You might think that ``request.notifyFinish`` could be used to tell if the
request was finished. However, the Deferred it returns won't fire if the
connection was already closed, meaning we'd have to have called the method
right at the start of the request. By the time we want to write the response
it will already be too late.
def finish_request(request: Request) -> None:
    """Finish writing the response to the request.

    Twisted raises a RuntimeError if the connection was closed before the
    response was written, but doesn't provide a convenient or reliable way to
    determine in advance whether the connection was closed. So we catch and log
    the RuntimeError.

    You might think that ``request.notifyFinish`` could be used to tell if the
    request was finished. However, the Deferred it returns won't fire if the
    connection was already closed, meaning we'd have to have called the method
    right at the start of the request. By the time we want to write the
    response it will already be too late.
    """
    try:
        request.finish()
    except RuntimeError as e:
        logger.info("Connection disconnected before response was written: %r", e)
Parse an integer parameter from the request's query string

Args:
    request: the twisted HTTP request.
    name: the name of the query parameter.
    default: value to use if the parameter is absent, defaults to None.
    required: whether to raise a 400 SynapseError if the parameter is absent,
        defaults to False.

Returns:
    An int value or the default.

Raises:
    SynapseError: if the parameter is absent and required, or if the parameter
        is present and not an integer.
def parse_integer(
    request: Request, name: str, default: Optional[int] = None, required: bool = False
) -> Optional[int]:
    """Parse an integer parameter from the request's query string

    Args:
        request: the twisted HTTP request.
        name: the name of the query parameter.
        default: value to use if the parameter is absent, defaults to None.
        required: whether to raise a 400 SynapseError if the parameter is absent,
            defaults to False.

    Returns:
        An int value or the default.

    Raises:
        SynapseError: if the parameter is absent and required, or if the parameter
            is present and not an integer.
    """
    args: Mapping[bytes, Sequence[bytes]] = request.args  # type: ignore
    return parse_integer_from_args(args, name, default, required)
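For example (the query parameter names are illustrative), a paginating handler might read its parameters like so:

from typing import Optional, Tuple


def _read_pagination_params(request: Request) -> Tuple[Optional[int], Optional[int]]:
    # Hypothetical sketch: ?limit= is optional with a default, while ?from=
    # is mandatory and produces a 400 SynapseError when missing or when the
    # value is not an integer.
    limit = parse_integer(request, "limit", default=10)
    start = parse_integer(request, "from", required=True)
    return start, limit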